code (stringlengths 75 to 104k) | docstring (stringlengths 1 to 46.9k) |
---|---|
def del_pipes(self, pipes, *args, **kwargs):
"""
Deletes a sequence of pipes from the ``Dagger`` in the specified order.
Takes optional arguments for ``Dagger.del_pipe``.
Arguments:
- pipes(sequence of valid ``del_pipe`` arguments) Sequence of pipes or
other valid ``Dagger.del_pipe`` arguments to be removed from the
``Dagger`` in the left to right order.
"""
for pipe in pipes:
self.del_pipe(pipe, *args, **kwargs) | Deletes a sequence of pipes from the ``Dagger`` in the specified order.
Takes optional arguments for ``Dagger.del_pipe``.
Arguments:
- pipes(sequence of valid ``del_pipe`` arguments) Sequence of pipes or
other valid ``Dagger.del_pipe`` arguments to be removed from the
``Dagger`` in the left to right order. |
def _parse_topic(client, command, actor, args):
"""Parse a TOPIC and update channel state, then dispatch a TOPIC event."""
channel, _, topic = args.partition(" :")
channel = client.server.get_channel(channel)
channel.topic = topic or None
if actor:
actor = User(actor)
client.dispatch_event("TOPIC", actor, channel, topic) | Parse a TOPIC and update channel state, then dispatch a TOPIC event. |
def get_opcodes(self):
"""Returns a list of opcodes. Opcodes are the same as defined by
:py:mod:`difflib`."""
if not self.opcodes:
d, m, opcodes = edit_distance_backpointer(self.seq1, self.seq2,
action_function=self.action_function,
test=self.test)
if self.dist:
assert d == self.dist
if self._matches:
assert m == self._matches
self.dist = d
self._matches = m
self.opcodes = opcodes
return self.opcodes | Returns a list of opcodes. Opcodes are the same as defined by
:py:mod:`difflib`. |
def get(self, key, get_cas=False):
"""
Get a key from server.
:param key: Key's name
:type key: six.string_types
:param get_cas: If true, return (value, cas), where cas is the new CAS value.
:type get_cas: boolean
:return: Returns the key's data from the server.
:rtype: object
"""
for server in self.servers:
value, cas = server.get(key)
if value is not None:
if get_cas:
return value, cas
else:
return value
if get_cas:
return None, None | Get a key from server.
:param key: Key's name
:type key: six.string_types
:param get_cas: If true, return (value, cas), where cas is the new CAS value.
:type get_cas: boolean
:return: Returns the key's data from the server.
:rtype: object |
def create_ogr_field_from_definition(field_definition):
"""Helper to create a field from definition.
:param field_definition: The definition of the field (see:
safe.definitions.fields).
:type field_definition: dict
:return: The new ogr field definition.
:rtype: ogr.FieldDefn
"""
if isinstance(field_definition['type'], list):
# Use the first element in the list of type
field_type = field_definition['type'][0]
else:
field_type = field_definition['type']
# Conversion to OGR field
field_type = field_type_converter.get(field_type, ogr.OFTString)
return ogr.FieldDefn(field_definition['field_name'], field_type) | Helper to create a field from definition.
:param field_definition: The definition of the field (see:
safe.definitions.fields).
:type field_definition: dict
:return: The new ogr field definition.
:rtype: ogr.FieldDefn |
def create_object(self, data, view_kwargs):
"""Create an object through sqlalchemy
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object from sqlalchemy
"""
self.before_create_object(data, view_kwargs)
relationship_fields = get_relationships(self.resource.schema, model_field=True)
nested_fields = get_nested_fields(self.resource.schema, model_field=True)
join_fields = relationship_fields + nested_fields
obj = self.model(**{key: value
for (key, value) in data.items() if key not in join_fields})
self.apply_relationships(data, obj)
self.apply_nested_fields(data, obj)
self.session.add(obj)
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
raise JsonApiException("Object creation error: " + str(e), source={'pointer': '/data'})
self.after_create_object(obj, data, view_kwargs)
return obj | Create an object through sqlalchemy
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object from sqlalchemy |
def select(self, selection_specs=None, **kwargs):
"""Applies selection by dimension name
Applies a selection along the dimensions of the object using
keyword arguments. The selection may be narrowed to certain
objects using selection_specs. For container objects the
selection will be applied to all children as well.
Selections may select a specific value, slice or set of values:
* value: Scalar values will select rows along with an exact
match, e.g.:
ds.select(x=3)
* slice: Slices may be declared as tuples of the upper and
lower bound, e.g.:
ds.select(x=(0, 3))
* values: A list of values may be selected using a list or
set, e.g.:
ds.select(x=[0, 1, 2])
Args:
selection_specs: List of specs to match on
A list of types, functions, or type[.group][.label]
strings specifying which objects to apply the
selection on.
**selection: Dictionary declaring selections by dimension
Selections can be scalar values, tuple ranges, lists
of discrete values and boolean arrays
Returns:
Returns a Dimensioned object containing the selected data
or a scalar if a single value was selected
"""
if selection_specs is not None and not isinstance(selection_specs, (list, tuple)):
selection_specs = [selection_specs]
selection = super(DynamicMap, self).select(selection_specs, **kwargs)
def dynamic_select(obj, **dynkwargs):
if selection_specs is not None:
matches = any(obj.matches(spec) for spec in selection_specs)
else:
matches = True
if matches:
return obj.select(**kwargs)
return obj
if not isinstance(selection, DynamicMap):
return dynamic_select(selection)
else:
from ..util import Dynamic
dmap = Dynamic(self, operation=dynamic_select, streams=self.streams)
dmap.data = selection.data
return dmap | Applies selection by dimension name
Applies a selection along the dimensions of the object using
keyword arguments. The selection may be narrowed to certain
objects using selection_specs. For container objects the
selection will be applied to all children as well.
Selections may select a specific value, slice or set of values:
* value: Scalar values will select rows along with an exact
match, e.g.:
ds.select(x=3)
* slice: Slices may be declared as tuples of the upper and
lower bound, e.g.:
ds.select(x=(0, 3))
* values: A list of values may be selected using a list or
set, e.g.:
ds.select(x=[0, 1, 2])
Args:
selection_specs: List of specs to match on
A list of types, functions, or type[.group][.label]
strings specifying which objects to apply the
selection on.
**selection: Dictionary declaring selections by dimension
Selections can be scalar values, tuple ranges, lists
of discrete values and boolean arrays
Returns:
Returns a Dimensioned object containing the selected data
or a scalar if a single value was selected |
def outer_product(vec0: QubitVector, vec1: QubitVector) -> QubitVector:
"""Direct product of qubit vectors
The tensor ranks must match and qubits must be disjoint.
"""
R = vec0.rank
R1 = vec1.rank
N0 = vec0.qubit_nb
N1 = vec1.qubit_nb
if R != R1:
raise ValueError('Incompatible vectors. Rank must match')
if not set(vec0.qubits).isdisjoint(vec1.qubits):
raise ValueError('Overlapping qubits')
qubits: Qubits = tuple(vec0.qubits) + tuple(vec1.qubits)
tensor = bk.outer(vec0.tensor, vec1.tensor)
# Interleave (super)-operator axes
# R = 1 perm = (0, 1)
# R = 2 perm = (0, 2, 1, 3)
# R = 4 perm = (0, 4, 1, 5, 2, 6, 3, 7)
tensor = bk.reshape(tensor, ([2**N0] * R) + ([2**N1] * R))
perm = [idx for ij in zip(range(0, R), range(R, 2*R)) for idx in ij]
tensor = bk.transpose(tensor, perm)
return QubitVector(tensor, qubits) | Direct product of qubit vectors
The tensor ranks must match and qubits must be disjoint. |
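The axis-interleaving comment inside ``outer_product`` can be checked in isolation. The helper below is a hypothetical standalone reproduction of the ``perm`` expression, not part of the original module; it only confirms that the permutation matches the worked examples in the comments.

```python
# Hypothetical standalone check of the interleaving permutation used in outer_product.
def interleave_perm(R: int) -> list:
    """Interleave axes 0..R-1 with axes R..2R-1, as in the perm expression above."""
    return [idx for ij in zip(range(0, R), range(R, 2 * R)) for idx in ij]

print(interleave_perm(1))  # [0, 1]
print(interleave_perm(2))  # [0, 2, 1, 3]
print(interleave_perm(4))  # [0, 4, 1, 5, 2, 6, 3, 7]
```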
def send_extended(self, address, timestamp, value):
"""Queue an extended datapoint (ie. a string), return True/False for success.
Arguments:
address -- uint64_t representing a unique metric.
timestamp -- uint64_t representing number of nanoseconds (10^-9) since epoch.
value -- string value being stored.
"""
if self.marquise_ctx is None:
raise ValueError("Attempted to write to a closed Marquise handle.")
self.__debug("Supplied address: %s" % address)
if value is None:
raise TypeError("Can't store None as a value.")
value = str(value)
if timestamp is None:
timestamp = self.current_timestamp()
# Use cast() here to make up the C datatypes for dispatch.
# FFI will take care of converting them to the right endianness. I think.
c_address = FFI.cast("uint64_t", address)
c_timestamp = FFI.cast("uint64_t", timestamp)
# c_value needs to be a byte array with a length in bytes
c_value = cstring(value)
c_length = FFI.cast("size_t", len_cstring(value))
self.__debug("Sending extended value '%s' with length of %d" % (value, c_length))
success = MARQUISE_SEND_EXTENDED(self.marquise_ctx, c_address, c_timestamp, c_value, c_length)
if success != 0:
self.__debug("send_extended returned %d, raising exception" % success)
raise RuntimeError("send_extended was unsuccessful, errno is %d" % FFI.errno)
self.__debug("send_extended returned %d" % success)
return True | Queue an extended datapoint (ie. a string), return True/False for success.
Arguments:
address -- uint64_t representing a unique metric.
timestamp -- uint64_t representing number of nanoseconds (10^-9) since epoch.
value -- string value being stored. |
def create_fd (self):
"""Create open file descriptor."""
if self.filename is None:
return i18n.get_encoded_writer(encoding=self.output_encoding,
errors=self.codec_errors)
return codecs.open(self.filename, "wb", self.output_encoding,
self.codec_errors) | Create open file descriptor. |
def plugins_show(what=None, name=None, version=None, details=False):
"""
Show details of available plugins
Parameters
----------
what: Class of plugins e.g., backend
name: Name of the plugin e.g., s3
version: Version of the plugin
details: Should details be shown?
"""
global pluginmgr
return pluginmgr.show(what, name, version, details) | Show details of available plugins
Parameters
----------
what: Class of plugins e.g., backend
name: Name of the plugin e.g., s3
version: Version of the plugin
details: Should details be shown? |
def _wmorlet(f0, sd, sampling_rate, ns=5):
"""
adapted from nitime
returns a complex morlet wavelet in the time domain
Parameters
----------
f0 : center frequency
sd : standard deviation of frequency
sampling_rate : sampling rate
ns : window length in number of standard deviations
"""
st = 1. / (2. * pi * sd)
w_sz = float(int(ns * st * sampling_rate)) # half time window size
t = arange(-w_sz, w_sz + 1, dtype=float) / sampling_rate
w = (exp(-t ** 2 / (2. * st ** 2)) * exp(2j * pi * f0 * t) /
sqrt(sqrt(pi) * st * sampling_rate))
return w | adapted from nitime
returns a complex morlet wavelet in the time domain
Parameters
----------
f0 : center frequency
sd : standard deviation of frequency
sampling_rate : sampling rate
ns : window length in number of standard deviations |
def pdf(self, mu):
"""
PDF for Poisson prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu)
"""
if self.transform is not None:
mu = self.transform(mu)
return ss.poisson.pmf(mu, self.lmd0) | PDF for Poisson prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu) |
def add_group(self, name, desc, status):
"""
Add a new group to a network.
"""
existing_group = get_session().query(ResourceGroup).filter(ResourceGroup.name==name, ResourceGroup.network_id==self.id).first()
if existing_group is not None:
raise HydraError("A resource group with name %s is already in network %s"%(name, self.id))
group_i = ResourceGroup()
group_i.name = name
group_i.description = desc
group_i.status = status
get_session().add(group_i)
self.resourcegroups.append(group_i)
return group_i | Add a new group to a network. |
def component_acting_parent_tag(parent_tag, tag):
"""
Only intended for use in getting components; looks for a parent tag named fig-group
and, if so, finds the first fig tag inside it to use as the acting parent tag
"""
if parent_tag.name == "fig-group":
if (len(tag.find_previous_siblings("fig")) > 0):
acting_parent_tag = first(extract_nodes(parent_tag, "fig"))
else:
# Do not return the first fig as parent of itself
return None
else:
acting_parent_tag = parent_tag
return acting_parent_tag | Only intended for use in getting components; looks for a parent tag named fig-group
and, if so, finds the first fig tag inside it to use as the acting parent tag |
def face_adjacency(faces=None,
mesh=None,
return_edges=False):
"""
Returns an (n,2) list of face indices.
Each pair of faces in the list shares an edge, making them adjacent.
Parameters
----------
faces : (n, 3) int, or None
List of vertex indices representing triangles
mesh : Trimesh object
If passed, will use cached edges instead of faces
return_edges : bool
Return the edges shared by adjacent faces
Returns
---------
adjacency : (m,2) int
Indexes of faces that are adjacent
edges: (m,2) int
Only returned if return_edges is True
Indexes of vertices which make up the
edges shared by the adjacent faces
Examples
----------
This is useful for lots of things such as finding
face- connected components:
>>> graph = nx.Graph()
>>> graph.add_edges_from(mesh.face_adjacency)
>>> groups = nx.connected_components(graph)
"""
if mesh is None:
# first generate the list of edges for the current faces
# also return the index for which face the edge is from
edges, edges_face = faces_to_edges(faces,
return_index=True)
# make sure edge rows are sorted
edges.sort(axis=1)
else:
# if passed a mesh, use the cached values
edges = mesh.edges_sorted
edges_face = mesh.edges_face
# this will return the indices for duplicate edges
# every edge appears twice in a well constructed mesh
# so for every row in edge_idx:
# edges[edge_idx[*][0]] == edges[edge_idx[*][1]]
# in this call to group rows we discard edges which
# don't occur twice
edge_groups = grouping.group_rows(edges, require_count=2)
if len(edge_groups) == 0:
log.error('No adjacent faces detected! Did you merge vertices?')
# the pairs of all adjacent faces
# so for every row in face_idx, self.faces[face_idx[*][0]] and
# self.faces[face_idx[*][1]] will share an edge
face_adjacency = edges_face[edge_groups]
# sort pairs so we can search for indexes with ordered pairs
face_adjacency.sort(axis=1)
if return_edges:
face_adjacency_edges = edges[edge_groups[:, 0]]
return face_adjacency, face_adjacency_edges
return face_adjacency | Returns an (n,2) list of face indices.
Each pair of faces in the list shares an edge, making them adjacent.
Parameters
----------
faces : (n, 3) int, or None
List of vertex indices representing triangles
mesh : Trimesh object
If passed, will use cached edges instead of faces
return_edges : bool
Return the edges shared by adjacent faces
Returns
---------
adjacency : (m,2) int
Indexes of faces that are adjacent
edges: (m,2) int
Only returned if return_edges is True
Indexes of vertices which make up the
edges shared by the adjacent faces
Examples
----------
This is useful for lots of things such as finding
face- connected components:
>>> graph = nx.Graph()
>>> graph.add_edges_from(mesh.face_adjacency)
>>> groups = nx.connected_components(graph) |
def mmGetPlotConnectionsPerColumn(self, title="Connections per Columns"):
"""
Returns plot of # connections per column.
@return (Plot) plot
"""
plot = Plot(self, title)
connectedCounts = numpy.ndarray(self.getNumColumns(), dtype=uintType)
self.getConnectedCounts(connectedCounts)
plot.addGraph(sorted(connectedCounts.tolist(), reverse=True),
position=211,
xlabel="column", ylabel="# connections")
plot.addHistogram(connectedCounts.tolist(),
position=212,
bins=len(connectedCounts) / 10,
xlabel="# connections", ylabel="# columns")
return plot | Returns plot of # connections per column.
@return (Plot) plot |
def simplify_types(types):
# type: (Iterable[AbstractType]) -> List[AbstractType]
"""Given some types, give simplified types representing the union of types."""
flattened = flatten_types(types)
items = filter_ignored_items(flattened)
items = [simplify_recursive(item) for item in items]
items = merge_items(items)
items = dedupe_types(items)
# We have to remove redundant items after everything has been simplified and
# merged as this simplification may be what makes items redundant.
items = remove_redundant_items(items)
if len(items) > 3:
return [AnyType()]
else:
return items | Given some types, give simplified types representing the union of types. |
def create_build_system(working_dir, buildsys_type=None, package=None, opts=None,
write_build_scripts=False, verbose=False,
build_args=[], child_build_args=[]):
"""Return a new build system that can build the source in working_dir."""
from rez.plugin_managers import plugin_manager
# detect build system if necessary
if not buildsys_type:
clss = get_valid_build_systems(working_dir, package=package)
if not clss:
raise BuildSystemError(
"No build system is associated with the path %s" % working_dir)
if len(clss) != 1:
s = ', '.join(x.name() for x in clss)
raise BuildSystemError(("Source could be built with one of: %s; "
"Please specify a build system") % s)
buildsys_type = iter(clss).next().name()
# create instance of build system
cls_ = plugin_manager.get_plugin_class('build_system', buildsys_type)
return cls_(working_dir,
opts=opts,
package=package,
write_build_scripts=write_build_scripts,
verbose=verbose,
build_args=build_args,
child_build_args=child_build_args) | Return a new build system that can build the source in working_dir. |
def find(self, **filter_args):
"""
Find exactly one resource in scope of this manager, by matching
resource properties against the specified filter arguments, and return
its Python resource object (e.g. for a CPC, a :class:`~zhmcclient.Cpc`
object is returned).
Any resource property may be specified in a filter argument. For
details about filter arguments, see :ref:`Filtering`.
The zhmcclient implementation handles the specified properties in an
optimized way: Properties that can be filtered on the HMC are actually
filtered there (this varies by resource type), and the remaining
properties are filtered on the client side.
If the "name" property is specified as the only filter argument, an
optimized lookup is performed that uses a name-to-URI cache in this
manager object. This optimized lookup uses the specified match
value for exact matching and is not interpreted as a regular
expression.
Authorization requirements:
* see the `list()` method in the derived classes.
Parameters:
\\**filter_args:
All keyword arguments are used as filter arguments. Specifying no
keyword arguments causes no filtering to happen. See the examples
for usage details.
Returns:
Resource object in scope of this manager object that matches the
filter arguments. This resource object has a minimal set of
properties.
Raises:
:exc:`~zhmcclient.NotFound`: No matching resource found.
:exc:`~zhmcclient.NoUniqueMatch`: More than one matching resource
found.
: Exceptions raised by the `list()` methods in derived resource
manager classes (see :ref:`Resources`).
Examples:
* The following example finds a CPC by its name. Because the 'name'
resource property is also a valid Python variable name, there are
two ways for the caller to specify the filter arguments for this
method:
As named parameters::
cpc = client.cpcs.find(name='CPC001')
As a parameter dictionary::
filter_args = {'name': 'CPC0001'}
cpc = client.cpcs.find(**filter_args)
* The following example finds a CPC by its object ID. Because the
'object-id' resource property is not a valid Python variable name,
the caller can specify the filter argument only as a parameter
dictionary::
filter_args = {'object-id': '12345-abc...de-12345'}
cpc = client.cpcs.find(**filter_args)
"""
obj_list = self.findall(**filter_args)
num_objs = len(obj_list)
if num_objs == 0:
raise NotFound(filter_args, self)
elif num_objs > 1:
raise NoUniqueMatch(filter_args, self, obj_list)
else:
return obj_list[0] | Find exactly one resource in scope of this manager, by matching
resource properties against the specified filter arguments, and return
its Python resource object (e.g. for a CPC, a :class:`~zhmcclient.Cpc`
object is returned).
Any resource property may be specified in a filter argument. For
details about filter arguments, see :ref:`Filtering`.
The zhmcclient implementation handles the specified properties in an
optimized way: Properties that can be filtered on the HMC are actually
filtered there (this varies by resource type), and the remaining
properties are filtered on the client side.
If the "name" property is specified as the only filter argument, an
optimized lookup is performed that uses a name-to-URI cache in this
manager object. This optimized lookup uses the specified match
value for exact matching and is not interpreted as a regular
expression.
Authorization requirements:
* see the `list()` method in the derived classes.
Parameters:
\\**filter_args:
All keyword arguments are used as filter arguments. Specifying no
keyword arguments causes no filtering to happen. See the examples
for usage details.
Returns:
Resource object in scope of this manager object that matches the
filter arguments. This resource object has a minimal set of
properties.
Raises:
:exc:`~zhmcclient.NotFound`: No matching resource found.
:exc:`~zhmcclient.NoUniqueMatch`: More than one matching resource
found.
: Exceptions raised by the `list()` methods in derived resource
manager classes (see :ref:`Resources`).
Examples:
* The following example finds a CPC by its name. Because the 'name'
resource property is also a valid Python variable name, there are
two ways for the caller to specify the filter arguments for this
method:
As named parameters::
cpc = client.cpcs.find(name='CPC001')
As a parameter dictionary::
filter_args = {'name': 'CPC0001'}
cpc = client.cpcs.find(**filter_args)
* The following example finds a CPC by its object ID. Because the
'object-id' resource property is not a valid Python variable name,
the caller can specify the filter argument only as a parameter
dictionary::
filter_args = {'object-id': '12345-abc...de-12345'}
cpc = client.cpcs.find(**filter_args) |
def _host_libc(self):
"""Use the --libc-dir option if provided, otherwise invoke a host compiler to find libc dev."""
libc_dir_option = self.get_options().libc_dir
if libc_dir_option:
maybe_libc_crti = os.path.join(libc_dir_option, self._LIBC_INIT_OBJECT_FILE)
if os.path.isfile(maybe_libc_crti):
return HostLibcDev(crti_object=maybe_libc_crti,
fingerprint=hash_file(maybe_libc_crti))
raise self.HostLibcDevResolutionError(
"Could not locate {} in directory {} provided by the --libc-dir option."
.format(self._LIBC_INIT_OBJECT_FILE, libc_dir_option))
return self._get_host_libc_from_host_compiler() | Use the --libc-dir option if provided, otherwise invoke a host compiler to find libc dev. |
def _addSpecfile(self, specfile, path):
"""Adds a new specfile entry to SiiContainer.info. See also
:class:`SiiContainer.addSpecfile()`.
:param specfile: the name of an ms-run file
:param path: filedirectory for loading and saving the ``siic`` files
"""
self.info[specfile] = {'path': path, 'qcAttr': None, 'qcCutoff': None,
'qcLargerBetter': None, 'rankAttr': None,
'rankLargerBetter': None
}
self.container[specfile] = dict() | Adds a new specfile entry to SiiContainer.info. See also
:class:`SiiContainer.addSpecfile()`.
:param specfile: the name of an ms-run file
:param path: filedirectory for loading and saving the ``siic`` files |
def set_preference(data, chunk_size):
"""Return the median of the distribution of pairwise L2 Euclidean distances
between samples (the rows of 'data') as the default preference parameter
for Affinity Propagation clustering.
Parameters
----------
data : array of shape (N_samples, N_features)
The data-set submitted for Affinity Propagation clustering.
chunk_size : int
The size of random subsamples from the data-set whose similarity
matrix is computed. The resulting median of the distribution of
pairwise distances between the data-points selected as part of a
given subsample is stored into a list of medians.
Returns
-------
preference : float
The preference parameter for Affinity Propagation clustering is computed
as the median of the list of median pairwise distances between the data-points
selected as part of each of 15 rounds of random subsampling.
"""
N_samples, N_features = data.shape
rng = np.arange(0, N_samples, dtype = int)
medians = []
for i in range(15):
selected_samples = np.random.choice(N_samples, size = chunk_size, replace = False)
samples = data[selected_samples, :]
S = - euclidean_distances(samples, data, squared = True)
n = chunk_size * N_samples - (chunk_size * (chunk_size + 1) / 2)
rows = np.zeros(0, dtype = int)
for i in range(chunk_size):
rows = np.append(rows, np.full(N_samples - i, i, dtype = int))
cols = np.zeros(0, dtype = int)
for i in range(chunk_size):
cols = np.append(cols, np.delete(rng, selected_samples[:i+1]))
triu_indices = tuple((rows, cols))
preference = np.median(S, overwrite_input = True)
medians.append(preference)
del S
if i % 4 == 3:
gc.collect()
preference = np.median(medians)
return preference | Return the median of the distribution of pairwise L2 Euclidean distances
between samples (the rows of 'data') as the default preference parameter
for Affinity Propagation clustering.
Parameters
----------
data : array of shape (N_samples, N_features)
The data-set submitted for Affinity Propagation clustering.
chunk_size : int
The size of random subsamples from the data-set whose similarity
matrix is computed. The resulting median of the distribution of
pairwise distances between the data-points selected as part of a
given subsample is stored into a list of medians.
Returns
-------
preference : float
The preference parameter for Affinity Propagation clustering is computed
as the median of the list of median pairwise distances between the data-points
selected as part of each of 15 rounds of random subsampling. |
def create(self, **kwargs):
"""
Creates a new resource.
:param kwargs: The properties of the resource
:return: The created item returned by the API
wrapped as a `Model` object
"""
response = self.ghost.execute_post('%s/' % self._type_name, json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0]) | Creates a new resource.
:param kwargs: The properties of the resource
:return: The created item returned by the API
wrapped as a `Model` object |
def institute(self, institute_id):
"""Featch a single institute from the backend
Args:
institute_id(str)
Returns:
Institute object
"""
LOG.debug("Fetch institute {}".format(institute_id))
institute_obj = self.institute_collection.find_one({
'_id': institute_id
})
if institute_obj is None:
LOG.debug("Could not find institute {0}".format(institute_id))
return institute_obj | Fetch a single institute from the backend
Args:
institute_id(str)
Returns:
Institute object |
def cholesky(L, b, P=None):
'''
P A P' = L L'
'''
logger.debug('Solving system of dim {} with cholesky factors'.format(len(b)))
## convert L and U to csr format
is_csr = scipy.sparse.isspmatrix_csr(L)
is_csc = scipy.sparse.isspmatrix_csc(L)
if not is_csr and not is_csc:
warnings.warn('cholesky requires L be in CSR or CSC matrix format. Converting matrix.', scipy.sparse.SparseEfficiencyWarning)
if is_csc:
U = L.transpose()
if not is_csr:
L = L.tocsr()
if not is_csc:
U = L.transpose().tocsr()
assert scipy.sparse.isspmatrix_csr(L)
assert scipy.sparse.isspmatrix_csr(U)
## compute
return LU(L, U, b, P=P) | P A P' = L L' |
def wasb_log_exists(self, remote_log_location):
"""
Check if remote_log_location exists in remote storage
:param remote_log_location: log's location in remote storage
:return: True if location exists else False
"""
try:
return self.hook.check_for_blob(self.wasb_container, remote_log_location)
except Exception:
pass
return False | Check if remote_log_location exists in remote storage
:param remote_log_location: log's location in remote storage
:return: True if location exists else False |
def forward(self, x, w):
"""Forward function.
:param x: Feature indices.
:type x: torch.Tensor of shape (batch_size * length)
:param w: Feature weights.
:type w: torch.Tensor of shape (batch_size * length)
:return: Output of linear layer.
:rtype: torch.Tensor of shape (batch_size, num_classes)
"""
if self.bias is None:
return (w.unsqueeze(2) * self.weight(x)).sum(dim=1)
else:
return (w.unsqueeze(2) * self.weight(x)).sum(dim=1) + self.bias | Forward function.
:param x: Feature indices.
:type x: torch.Tensor of shape (batch_size * length)
:param w: Feature weights.
:type w: torch.Tensor of shape (batch_size * length)
:return: Output of linear layer.
:rtype: torch.Tensor of shape (batch_size, num_classes) |
def bin_number(datapoint, intervals):
"""
Given a datapoint and intervals representing bins, returns the number
represented in binned form, where the bin including the value is
set to 1 and all others are 0.
"""
index = numpy.searchsorted(intervals, datapoint)
return [0 if index != i else 1 for i in range(len(intervals) + 1)] | Given a datapoint and intervals representing bins, returns the number
represented in binned form, where the bin including the value is
set to 1 and all others are 0. |
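A minimal usage sketch for ``bin_number``, assuming the function above is importable as-is (it relies on ``numpy`` being imported in its module); the interval values below are illustrative only.

```python
intervals = [1.0, 2.0, 3.0]        # three boundaries define four bins

print(bin_number(2.5, intervals))  # searchsorted index 2 -> [0, 0, 1, 0]
print(bin_number(0.5, intervals))  # below the first boundary -> [1, 0, 0, 0]
```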
def validate(self):
"""Validate the resource using its voluptuous schema"""
try:
# update _resource to have default values from the schema
self._resource = self.schema(self._resource)
except MultipleInvalid as e:
errors = [format_error(err, self.resource_type) for err in e.errors]
raise exceptions.ValidationError({'errors': errors})
yield self.check_unique() | Validate the resource using its voluptuous schema |
async def _get_response(self, message):
"""
Get response running the view with await syntax if it is a
coroutine function, otherwise just run it the normal way.
"""
view = self.discovery_view(message)
if not view:
return
if inspect.iscoroutinefunction(view):
response = await view(message)
else:
response = view(message)
return self.prepare_response(response, message) | Get response running the view with await syntax if it is a
coroutine function, otherwise just run it the normal way. |
def _connect_signals(self, model):
"""
Connect signals for the model.
"""
for signal in self._signals:
receiver = self._signals[signal]
signal.connect(receiver, sender=model, dispatch_uid=self._dispatch_uid(signal, model)) | Connect signals for the model. |
def count_divisors(n):
""" Count the number of divisors of an integer n
Args:
n (int): strictly positive integer
Returns:
The number of distinct divisors of n
Raises:
TypeError: if n is not an integer
ValueError: if n is negative
"""
if not isinstance(n, int):
raise TypeError("Expecting a strictly positive integer")
if n <= 0:
raise ValueError("Expecting a strictly positive integer")
number_of_divisors = 1
remain = n
for p in prime_generator():
if p > n:
return number_of_divisors
exponent = 1
while remain % p == 0:
remain = remain // p
exponent += 1
number_of_divisors *= exponent
if remain == 1:
return number_of_divisors | Count the number of divisors of an integer n
Args:
n (int): strictly positive integer
Returns:
The number of distinct divisors of n
Raises:
TypeError: if n is not an integer
ValueError: if n is negative |
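A small usage sketch for ``count_divisors``, assuming ``prime_generator()`` (defined elsewhere in the same module) yields the primes in ascending order, as the loop expects.

```python
print(count_divisors(12))  # 12 = 2**2 * 3 -> (2 + 1) * (1 + 1) = 6 divisors
print(count_divisors(1))   # the only divisor of 1 is 1
print(count_divisors(7))   # a prime has exactly 2 divisors
```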
def xpathNextAncestor(self, cur):
"""Traversal function for the "ancestor" direction the
ancestor axis contains the ancestors of the context node;
the ancestors of the context node consist of the parent of
context node and the parent's parent and so on; the nodes
are ordered in reverse document order; thus the parent is
the first node on the axis, and the parent's parent is the
second node on the axis """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextAncestor(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextAncestor() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | Traversal function for the "ancestor" direction the
ancestor axis contains the ancestors of the context node;
the ancestors of the context node consist of the parent of
context node and the parent's parent and so on; the nodes
are ordered in reverse document order; thus the parent is
the first node on the axis, and the parent's parent is the
second node on the axis |
def wnexpd(left, right, window):
"""
Expand each of the intervals of a double precision window.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnexpd_c.html
:param left: Amount subtracted from each left endpoint.
:type left: float
:param right: Amount added to each right endpoint.
:type right: float
:param window: Window to be expanded.
:type window: spiceypy.utils.support_types.SpiceCell
:return: Expanded Window.
:rtype: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(window, stypes.SpiceCell)
assert window.dtype == 1
left = ctypes.c_double(left)
right = ctypes.c_double(right)
libspice.wnexpd_c(left, right, ctypes.byref(window))
return window | Expand each of the intervals of a double precision window.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnexpd_c.html
:param left: Amount subtracted from each left endpoint.
:type left: float
:param right: Amount added to each right endpoint.
:type right: float
:param window: Window to be expanded.
:type window: spiceypy.utils.support_types.SpiceCell
:return: Expanded Window.
:rtype: spiceypy.utils.support_types.SpiceCell |
def _partition_estimators(n_estimators, n_jobs):
"""Private function used to partition estimators between jobs."""
# Compute the number of jobs
if n_jobs == -1:
n_jobs = min(cpu_count(), n_estimators)
else:
n_jobs = min(n_jobs, n_estimators)
# Partition estimators between jobs
n_estimators_per_job = (n_estimators // n_jobs) * np.ones(n_jobs,
dtype=np.int)
n_estimators_per_job[:n_estimators % n_jobs] += 1
starts = np.cumsum(n_estimators_per_job)
return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist() | Private function used to partition estimators between jobs. |
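A quick illustration of the partitioning logic, assuming the helper is importable and runs on a NumPy version where the deprecated ``np.int`` alias still exists; the numbers are illustrative.

```python
# 10 estimators across 3 jobs: the first 10 % 3 = 1 job gets one extra estimator.
n_jobs, counts, starts = _partition_estimators(n_estimators=10, n_jobs=3)
print(n_jobs)   # 3
print(counts)   # [4, 3, 3]
print(starts)   # [0, 4, 7, 10] -> job k handles estimators starts[k]:starts[k+1]
```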
def append_main_thread(self):
"""create & start main thread
:return: None
"""
thread = MainThread(main_queue=self.main_queue,
main_spider=self.main_spider,
branch_spider=self.branch_spider)
thread.daemon = True
thread.start() | create & start main thread
:return: None |
def idf(posting, document_count):
"""A function to calculate the inverse document frequency for a posting.
This is shared between the builder and the index.
"""
documents_with_term = 0
for field_name in posting:
if field_name == "_index":
continue
documents_with_term += len(posting[field_name].keys())
x = (document_count - documents_with_term + 0.5) / (documents_with_term + 0.5)
return math.log(1 + abs(x)) | A function to calculate the inverse document frequency for a posting.
This is shared between the builder and the index. |
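A hypothetical posting structure to show how ``idf`` counts documents, assuming the function above is importable; the field and document names are made up for illustration.

```python
posting = {
    "_index": 1.23,                       # skipped by the field loop
    "title": {"doc-1": {}, "doc-2": {}},  # the term occurs in two documents' titles
}

# documents_with_term = 2, so x = (10 - 2 + 0.5) / (2 + 0.5) = 3.4
# and idf = log(1 + 3.4) ~= 1.4816
print(round(idf(posting, document_count=10), 4))
```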
def wiki_pages(self, extra_params=None):
"""
All Wiki Pages with access to this Space
"""
return self.api._get_json(
WikiPage,
space=self,
rel_path=self._build_rel_path('wiki_pages'),
extra_params=extra_params,
) | All Wiki Pages with access to this Space |
def transition(prior_state, next_state):
"""
Transitions to a non-standard state
Raises InvalidStateTransition if next_state is not allowed.
:param prior_state: <str>
:param next_state: <str>
:return: <str>
"""
if next_state not in STATES[prior_state][TRANSITION]:
acceptable = STATES[prior_state][TRANSITION]
err = "cannot {}->{} may only {}->{}".format(prior_state,
next_state,
prior_state,
acceptable)
raise InvalidStateTransition(err)
return next_state | Transitions to a non-standard state
Raises InvalidStateTransition if next_state is not allowed.
:param prior_state: <str>
:param next_state: <str>
:return: <str> |
def to_(self, attrvals):
""" Create a list of Attribute instances.
:param attrvals: A dictionary of attributes and values
:return: A list of Attribute instances
"""
attributes = []
for key, value in attrvals.items():
key = key.lower()
attributes.append(factory(saml.Attribute,
name=key,
name_format=self.name_format,
attribute_value=do_ava(value)))
return attributes | Create a list of Attribute instances.
:param attrvals: A dictionary of attributes and values
:return: A list of Attribute instances |
def get_reminders_per_page(self, per_page=1000, page=1, params=None):
"""
Get reminders per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=REMINDERS, per_page=per_page, page=page, params=params) | Get reminders per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list |
def protege_data(datas_str, sens):
"""
Used to crypt/decrypt data before saving locally.
Override if security is needed.
bytes -> str when decrypting
str -> bytes when crypting
:param datas_str: When crypting, str. when decrypting bytes
:param sens: True to crypt, False to decrypt
"""
return bytes(datas_str, encoding="utf8") if sens else str(datas_str, encoding="utf8") | Used to crypt/decrypt data before saving locally.
Override if security is needed.
bytes -> str when decrypting
str -> bytes when crypting
:param datas_str: When crypting, str. when decrypting bytes
:param sens: True to crypt, False to decrypt |
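A round-trip sketch for the default (no-op) ``protege_data``, assuming it is importable as-is; it simply encodes on the way out and decodes on the way back in.

```python
stored = protege_data("local settings", True)   # str -> bytes before saving
print(stored)                                   # b'local settings'
print(protege_data(stored, False))              # bytes -> str after loading
```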
async def kick(self, channel, target, reason=None):
""" Kick user from channel. """
if not self.in_channel(channel):
raise NotInChannel(channel)
if reason:
await self.rawmsg('KICK', channel, target, reason)
else:
await self.rawmsg('KICK', channel, target) | Kick user from channel. |
def createStatus(self,
change_id,
revision_id,
name,
value,
abstain=None,
rerun=None,
comment=None,
url=None,
reporter=None,
category=None,
duration=None):
"""
Abstract the POST REST api documented here:
https://gerrit.googlesource.com/plugins/verify-status/+/master/src/main/resources/Documentation/rest-api-changes.md
:param change_id: The change_id for the change tested (can be in the long form e.g:
myProject~master~I8473b95934b5732ac55d26311a706c9c2bde9940 or in the short integer form).
:param revision_id: the revision_id tested can be the patchset number or
the commit id (short or long).
:param name: The name of the job.
:param value: The pass/fail result for this job: -1: fail 0: unstable, 1: succeed
:param abstain: Whether the value counts as a vote (defaults to false)
:param rerun: Whether this result is from a re-test on the same patchset
:param comment: A short comment about this job
:param url: The url link to more info about this job
:param reporter: The user that verified this job
:param category: A category for this job
:param duration: The time it took to run this job
:return: A deferred with the result from Gerrit.
"""
payload = {'name': name, 'value': value}
if abstain is not None:
payload['abstain'] = abstain
if rerun is not None:
payload['rerun'] = rerun
if comment is not None:
payload['comment'] = comment
if url is not None:
payload['url'] = url
if reporter is not None:
payload['reporter'] = reporter
if category is not None:
payload['category'] = category
if duration is not None:
payload['duration'] = duration
if self._verbose:
log.debug(
'Sending Gerrit status for {change_id}/{revision_id}: data={data}',
change_id=change_id,
revision_id=revision_id,
data=payload)
return self._http.post(
'/'.join([
'/a/changes', str(change_id), 'revisions', str(revision_id),
'verify-status~verifications'
]),
json=payload) | Abstract the POST REST api documented here:
https://gerrit.googlesource.com/plugins/verify-status/+/master/src/main/resources/Documentation/rest-api-changes.md
:param change_id: The change_id for the change tested (can be in the long form e.g:
myProject~master~I8473b95934b5732ac55d26311a706c9c2bde9940 or in the short integer form).
:param revision_id: the revision_id tested can be the patchset number or
the commit id (short or long).
:param name: The name of the job.
:param value: The pass/fail result for this job: -1: fail 0: unstable, 1: succeed
:param abstain: Whether the value counts as a vote (defaults to false)
:param rerun: Whether this result is from a re-test on the same patchset
:param comment: A short comment about this job
:param url: The url link to more info about this job
:param reporter: The user that verified this job
:param category: A category for this job
:param duration: The time it took to run this job
:return: A deferred with the result from Gerrit. |
def complain(self, id, is_spam):
""" http://api.yandex.ru/cleanweb/doc/dg/concepts/complain.xml"""
r = self.request('post', 'http://cleanweb-api.yandex.ru/1.0/complain',
data={'id': id, 'spamtype': 'spam' if is_spam else 'ham'})
return True | http://api.yandex.ru/cleanweb/doc/dg/concepts/complain.xml |
def create(path, value='', acls=None, ephemeral=False, sequence=False, makepath=False, profile=None,
hosts=None, scheme=None, username=None, password=None, default_acl=None):
'''
Create Znode
path
path of znode to create
value
value to assign to znode (Default: '')
acls
list of acl dictionaries to be assigned (Default: None)
ephemeral
indicate node is ephemeral (Default: False)
sequence
indicate node is suffixed with a unique index (Default: False)
makepath
Create parent paths if they do not exist (Default: False)
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.create /test/name daniel profile=prod
'''
if acls is None:
acls = []
acls = [make_digest_acl(**acl) for acl in acls]
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
return conn.create(path, salt.utils.stringutils.to_bytes(value), acls, ephemeral, sequence, makepath) | Create Znode
path
path of znode to create
value
value to assign to znode (Default: '')
acls
list of acl dictionaries to be assigned (Default: None)
ephemeral
indicate node is ephemeral (Default: False)
sequence
indicate node is suffixed with a unique index (Default: False)
makepath
Create parent paths if they do not exist (Default: False)
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.create /test/name daniel profile=prod |
def _create_polynomial_model(
name: str,
symbol: str,
degree: int,
ds: DataSet,
dss: dict):
"""
Create a polynomial model to describe the specified property based on the
specified data set, and save it to a .json file.
:param name: material name.
:param symbol: property symbol.
:param degree: polynomial degree.
:param ds: the source data set.
:param dss: dictionary of all datasets.
"""
ds_name = ds.name.split(".")[0].lower()
file_name = f"{name.lower()}-{symbol.lower()}-polynomialmodelt-{ds_name}"
newmod = PolynomialModelT.create(ds, symbol, degree)
newmod.plot(dss, _path(f"data/{file_name}.pdf"), False)
newmod.write(_path(f"data/{file_name}.json")) | Create a polynomial model to describe the specified property based on the
specified data set, and save it to a .json file.
:param name: material name.
:param symbol: property symbol.
:param degree: polynomial degree.
:param ds: the source data set.
:param dss: dictionary of all datasets. |
def excluded(filename):
"""
Check if options.exclude contains a pattern that matches filename.
"""
basename = os.path.basename(filename)
for pattern in options.exclude:
if fnmatch(basename, pattern):
# print basename, 'excluded because it matches', pattern
return True | Check if options.exclude contains a pattern that matches filename. |
def save(self, *args, **kwargs):
"""
Extends model ``save()`` to allow dynamic geocoding
"""
self.geocode()
return super(GeoMixin, self).save(*args, **kwargs) | Extends model ``save()`` to allow dynamic geocoding |
def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):
"""Returns filepath where to store HyperSearch JobID
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: Filepath where to store HyperSearch JobID
"""
# Get the base path and figure out the path of the report file.
basePath = permWorkDir
# Form the name of the output csv file that will contain all the results
filename = "%s_HyperSearchJobID.pkl" % (outputLabel,)
filepath = os.path.join(basePath, filename)
return filepath | Returns filepath where to store HyperSearch JobID
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: Filepath where to store HyperSearch JobID |
def delete_exchange_for_vhost(self, exchange, vhost, if_unused=False):
"""
Delete an individual exchange. You can add the parameter
``if_unused=True``. This prevents the delete from succeeding if the
exchange is bound to a queue or as a source to another exchange.
:param exchange: The exchange name
:type exchange: str
:param vhost: The vhost name
:type vhost: str
:param if_unused: Set to ``True`` to only delete if it is unused
:type if_unused: bool
"""
self._api_delete(
'/api/exchanges/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(exchange)),
params={
'if-unused': if_unused
},
) | Delete an individual exchange. You can add the parameter
``if_unused=True``. This prevents the delete from succeeding if the
exchange is bound to a queue or as a source to another exchange.
:param exchange: The exchange name
:type exchange: str
:param vhost: The vhost name
:type vhost: str
:param if_unused: Set to ``True`` to only delete if it is unused
:type if_unused: bool |
def open(self):
"""Implementation of NAPALM method open."""
try:
connection = self.transport_class(
host=self.hostname,
username=self.username,
password=self.password,
timeout=self.timeout,
**self.eapi_kwargs
)
if self.device is None:
self.device = pyeapi.client.Node(connection, enablepwd=self.enablepwd)
# does not raise an Exception if unusable
# let's try to run a very simple command
self.device.run_commands(["show clock"], encoding="text")
except ConnectionError as ce:
# and this is raised either if device not avaiable
# either if HTTP(S) agent is not enabled
# show management api http-commands
raise ConnectionException(py23_compat.text_type(ce)) | Implementation of NAPALM method open. |
def set_velocities(self, velocities):
"""
:param velocities (au): list of list of atom velocities
:return:
"""
assert len(velocities) == len(self.mol)
self.params["velocity"] = velocities | :param velocities (au): list of list of atom velocities
:return: |
def _apply_to_data(data, func, unpack_dict=False):
"""Apply a function to data, trying to unpack different data
types.
"""
apply_ = partial(_apply_to_data, func=func, unpack_dict=unpack_dict)
if isinstance(data, dict):
if unpack_dict:
return [apply_(v) for v in data.values()]
return {k: apply_(v) for k, v in data.items()}
if isinstance(data, (list, tuple)):
try:
# e.g.list/tuple of arrays
return [apply_(x) for x in data]
except TypeError:
return func(data)
return func(data) | Apply a function to data, trying to unpack different data
types. |
def weights_prepend_inputs_to_targets(labels):
"""Assign weight 1.0 to only the "targets" portion of the labels.
Weight 1.0 is assigned to all nonzero labels past the first zero.
See prepend_mode in common_hparams.py
Args:
labels: A Tensor of int32s.
Returns:
A Tensor of floats.
"""
past_first_zero = tf.cumsum(to_float(tf.equal(labels, 0)), axis=1)
nonzero = to_float(labels)
return to_float(tf.not_equal(past_first_zero * nonzero, 0)) | Assign weight 1.0 to only the "targets" portion of the labels.
Weight 1.0 is assigned to all nonzero labels past the first zero.
See prepend_mode in common_hparams.py
Args:
labels: A Tensor of int32s.
Returns:
A Tensor of floats. |
def _add_person_to_group(person, group):
""" Call datastores after adding a person to a group. """
from karaage.datastores import add_accounts_to_group
from karaage.datastores import add_accounts_to_project
from karaage.datastores import add_accounts_to_institute
a_list = person.account_set
add_accounts_to_group(a_list, group)
for project in group.project_set.all():
add_accounts_to_project(a_list, project)
for institute in group.institute_set.all():
add_accounts_to_institute(a_list, institute) | Call datastores after adding a person to a group. |
def add_latlonalt(self, lat, lon, altitude, terrain_alt=False):
'''add a point via latitude/longitude/altitude'''
if terrain_alt:
frame = mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
else:
frame = mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT
p = mavutil.mavlink.MAVLink_mission_item_message(self.target_system,
self.target_component,
0,
frame,
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
0, 0, 0, 0, 0, 0,
lat, lon, altitude)
self.add(p) | add a point via latitude/longitude/altitude |
def get_route(ip):
'''
Return routing information for given destination ip
.. versionadded:: 2016.11.5
CLI Example::
salt '*' network.get_route 10.10.10.10
'''
cmd = 'Find-NetRoute -RemoteIPAddress {0}'.format(ip)
out = __salt__['cmd.run'](cmd, shell='powershell', python_shell=True)
regexp = re.compile(
r"^IPAddress\s+:\s(?P<source>[\d\.:]+)?.*"
r"^InterfaceAlias\s+:\s(?P<interface>[\w\.\:\-\ ]+)?.*"
r"^NextHop\s+:\s(?P<gateway>[\d\.:]+)",
flags=re.MULTILINE | re.DOTALL
)
m = regexp.search(out)
ret = {
'destination': ip,
'gateway': m.group('gateway'),
'interface': m.group('interface'),
'source': m.group('source')
}
return ret | Return routing information for given destination ip
.. versionadded:: 2016.11.5
CLI Example::
salt '*' network.get_route 10.10.10.10 |
def _explain(self, tree):
""" Set up the engine to do a dry run of a query """
self._explaining = True
self._call_list = []
old_call = self.connection.call
def fake_call(command, **kwargs):
""" Replacement for connection.call that logs args """
if command == "describe_table":
return old_call(command, **kwargs)
self._call_list.append((command, kwargs))
raise ExplainSignal
self.connection.call = fake_call
try:
ret = self._run(tree[1])
try:
list(ret)
except TypeError:
pass
finally:
self.connection.call = old_call
self._explaining = False | Set up the engine to do a dry run of a query |
def _run_checks(self):
'''basic sanity checks for the file name (and others if needed) before
attempting parsing.
'''
if self.recipe is not None:
# Does the recipe provided exist?
if not os.path.exists(self.recipe):
bot.error("Cannot find %s, is the path correct?" %self.recipe)
sys.exit(1)
# Ensure we carry fullpath
self.recipe = os.path.abspath(self.recipe) | basic sanity checks for the file name (and others if needed) before
attempting parsing. |
def _filter_by_pattern(self, pattern):
"""Filter the Filter the Data Collection based on a list of booleans."""
try:
_len = len(pattern)
except TypeError:
raise TypeError("pattern is not a list of Booleans. Got {}".format(
type(pattern)))
_filt_values = [d for i, d in enumerate(self._values) if pattern[i % _len]]
_filt_datetimes = [d for i, d in enumerate(self.datetimes) if pattern[i % _len]]
return _filt_values, _filt_datetimes | Filter the Data Collection based on a list of booleans. |
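The cyclic boolean filtering used above can be seen in isolation. This is a hypothetical standalone rewrite of the two comprehensions, not the class method itself; the sample values are illustrative.

```python
values  = [10, 11, 12, 13, 14, 15]
pattern = [True, False, True]   # repeats cyclically across the values

filtered = [v for i, v in enumerate(values) if pattern[i % len(pattern)]]
print(filtered)  # [10, 12, 13, 15]
```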
def do_handshake(self):
"""Perform a handshake with the peer
This method forces an explicit handshake to be performed with either
the client or server peer.
"""
_logger.debug("Initiating handshake...")
try:
self._wrap_socket_library_call(
lambda: SSL_do_handshake(self._ssl.value),
ERR_HANDSHAKE_TIMEOUT)
except openssl_error() as err:
if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:
raise_ssl_error(ERR_PORT_UNREACHABLE, err)
raise
self._handshake_done = True
_logger.debug("...completed handshake") | Perform a handshake with the peer
This method forces an explicit handshake to be performed with either
the client or server peer. |
def merge_partition(self, partition, path, value):
"""
Merge a value into a partition for a key path.
"""
dct = self.partitions[partition]
*heads, tail = path
for part in heads:
dct = dct.setdefault(part, dict())
dct[tail] = value | Merge a value into a partition for a key path. |
def run(
self,
inputs: Dict[str, Union[float, Iterable]],
covers: Dict[str, Union[float, Iterable]],
torch_size: Optional[int] = None,
) -> Union[float, Iterable]:
"""Executes the FIB over a particular set of inputs and returns the
result.
Args:
inputs: Input set where keys are the names of input nodes in the
GrFN and each key points to a set of input values (or just one).
Returns:
A set of outputs from executing the GrFN, one for every set of
inputs.
"""
# Abort run if covers does not match our expected cover set
if len(covers) != len(self.cover_nodes):
raise ValueError("Incorrect number of cover values.")
# Set the cover node values
for node_name, val in covers.items():
self.nodes[node_name]["value"] = val
return super().run(inputs, torch_size) | Executes the FIB over a particular set of inputs and returns the
result.
Args:
inputs: Input set where keys are the names of input nodes in the
GrFN and each key points to a set of input values (or just one).
Returns:
A set of outputs from executing the GrFN, one for every set of
inputs. |
def configuration(t0: date, t1: Optional[date] = None,
steps_per_day: int = None) -> Tuple[np.ndarray, np.ndarray]:
"""
Get the positions and velocities of the sun and eight planets
Returned as a tuple q, v
q: Nx3 array of positions (x, y, z) in the J2000.0 coordinate frame.
"""
# Default steps_per_day = 1
if steps_per_day is None:
steps_per_day = 1
# Time step dt is 1.0 over steps per day
dt: float = 1.0 / float(steps_per_day)
# Default t1 to one day after t0
if t1 is not None:
# Convert t to a julian day
jd0: int = julian_day(t0)
jd1: int = julian_day(t1)
else:
jd0: int = julian_day(t0)
jd1: int = jd0 + dt
# Pass the times as an array of julian days
jd: np.ndarray = np.arange(jd0, jd1, dt)
# Number of time steps
N: int = len(jd)
# bodies is a list of the celestial bodies considered; should be in an enclosing scope
# Number of bodies
B: int = len(bodies)
# Number of dimensions
dims: int = B * 3
# Initialize empty arrays for position q and velocity v
q: np.ndarray = np.zeros((N, dims))
v: np.ndarray = np.zeros((N, dims))
# Position and velocity of the sun as arrays of length 3
body_ids: List[int] = [jpl_body_id[body] for body in bodies]
# Fill in the position and velocity for each body in order
for i, body_id in enumerate(body_ids):
# The slice of columns for this body (same in q and v)
slice_i = slice(3*i, 3*(i+1))
# Extract the position and velocity from jpl
qi, vi = jpl_kernel[0, body_id].compute_and_differentiate(jd)
# Convert positions from km to meters (multiply by km2m)
q[:, slice_i] = qi.T * km2m
# Convert velocities from km / day to meters / sec (multiply by km2m, divide by day2sec)
v[:, slice_i] = vi.T * (km2m / day2sec)
# Return tuple of Tx6 arrays for position q and velocity v
return q, v | Get the positions and velocities of the sun and eight planets
Returned as a tuple q, v
q: Nx3 array of positions (x, y, z) in the J2000.0 coordinate frame. |
def readline(self, limit=-1, delim=b'\n'):
"""Read a single line.
If EOF is reached before a full line can be read, a partial line is
returned. If *limit* is specified, at most this many bytes will be read.
"""
self._check_readable()
chunks = []
while True:
chunk = self._buffer.get_chunk(limit, delim)
if not chunk:
break
chunks.append(chunk)
if chunk.endswith(delim):
break
if limit >= 0:
limit -= len(chunk)
if limit == 0:
break
if not chunks and not self._buffer.eof and self._buffer.error:
raise compat.saved_exc(self._buffer.error)
return b''.join(chunks) | Read a single line.
If EOF is reached before a full line can be read, a partial line is
returned. If *limit* is specified, at most this many bytes will be read. |
def iter_all_repos(self, number=-1, since=None, etag=None, per_page=None):
"""Iterate over every repository in the order they were created.
:param int number: (optional), number of repositories to return.
Default: -1, returns all of them
:param int since: (optional), last repository id seen (allows
restarting this iteration)
:param str etag: (optional), ETag from a previous request to the same
endpoint
:param int per_page: (optional), number of repositories to list per
request
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
url = self._build_url('repositories')
return self._iter(int(number), url, Repository,
params={'since': since, 'per_page': per_page},
etag=etag) | Iterate over every repository in the order they were created.
:param int number: (optional), number of repositories to return.
Default: -1, returns all of them
:param int since: (optional), last repository id seen (allows
restarting this iteration)
:param str etag: (optional), ETag from a previous request to the same
endpoint
:param int per_page: (optional), number of repositories to list per
request
:returns: generator of :class:`Repository <github3.repos.Repository>` |
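A hedged usage sketch, assuming a github3.py (0.x) client object that mixes in the method above:

import github3

gh = github3.GitHub()  # anonymous client; heavily rate-limited
# First five public repositories ever created, in creation order.
for repo in gh.iter_all_repos(number=5):
    print(repo.full_name)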
def cmd(send, msg, args):
"""
Search the Twitter API.
Syntax: {command} <query> <--user username> <--count 1>
"""
if not msg:
send('What do you think I am, a bird?')
return
parser = arguments.ArgParser(args['config'])
parser.add_argument('query', nargs='*')
group = parser.add_mutually_exclusive_group()
group.add_argument('--user', dest='user', default=None)
group.add_argument('--count', dest='count', type=int, default=1)
group.add_argument('--random', action='store_true', default=False)
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
api = get_search_api(args['config'])
query = TwitterSearchOrder()
keywords = [' '.join(cmdargs.query)]
if cmdargs.user:
keywords += ['from:{}'.format(cmdargs.user)]
query.set_keywords(keywords)
query.set_language('en')
query.set_result_type('recent')
query.set_include_entities(False)
query.set_count(cmdargs.count)
results = list(api.search_tweets_iterable(query))
if not results:
send('No tweets here!')
return
if cmdargs.random:
shuffle(results)
max_chan_tweets = 5
max_pm_tweets = 25
if cmdargs.count > max_pm_tweets:
send("That's too many tweets! The maximum allowed through PM is {}".format(max_pm_tweets))
return
if cmdargs.count > max_chan_tweets:
send("That's a lot of tweets! The maximum allowed in a channel is {}".format(max_chan_tweets))
for i in range(0, min(cmdargs.count, max_pm_tweets)):
if cmdargs.count <= max_chan_tweets:
send(tweet_text(results[i]))
else:
send(tweet_text(results[i]), target=args['nick']) | Search the Twitter API.
Syntax: {command} <query> <--user username> <--count 1> |
def p_unary_6(self, program):
"""
unary : id '(' expression ')'
"""
# note this is a semantic check, not syntactic
if program[1].name not in self.external_functions:
raise QasmError("Illegal external function call: ",
str(program[1].name))
program[0] = node.External([program[1], program[3]]) | unary : id '(' expression ')' |
def calc_cost(y, yhat, cost_matrix):
"""Calculate the cost with given cost matrix
y : ground truth
yhat : estimation
cost_matrix : array-like, shape=(n_classes, n_classes)
The ith row, jth column represents the cost of the ground truth being
ith class and prediction as jth class.
"""
return np.mean(cost_matrix[list(y), list(yhat)]) | Calculate the cost with given cost matrix
y : ground truth
yhat : estimation
cost_matrix : array-like, shape=(n_classes, n_classes)
The ith row, jth column represents the cost of the ground truth being
ith class and prediction as jth class. |
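A small worked example (numbers invented for illustration) of how the fancy indexing above picks one cost per sample:

import numpy as np

cost_matrix = np.array([[0, 1, 4],
                        [1, 0, 1],
                        [4, 1, 0]])
y = np.array([0, 1, 2, 2])        # ground truth
yhat = np.array([0, 2, 1, 2])     # predictions
# cost_matrix[list(y), list(yhat)] -> [0, 1, 1, 0]
print(np.mean(cost_matrix[list(y), list(yhat)]))   # 0.5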
def finish_registration(self, heart):
"""Second half of engine registration, called after our HeartMonitor
has received a beat from the Engine's Heart."""
try:
(eid,queue,reg,purge) = self.incoming_registrations.pop(heart)
except KeyError:
self.log.error("registration::tried to finish nonexistant registration", exc_info=True)
return
self.log.info("registration::finished registering engine %i:%r", eid, queue)
if purge is not None:
purge.stop()
control = queue
self.ids.add(eid)
self.keytable[eid] = queue
self.engines[eid] = EngineConnector(id=eid, queue=queue, registration=reg,
control=control, heartbeat=heart)
self.by_ident[queue] = eid
self.queues[eid] = list()
self.tasks[eid] = list()
self.completed[eid] = list()
self.hearts[heart] = eid
content = dict(id=eid, queue=self.engines[eid].queue.decode('ascii'))
if self.notifier:
self.session.send(self.notifier, "registration_notification", content=content)
self.log.info("engine::Engine Connected: %i", eid) | Second half of engine registration, called after our HeartMonitor
has received a beat from the Engine's Heart. |
def kibana_config(self):
"""
config kibana
:return:
"""
uncomment("/etc/kibana/kibana.yml", "#server.host:", use_sudo=True)
sed('/etc/kibana/kibana.yml', 'server.host:.*',
'server.host: "{0}"'.format(env.host_string), use_sudo=True)
sudo('systemctl stop kibana.service')
sudo('systemctl daemon-reload')
sudo('systemctl enable kibana.service')
sudo('systemctl start kibana.service') | config kibana
:return: |
def _validate_auths(self, path, obj, app):
""" make sure that apiKey and basicAuth are empty list
in Operation object.
"""
errs = []
for k, v in six.iteritems(obj.authorizations or {}):
if k not in app.raw.authorizations:
errs.append('auth {0} not found in resource list'.format(k))
if app.raw.authorizations[k].type in ('basicAuth', 'apiKey') and v != []:
errs.append('auth {0} should be an empty list'.format(k))
return path, obj.__class__.__name__, errs | make sure that apiKey and basicAuth are empty list
in Operation object. |
def _get_line_array_construct(self):
""" Returns a construct for an array of line data.
"""
from_bus = integer.setResultsName("fbus")
to_bus = integer.setResultsName("tbus")
s_rating = real.setResultsName("s_rating") # MVA
v_rating = real.setResultsName("v_rating") # kV
f_rating = real.setResultsName("f_rating") # Hz
length = real.setResultsName("length") # km (Line only)
v_ratio = real.setResultsName("v_ratio") # kV/kV (Transformer only)
r = real.setResultsName("r") # p.u. or Ohms/km
x = real.setResultsName("x") # p.u. or Henrys/km
b = real.setResultsName("b") # p.u. or Farads/km (Line only)
tap_ratio = real.setResultsName("tap") # p.u./p.u. (Transformer only)
phase_shift = real.setResultsName("shift") # degrees (Transformer only)
i_limit = Optional(real).setResultsName("i_limit") # p.u.
p_limit = Optional(real).setResultsName("p_limit") # p.u.
s_limit = Optional(real).setResultsName("s_limit") # p.u.
status = Optional(boolean).setResultsName("status")
line_data = from_bus + to_bus + s_rating + v_rating + \
f_rating + length + v_ratio + r + x + b + tap_ratio + \
phase_shift + i_limit + p_limit + s_limit + status + scolon
line_data.setParseAction(self.push_line)
line_array = Literal("Line.con") + "=" + "[" + "..." + \
ZeroOrMore(line_data + Optional("]" + scolon))
return line_array | Returns a construct for an array of line data. |
def _compute_metric_names(self):
"""Computes the list of metric names from all the scalar (run, tag) pairs.
The return value is a list of (tag, group) pairs representing the metric
names. The list is sorted in Python tuple-order (lexicographical).
For example, if the scalar (run, tag) pairs are:
("exp/session1", "loss")
("exp/session2", "loss")
("exp/session2/eval", "loss")
("exp/session2/validation", "accuracy")
("exp/no-session", "loss_2"),
and the runs corresponding to sessions are "exp/session1", "exp/session2",
this method will return [("loss", ""), ("loss", "/eval"), ("accuracy",
"/validation")]
More precisely, each scalar (run, tag) pair is converted to a (tag, group)
metric name, where group is the suffix of run formed by removing the
longest prefix which is a session run. If no session run is a prefix of
'run', the pair is skipped.
Returns:
A python list containing pairs. Each pair is a (tag, group) pair
representing a metric name used in some session.
"""
session_runs = self._build_session_runs_set()
metric_names_set = set()
run_to_tag_to_content = self.multiplexer.PluginRunToTagToContent(
scalar_metadata.PLUGIN_NAME)
for (run, tag_to_content) in six.iteritems(run_to_tag_to_content):
session = _find_longest_parent_path(session_runs, run)
if not session:
continue
group = os.path.relpath(run, session)
# relpath() returns "." for the 'session' directory, we use an empty
# string.
if group == ".":
group = ""
metric_names_set.update((tag, group) for tag in tag_to_content.keys())
metric_names_list = list(metric_names_set)
# Sort metrics for determinism.
metric_names_list.sort()
return metric_names_list | Computes the list of metric names from all the scalar (run, tag) pairs.
The return value is a list of (tag, group) pairs representing the metric
names. The list is sorted in Python tuple-order (lexicographical).
For example, if the scalar (run, tag) pairs are:
("exp/session1", "loss")
("exp/session2", "loss")
("exp/session2/eval", "loss")
("exp/session2/validation", "accuracy")
("exp/no-session", "loss_2"),
and the runs corresponding to sessions are "exp/session1", "exp/session2",
this method will return [("loss", ""), ("loss", "/eval"), ("accuracy",
"/validation")]
More precisely, each scalar (run, tag) pair is converted to a (tag, group)
metric name, where group is the suffix of run formed by removing the
longest prefix which is a session run. If no session run is a prefix of
'run', the pair is skipped.
Returns:
A python list containing pairs. Each pair is a (tag, group) pair
representing a metric name used in some session. |
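A standalone sketch (hypothetical runs and tags, simplified longest-parent lookup) of the (tag, group) derivation the docstring describes:

import os

session_runs = {"exp/session1", "exp/session2"}
run_to_tags = {
    "exp/session1": ["loss"],
    "exp/session2/eval": ["loss"],
    "exp/session2/validation": ["accuracy"],
    "exp/no-session": ["loss_2"],
}

def longest_parent(paths, path):
    # Longest element of `paths` that equals `path` or is a path prefix of it.
    candidates = [p for p in paths if path == p or path.startswith(p + "/")]
    return max(candidates, key=len) if candidates else None

metric_names = set()
for run, tags in run_to_tags.items():
    session = longest_parent(session_runs, run)
    if not session:
        continue                          # e.g. "exp/no-session" is skipped
    group = os.path.relpath(run, session)
    group = "" if group == "." else group
    metric_names.update((tag, group) for tag in tags)
print(sorted(metric_names))
# [('accuracy', 'validation'), ('loss', ''), ('loss', 'eval')]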
def yearly_plots(
df,
variable,
renormalize = True,
horizontal_axis_labels_days = False,
horizontal_axis_labels_months = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
"""
Create yearly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the DataFrame index is datetime.
"""
    if df.index.dtype not in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
years = []
for group in df.groupby(df.index.year):
years.append(group[1])
scaler = MinMaxScaler()
plt.xlabel("days")
    plt.ylabel(variable)
for year in years:
if renormalize:
values = scaler.fit_transform(year[[variable]])
else:
values = year[variable]
if plot:
plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
if scatter:
plt.scatter(year["days_through_year"], values, s = s)
if horizontal_axis_labels_months:
plt.xticks(
[ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
)
plt.legend() | Create yearly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the DataFrame index is datetime. |
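A hedged usage sketch, assuming yearly_plots and its module-level imports (plt, MinMaxScaler, log) are importable, and that the frame carries the days_through_year column the function reads:

import numpy as np
import pandas as pd

index = pd.date_range("2016-01-01", "2018-12-31", freq="D")
df = pd.DataFrame({
    "temperature": np.random.rand(len(index)),
    "days_through_year": index.dayofyear,
}, index=index)
yearly_plots(df, "temperature", renormalize=True, scatter=False)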
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name | Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex. |
def date(ctx, year, month, day):
"""
Defines a date value
"""
return _date(conversions.to_integer(year, ctx), conversions.to_integer(month, ctx), conversions.to_integer(day, ctx)) | Defines a date value |
def path(self):
'''Path (list of nodes and actions) from root to this node.'''
node = self
path = []
while node:
path.append((node.action, node.state))
node = node.parent
return list(reversed(path)) | Path (list of nodes and actions) from root to this node. |
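A toy illustration (hypothetical Node objects) of the parent-pointer walk and reversal performed above:

from collections import namedtuple

Node = namedtuple("Node", "action state parent")

root = Node(None, "start", None)
mid = Node("go-left", "A", root)
leaf = Node("go-right", "B", mid)

node, path = leaf, []
while node:
    path.append((node.action, node.state))
    node = node.parent
print(list(reversed(path)))
# [(None, 'start'), ('go-left', 'A'), ('go-right', 'B')]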
def mutate(self, info_in):
"""Replicate an info + mutation.
To mutate an info, that info must have a method called
``_mutated_contents``.
"""
# check self is not failed
if self.failed:
raise ValueError("{} cannot mutate as it has failed.".format(self))
from transformations import Mutation
info_out = type(info_in)(origin=self,
contents=info_in._mutated_contents())
Mutation(info_in=info_in, info_out=info_out) | Replicate an info + mutation.
To mutate an info, that info must have a method called
``_mutated_contents``. |
def ball(rmax=3, rmin=0, shape=128, limits=[-4, 4], draw=True, show=True, **kwargs):
"""Show a ball."""
import ipyvolume.pylab as p3
__, __, __, r, _theta, _phi = xyz(shape=shape, limits=limits, spherical=True)
data = r * 0
data[(r < rmax) & (r >= rmin)] = 0.5
if "data_min" not in kwargs:
kwargs["data_min"] = 0
if "data_max" not in kwargs:
kwargs["data_max"] = 1
data = data.T
if draw:
vol = p3.volshow(data=data, **kwargs)
if show:
p3.show()
return vol
else:
return data | Show a ball. |
def newText(content):
"""Creation of a new text node. """
ret = libxml2mod.xmlNewText(content)
    if ret is None:
        raise treeError('xmlNewText() failed')
return xmlNode(_obj=ret) | Creation of a new text node. |
def assign(self, V, py):
"""Store python value in Value
"""
if isinstance(py, (bytes, unicode)):
for i,C in enumerate(V['value.choices'] or self._choices):
if py==C:
V['value.index'] = i
return
# attempt to parse as integer
V['value.index'] = py | Store python value in Value |
def set_title(self, title):
"""Sets the title.
arg: title (string): the new title
raise: InvalidArgument - ``title`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``title`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.set_title_template
self._my_map['title'] = self._get_display_text(title, self.get_title_metadata()) | Sets the title.
arg: title (string): the new title
raise: InvalidArgument - ``title`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``title`` is ``null``
*compliance: mandatory -- This method must be implemented.* |
def explore_path_encompass(self, task_num, dirpath):
"""
Explore path to discover unsearched directories and save filepaths
:param task_num: Processor ID
:param dirpath: Tuple (base directory, path), path information pulled from unsearched Queue
:return: Directories to add to unsearched Queue
"""
base, path = dirpath
directories = []
nondirectories = []
self._printer("Task: " + str(task_num) + " >>> Explored path: " + path, stream=True)
# Loop through paths
for filename in os.listdir(base + os.sep + path):
fullname = os.path.join(path, filename)
# Append to directories if dir
if os.path.isdir(base + os.sep + fullname):
directories.append((base, fullname))
# Pass filters and append to nondirectories if file
else:
nondirectories.append((base, fullname))
self.add_path(nondirectories)
return directories | Explore path to discover unsearched directories and save filepaths
:param task_num: Processor ID
:param dirpath: Tuple (base directory, path), path information pulled from unsearched Queue
:return: Directories to add to unsearched Queue |
def _detect_content_type(self, filename):
'''Determine the mimetype for a file.
:param filename: Filename of file to detect.
'''
name, ext = os.path.splitext(filename)
if not ext:
raise MessageError('File requires an extension.')
ext = ext.lower()
if ext.lstrip('.') in self._banned_extensions:
err = 'Extension "{0}" is not allowed.'
raise MessageError(err.format(ext))
if not mimetypes.inited:
mimetypes.init()
return mimetypes.types_map.get(ext, self._default_content_type) | Determine the mimetype for a file.
:param filename: Filename of file to detect. |
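A self-contained sketch (hypothetical banned-extension list and default type) of the same mimetypes-based lookup:

import mimetypes
import os

BANNED = {"exe", "bat"}
DEFAULT_TYPE = "application/octet-stream"

def detect_content_type(filename):
    _, ext = os.path.splitext(filename)
    if not ext:
        raise ValueError("File requires an extension.")
    ext = ext.lower()
    if ext.lstrip(".") in BANNED:
        raise ValueError('Extension "{0}" is not allowed.'.format(ext))
    if not mimetypes.inited:
        mimetypes.init()
    return mimetypes.types_map.get(ext, DEFAULT_TYPE)

print(detect_content_type("report.pdf"))    # application/pdf
print(detect_content_type("photo.jpeg"))    # image/jpeg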
def get_html_column(self):
""" Get a HTML column for this panel. """
panel_id = "panel_{}".format(self.name)
return ["<h2>{}</h2>".format(self.title) + '<a href="{}">Download data</a>'.format(self.tar_fn())] + [
# list of links
(" <br />" + os.linesep).join(
[
"""
<strong>{bam_name}:</strong>
<a onclick="document.getElementById('{panel_id}').src='{bam_svg}';document.getElementById('{panel_id}_').href='{bam_html}';return false;" href="#">display graph</a>,
<a href="{bam_html}">detailed report</a>
""".format(
bam_name=bam.get_name(),
bam_html=bam.html_fn(),
bam_svg=bam.svg_fn(),
panel_id=panel_id,
) for bam in self.bams
]
) + '<br /> '.format(self.tar_fn()),
# main graph
"""
<div class="formats">
<a href="{html}" id="{panel_id}_">
<img src="{svg}" id="{panel_id}" />
</a>
</div>
""".format(
html=self.bams[0]._html_fn,
svg=self.bams[0]._svg_fn,
panel_id=panel_id,
),
] + [
# overall graphs
"""
<div class="formats">
<img src="{svg}" />
<br />
<a href="{svg}">SVG version</a>
|
<a href="{gp}" type="text/plain">GP file</a>
</div>
""".format(
svg=svg,
gp=self._gp_fn,
) for svg in self._svg_fns
] | Get a HTML column for this panel. |
def channel_angle(im, chanapproxangle=None, *, isshiftdftedge=False,
truesize=None):
"""Extract the channel angle from the rfft
Parameters:
-----------
im: 2d array
The channel image
chanapproxangle: number, optional
If not None, an approximation of the result
isshiftdftedge: boolean, default False
If The image has already been treated:
(edge, dft, fftshift), set to True
truesize: 2 numbers, required if isshiftdftedge is True
The true size of the image
Returns:
--------
angle: number
The channel angle
"""
im = np.asarray(im)
# Compute edge
if not isshiftdftedge:
im = edge(im)
return reg.orientation_angle(im, isshiftdft=isshiftdftedge,
approxangle=chanapproxangle,
truesize=truesize) | Extract the channel angle from the rfft
Parameters:
-----------
im: 2d array
The channel image
chanapproxangle: number, optional
If not None, an approximation of the result
isshiftdftedge: boolean, default False
If The image has already been treated:
(edge, dft, fftshift), set to True
truesize: 2 numbers, required if isshiftdftedge is True
The true size of the image
Returns:
--------
angle: number
The channel angle |
def _process_feature_dbxref(self, limit):
"""
This is the mapping between the flybase features and external
repositories. Generally we want to leave the flybase feature id
as the primary identifier. But we need to make the equivalences/sameAs.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'feature_dbxref'))
LOG.info("processing feature_dbxref mappings")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
for line in filereader:
(feature_dbxref_id, feature_id, dbxref_id, is_current) = line
# 431890 3091292 596211 t
# 2 9 55044 t
# 3 9 55045 t
# 437595 4551668 277309 t
# 437596 4551662 277307 t
if is_current == 'f':
# not sure what to do with it?
continue
feature_key = feature_id
if self.test_mode and int(feature_key) not in \
self.test_keys['gene'] + self.test_keys['allele']:
continue
if feature_key not in self.idhash['feature']:
# some features may not be found in the hash
# if they are "analysis features"
# LOG.debug("Feature %s not found in hash", feature_key)
continue
feature_id = self.idhash['feature'][feature_key]
dbxref_key = dbxref_id
dbxrefs = self.dbxrefs.get(dbxref_key)
if dbxrefs is not None:
for d in dbxrefs:
# need to filter based on db ?
# TODO make other species' identifiers primary??
# instead of flybase?
did = dbxrefs.get(d)
if did.endswith('&class=protein'):
                            did = did[0:len(did)-15]
# don't make something sameAs itself
if did == feature_id:
continue
dlabel = self.label_hash.get(did)
if re.search(r'FB(gn|og)', feature_id):
# only want to add equivalences for fly things
if not re.match(r'OMIM', did):
# these are only omim diseases, not genes;
# we shouldn't be adding these here anyway
# model.addClassToGraph(did, dlabel)
# model.addXref(feature_id, did)
pass # True # that
elif did is not None and dlabel is not None \
and feature_id is not None:
model.addIndividualToGraph(did, dlabel)
model.addXref(feature_id, did)
line_counter += 1
if not self.test_mode \
and limit is not None and line_counter > limit:
break
# FIXME - some flybase genes are xrefed to OMIM diseases!!!!!!
# for example,
# FBog0000375495 xref to omim 601181 (gene)
# and 608033 (phenotype)
return | This is the mapping between the flybase features and external
repositories. Generally we want to leave the flybase feature id
as the primary identifier. But we need to make the equivalences/sameAs.
:param limit:
:return: |
def is_searchable(self):
"""A bool value that indicates whether the name is a valid name to
search by."""
first = alpha_chars(self.first or u'')
last = alpha_chars(self.last or u'')
raw = alpha_chars(self.raw or u'')
return (len(first) >= 2 and len(last) >= 2) or len(raw) >= 4 | A bool value that indicates whether the name is a valid name to
search by. |
def is_affirmative(self, section, option):
"""
Return true if the section option combo exists and it is set
to a truthy value.
"""
return self.has_option(section, option) and \
lib.is_affirmative(self.get(section, option)) | Return true if the section option combo exists and it is set
to a truthy value. |
def get_updates(
self,
display_all_distributions=False,
verbose=False
): # pragma: no cover
"""
When called, get the environment updates and write updates to a CSV
file and if a new config has been provided, write a new configuration
file.
Args:
display_all_distributions (bool): Return distribution even if it is
up-to-date.
            verbose (bool): If ``True``, log progress to the terminal.
"""
if verbose:
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format='%(message)s',
)
logging.info('Checking installed packages for updates...')
updates = self._get_environment_updates(
display_all_distributions=display_all_distributions
)
if updates:
for update in updates:
logging.info(update)
if updates and self._csv_file_name:
self.write_updates_to_csv(updates)
if updates and self._new_config:
self.write_new_config(updates)
return updates | When called, get the environment updates and write updates to a CSV
file and if a new config has been provided, write a new configuration
file.
Args:
display_all_distributions (bool): Return distribution even if it is
up-to-date.
verbose (bool): If ``True``, log progress to the terminal.
def __isListOfTexts(self, docs):
""" Checks whether the input is a list of strings or Text-s;
"""
return isinstance(docs, list) and \
all(isinstance(d, (basestring, Text)) for d in docs) | Checks whether the input is a list of strings or Text-s; |
def simxGetCollisionHandle(clientID, collisionObjectName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handle = ct.c_int()
if (sys.version_info[0] == 3) and (type(collisionObjectName) is str):
collisionObjectName=collisionObjectName.encode('utf-8')
return c_GetCollisionHandle(clientID, collisionObjectName, ct.byref(handle), operationMode), handle.value | Please have a look at the function description/documentation in the V-REP user manual |
def draw(self, time, frametime, target):
"""
Fetch track value for every runnable effect.
If the value is > 0.5 we draw it.
"""
for effect in self.effects:
value = effect.rocket_timeline_track.time_value(time)
if value > 0.5:
effect.draw(time, frametime, target) | Fetch track value for every runnable effect.
If the value is > 0.5 we draw it. |
def call_fn(self, what, *args, **kwargs):
""" Lazy call init_adapter then call the function """
logger.debug('f_{0}:{1}{2}({3})'.format(
self.call_stack_level,
' ' * 4 * self.call_stack_level,
what,
arguments_as_string(args, kwargs)))
port, fn_name = self._what(what)
if port not in self['_initialized_ports']:
self._call_fn(port, 'init_adapter')
self['_initialized_ports'].append(port)
return self._call_fn(port, fn_name, *args, **kwargs) | Lazy call init_adapter then call the function |
def create_tag(self, tag_name=None, **properties):
"""Creates a tag and adds it to the tag table of the TextBuffer.
:param str tag_name:
Name of the new tag, or None
:param **properties:
Keyword list of properties and their values
:returns:
A new tag.
This is equivalent to creating a Gtk.TextTag and then adding the
tag to the buffer's tag table. The returned tag is owned by
the buffer's tag table.
If ``tag_name`` is None, the tag is anonymous.
If ``tag_name`` is not None, a tag called ``tag_name`` must not already
exist in the tag table for this buffer.
Properties are passed as a keyword list of names and values (e.g.
foreground='DodgerBlue', weight=Pango.Weight.BOLD)
"""
tag = Gtk.TextTag(name=tag_name, **properties)
self._get_or_create_tag_table().add(tag)
return tag | Creates a tag and adds it to the tag table of the TextBuffer.
:param str tag_name:
Name of the new tag, or None
:param **properties:
Keyword list of properties and their values
:returns:
A new tag.
This is equivalent to creating a Gtk.TextTag and then adding the
tag to the buffer's tag table. The returned tag is owned by
the buffer's tag table.
If ``tag_name`` is None, the tag is anonymous.
If ``tag_name`` is not None, a tag called ``tag_name`` must not already
exist in the tag table for this buffer.
Properties are passed as a keyword list of names and values (e.g.
foreground='DodgerBlue', weight=Pango.Weight.BOLD) |
def get_dataset(self, key, info):
"""Get the data from the files."""
logger.debug("Getting raw data")
res = super(HRITGOESFileHandler, self).get_dataset(key, info)
self.mda['calibration_parameters'] = self._get_calibration_params()
res = self.calibrate(res, key.calibration)
new_attrs = info.copy()
new_attrs.update(res.attrs)
res.attrs = new_attrs
res.attrs['platform_name'] = self.platform_name
res.attrs['sensor'] = 'goes_imager'
return res | Get the data from the files. |
def set_pending_boot_mode(self, boot_mode):
"""Sets the boot mode of the system for next boot.
:param boot_mode: either 'uefi' or 'legacy'.
:raises: IloInvalidInputError, on an invalid input.
:raises: IloError, on an error from iLO.
"""
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
if boot_mode.upper() not in BOOT_MODE_MAP_REV.keys():
msg = (('Invalid Boot mode: "%(boot_mode)s" specified, valid boot '
'modes are either "uefi" or "legacy"')
% {'boot_mode': boot_mode})
raise exception.IloInvalidInputError(msg)
try:
sushy_system.bios_settings.pending_settings.set_pending_boot_mode(
BOOT_MODE_MAP_REV.get(boot_mode.upper()))
except sushy.exceptions.SushyError as e:
msg = (self._('The Redfish controller failed to set '
'pending boot mode to %(boot_mode)s. '
'Error: %(error)s') %
{'boot_mode': boot_mode, 'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg) | Sets the boot mode of the system for next boot.
:param boot_mode: either 'uefi' or 'legacy'.
:raises: IloInvalidInputError, on an invalid input.
:raises: IloError, on an error from iLO. |