Unnamed: 0
int64 0
10k
| repository_name
stringlengths 7
54
| func_path_in_repository
stringlengths 5
223
| func_name
stringlengths 1
134
| whole_func_string
stringlengths 100
30.3k
| language
stringclasses 1
value | func_code_string
stringlengths 100
30.3k
| func_code_tokens
stringlengths 138
33.2k
| func_documentation_string
stringlengths 1
15k
| func_documentation_tokens
stringlengths 5
5.14k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
|
---|---|---|---|---|---|---|---|---|---|---|---|
6,000 | nschloe/meshplex | meshplex/reader.py | read | def read(filename):
"""Reads an unstructured mesh with added data.
:param filenames: The files to read from.
:type filenames: str
:returns mesh{2,3}d: The mesh data.
:returns point_data: Point data read from file.
:type point_data: dict
:returns field_data: Field data read from file.
:type field_data: dict
"""
mesh = meshio.read(filename)
# make sure to include the used nodes only
if "tetra" in mesh.cells:
points, cells = _sanitize(mesh.points, mesh.cells["tetra"])
return (
MeshTetra(points, cells),
mesh.point_data,
mesh.cell_data,
mesh.field_data,
)
elif "triangle" in mesh.cells:
points, cells = _sanitize(mesh.points, mesh.cells["triangle"])
return (
MeshTri(points, cells),
mesh.point_data,
mesh.cell_data,
mesh.field_data,
)
else:
raise RuntimeError("Unknown mesh type.") | python | def read(filename):
"""Reads an unstructured mesh with added data.
:param filenames: The files to read from.
:type filenames: str
:returns mesh{2,3}d: The mesh data.
:returns point_data: Point data read from file.
:type point_data: dict
:returns field_data: Field data read from file.
:type field_data: dict
"""
mesh = meshio.read(filename)
# make sure to include the used nodes only
if "tetra" in mesh.cells:
points, cells = _sanitize(mesh.points, mesh.cells["tetra"])
return (
MeshTetra(points, cells),
mesh.point_data,
mesh.cell_data,
mesh.field_data,
)
elif "triangle" in mesh.cells:
points, cells = _sanitize(mesh.points, mesh.cells["triangle"])
return (
MeshTri(points, cells),
mesh.point_data,
mesh.cell_data,
mesh.field_data,
)
else:
raise RuntimeError("Unknown mesh type.") | ['def', 'read', '(', 'filename', ')', ':', 'mesh', '=', 'meshio', '.', 'read', '(', 'filename', ')', '# make sure to include the used nodes only', 'if', '"tetra"', 'in', 'mesh', '.', 'cells', ':', 'points', ',', 'cells', '=', '_sanitize', '(', 'mesh', '.', 'points', ',', 'mesh', '.', 'cells', '[', '"tetra"', ']', ')', 'return', '(', 'MeshTetra', '(', 'points', ',', 'cells', ')', ',', 'mesh', '.', 'point_data', ',', 'mesh', '.', 'cell_data', ',', 'mesh', '.', 'field_data', ',', ')', 'elif', '"triangle"', 'in', 'mesh', '.', 'cells', ':', 'points', ',', 'cells', '=', '_sanitize', '(', 'mesh', '.', 'points', ',', 'mesh', '.', 'cells', '[', '"triangle"', ']', ')', 'return', '(', 'MeshTri', '(', 'points', ',', 'cells', ')', ',', 'mesh', '.', 'point_data', ',', 'mesh', '.', 'cell_data', ',', 'mesh', '.', 'field_data', ',', ')', 'else', ':', 'raise', 'RuntimeError', '(', '"Unknown mesh type."', ')'] | Reads an unstructured mesh with added data.
:param filenames: The files to read from.
:type filenames: str
:returns mesh{2,3}d: The mesh data.
:returns point_data: Point data read from file.
:type point_data: dict
:returns field_data: Field data read from file.
:type field_data: dict | ['Reads', 'an', 'unstructured', 'mesh', 'with', 'added', 'data', '.'] | train | https://github.com/nschloe/meshplex/blob/376cfe8ce7b9917e5398c5d60c87455ff5590913/meshplex/reader.py#L26-L57 |
6,001 | arviz-devs/arviz | arviz/plots/violinplot.py | cat_hist | def cat_hist(val, shade, ax, **kwargs_shade):
"""Auxiliary function to plot discrete-violinplots."""
bins = get_bins(val)
binned_d, _ = np.histogram(val, bins=bins, normed=True)
bin_edges = np.linspace(np.min(val), np.max(val), len(bins))
centers = 0.5 * (bin_edges + np.roll(bin_edges, 1))[:-1]
heights = np.diff(bin_edges)
lefts = -0.5 * binned_d
ax.barh(centers, binned_d, height=heights, left=lefts, alpha=shade, **kwargs_shade) | python | def cat_hist(val, shade, ax, **kwargs_shade):
"""Auxiliary function to plot discrete-violinplots."""
bins = get_bins(val)
binned_d, _ = np.histogram(val, bins=bins, normed=True)
bin_edges = np.linspace(np.min(val), np.max(val), len(bins))
centers = 0.5 * (bin_edges + np.roll(bin_edges, 1))[:-1]
heights = np.diff(bin_edges)
lefts = -0.5 * binned_d
ax.barh(centers, binned_d, height=heights, left=lefts, alpha=shade, **kwargs_shade) | ['def', 'cat_hist', '(', 'val', ',', 'shade', ',', 'ax', ',', '*', '*', 'kwargs_shade', ')', ':', 'bins', '=', 'get_bins', '(', 'val', ')', 'binned_d', ',', '_', '=', 'np', '.', 'histogram', '(', 'val', ',', 'bins', '=', 'bins', ',', 'normed', '=', 'True', ')', 'bin_edges', '=', 'np', '.', 'linspace', '(', 'np', '.', 'min', '(', 'val', ')', ',', 'np', '.', 'max', '(', 'val', ')', ',', 'len', '(', 'bins', ')', ')', 'centers', '=', '0.5', '*', '(', 'bin_edges', '+', 'np', '.', 'roll', '(', 'bin_edges', ',', '1', ')', ')', '[', ':', '-', '1', ']', 'heights', '=', 'np', '.', 'diff', '(', 'bin_edges', ')', 'lefts', '=', '-', '0.5', '*', 'binned_d', 'ax', '.', 'barh', '(', 'centers', ',', 'binned_d', ',', 'height', '=', 'heights', ',', 'left', '=', 'lefts', ',', 'alpha', '=', 'shade', ',', '*', '*', 'kwargs_shade', ')'] | Auxiliary function to plot discrete-violinplots. | ['Auxiliary', 'function', 'to', 'plot', 'discrete', '-', 'violinplots', '.'] | train | https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/plots/violinplot.py#L127-L137 |
6,002 | MIT-LCP/wfdb-python | wfdb/io/record.py | check_np_array | def check_np_array(item, field_name, ndim, parent_class, channel_num=None):
"""
Check a numpy array's shape and dtype against required
specifications.
Parameters
----------
item : numpy array
The numpy array to check
field_name : str
The name of the field to check
ndim : int
The required number of dimensions
parent_class : type
The parent class of the dtype. ie. np.integer, np.floating.
channel_num : int, optional
If not None, indicates that the item passed in is a subelement
of a list. Indicate this in the error message if triggered.
"""
# Check shape
if item.ndim != ndim:
error_msg = 'Field `%s` must have ndim == %d' % (field_name, ndim)
if channel_num is not None:
error_msg = ('Channel %d of f' % channel_num) + error_msg[1:]
raise TypeError(error_msg)
# Check dtype
if not np.issubdtype(item.dtype, parent_class):
error_msg = 'Field `%s` must have a dtype that subclasses %s' % (field_name, parent_class)
if channel_num is not None:
error_msg = ('Channel %d of f' % channel_num) + error_msg[1:]
raise TypeError(error_msg) | python | def check_np_array(item, field_name, ndim, parent_class, channel_num=None):
"""
Check a numpy array's shape and dtype against required
specifications.
Parameters
----------
item : numpy array
The numpy array to check
field_name : str
The name of the field to check
ndim : int
The required number of dimensions
parent_class : type
The parent class of the dtype. ie. np.integer, np.floating.
channel_num : int, optional
If not None, indicates that the item passed in is a subelement
of a list. Indicate this in the error message if triggered.
"""
# Check shape
if item.ndim != ndim:
error_msg = 'Field `%s` must have ndim == %d' % (field_name, ndim)
if channel_num is not None:
error_msg = ('Channel %d of f' % channel_num) + error_msg[1:]
raise TypeError(error_msg)
# Check dtype
if not np.issubdtype(item.dtype, parent_class):
error_msg = 'Field `%s` must have a dtype that subclasses %s' % (field_name, parent_class)
if channel_num is not None:
error_msg = ('Channel %d of f' % channel_num) + error_msg[1:]
raise TypeError(error_msg) | ['def', 'check_np_array', '(', 'item', ',', 'field_name', ',', 'ndim', ',', 'parent_class', ',', 'channel_num', '=', 'None', ')', ':', '# Check shape', 'if', 'item', '.', 'ndim', '!=', 'ndim', ':', 'error_msg', '=', "'Field `%s` must have ndim == %d'", '%', '(', 'field_name', ',', 'ndim', ')', 'if', 'channel_num', 'is', 'not', 'None', ':', 'error_msg', '=', '(', "'Channel %d of f'", '%', 'channel_num', ')', '+', 'error_msg', '[', '1', ':', ']', 'raise', 'TypeError', '(', 'error_msg', ')', '# Check dtype', 'if', 'not', 'np', '.', 'issubdtype', '(', 'item', '.', 'dtype', ',', 'parent_class', ')', ':', 'error_msg', '=', "'Field `%s` must have a dtype that subclasses %s'", '%', '(', 'field_name', ',', 'parent_class', ')', 'if', 'channel_num', 'is', 'not', 'None', ':', 'error_msg', '=', '(', "'Channel %d of f'", '%', 'channel_num', ')', '+', 'error_msg', '[', '1', ':', ']', 'raise', 'TypeError', '(', 'error_msg', ')'] | Check a numpy array's shape and dtype against required
specifications.
Parameters
----------
item : numpy array
The numpy array to check
field_name : str
The name of the field to check
ndim : int
The required number of dimensions
parent_class : type
The parent class of the dtype. ie. np.integer, np.floating.
channel_num : int, optional
If not None, indicates that the item passed in is a subelement
of a list. Indicate this in the error message if triggered. | ['Check', 'a', 'numpy', 'array', 's', 'shape', 'and', 'dtype', 'against', 'required', 'specifications', '.'] | train | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/record.py#L912-L944 |
6,003 | jhuapl-boss/intern | intern/service/boss/v1/project.py | ProjectService_1.delete | def delete(self, resource, url_prefix, auth, session, send_opts):
"""Deletes the entity described by the given resource.
Args:
resource (intern.resource.boss.BossResource)
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Raises:
requests.HTTPError on failure.
"""
req = self.get_request(
resource, 'DELETE', 'application/json', url_prefix, auth)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 204:
return
err = ('Delete failed on {}, got HTTP response: ({}) - {}'.format(
resource.name, resp.status_code, resp.text))
raise HTTPError(err, request = req, response = resp) | python | def delete(self, resource, url_prefix, auth, session, send_opts):
"""Deletes the entity described by the given resource.
Args:
resource (intern.resource.boss.BossResource)
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Raises:
requests.HTTPError on failure.
"""
req = self.get_request(
resource, 'DELETE', 'application/json', url_prefix, auth)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 204:
return
err = ('Delete failed on {}, got HTTP response: ({}) - {}'.format(
resource.name, resp.status_code, resp.text))
raise HTTPError(err, request = req, response = resp) | ['def', 'delete', '(', 'self', ',', 'resource', ',', 'url_prefix', ',', 'auth', ',', 'session', ',', 'send_opts', ')', ':', 'req', '=', 'self', '.', 'get_request', '(', 'resource', ',', "'DELETE'", ',', "'application/json'", ',', 'url_prefix', ',', 'auth', ')', 'prep', '=', 'session', '.', 'prepare_request', '(', 'req', ')', 'resp', '=', 'session', '.', 'send', '(', 'prep', ',', '*', '*', 'send_opts', ')', 'if', 'resp', '.', 'status_code', '==', '204', ':', 'return', 'err', '=', '(', "'Delete failed on {}, got HTTP response: ({}) - {}'", '.', 'format', '(', 'resource', '.', 'name', ',', 'resp', '.', 'status_code', ',', 'resp', '.', 'text', ')', ')', 'raise', 'HTTPError', '(', 'err', ',', 'request', '=', 'req', ',', 'response', '=', 'resp', ')'] | Deletes the entity described by the given resource.
Args:
resource (intern.resource.boss.BossResource)
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Raises:
requests.HTTPError on failure. | ['Deletes', 'the', 'entity', 'described', 'by', 'the', 'given', 'resource', '.'] | train | https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/service/boss/v1/project.py#L834-L856 |
6,004 | moonso/loqusdb | loqusdb/commands/update.py | update | def update(ctx, variant_file, sv_variants, family_file, family_type, skip_case_id, gq_treshold,
case_id, ensure_index, max_window):
"""Load the variants of a case
A variant is loaded if it is observed in any individual of a case
If no family file is provided all individuals in vcf file will be considered.
"""
if not (family_file or case_id):
LOG.warning("Please provide a family file or a case id")
ctx.abort()
if not (variant_file or sv_variants):
LOG.warning("Please provide a VCF file")
ctx.abort()
variant_path = None
if variant_file:
variant_path = os.path.abspath(variant_file)
variant_sv_path = None
if sv_variants:
variant_sv_path = os.path.abspath(sv_variants)
adapter = ctx.obj['adapter']
start_inserting = datetime.now()
try:
nr_inserted = update_database(
adapter=adapter,
variant_file=variant_path,
sv_file=variant_sv_path,
family_file=family_file,
family_type=family_type,
skip_case_id=skip_case_id,
case_id=case_id,
gq_treshold=gq_treshold,
max_window=max_window,
)
except (SyntaxError, CaseError, IOError, VcfError) as error:
LOG.warning(error)
ctx.abort()
LOG.info("Nr variants inserted: %s", nr_inserted)
LOG.info("Time to insert variants: {0}".format(
datetime.now() - start_inserting))
if ensure_index:
adapter.ensure_indexes()
else:
adapter.check_indexes() | python | def update(ctx, variant_file, sv_variants, family_file, family_type, skip_case_id, gq_treshold,
case_id, ensure_index, max_window):
"""Load the variants of a case
A variant is loaded if it is observed in any individual of a case
If no family file is provided all individuals in vcf file will be considered.
"""
if not (family_file or case_id):
LOG.warning("Please provide a family file or a case id")
ctx.abort()
if not (variant_file or sv_variants):
LOG.warning("Please provide a VCF file")
ctx.abort()
variant_path = None
if variant_file:
variant_path = os.path.abspath(variant_file)
variant_sv_path = None
if sv_variants:
variant_sv_path = os.path.abspath(sv_variants)
adapter = ctx.obj['adapter']
start_inserting = datetime.now()
try:
nr_inserted = update_database(
adapter=adapter,
variant_file=variant_path,
sv_file=variant_sv_path,
family_file=family_file,
family_type=family_type,
skip_case_id=skip_case_id,
case_id=case_id,
gq_treshold=gq_treshold,
max_window=max_window,
)
except (SyntaxError, CaseError, IOError, VcfError) as error:
LOG.warning(error)
ctx.abort()
LOG.info("Nr variants inserted: %s", nr_inserted)
LOG.info("Time to insert variants: {0}".format(
datetime.now() - start_inserting))
if ensure_index:
adapter.ensure_indexes()
else:
adapter.check_indexes() | ['def', 'update', '(', 'ctx', ',', 'variant_file', ',', 'sv_variants', ',', 'family_file', ',', 'family_type', ',', 'skip_case_id', ',', 'gq_treshold', ',', 'case_id', ',', 'ensure_index', ',', 'max_window', ')', ':', 'if', 'not', '(', 'family_file', 'or', 'case_id', ')', ':', 'LOG', '.', 'warning', '(', '"Please provide a family file or a case id"', ')', 'ctx', '.', 'abort', '(', ')', 'if', 'not', '(', 'variant_file', 'or', 'sv_variants', ')', ':', 'LOG', '.', 'warning', '(', '"Please provide a VCF file"', ')', 'ctx', '.', 'abort', '(', ')', 'variant_path', '=', 'None', 'if', 'variant_file', ':', 'variant_path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'variant_file', ')', 'variant_sv_path', '=', 'None', 'if', 'sv_variants', ':', 'variant_sv_path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'sv_variants', ')', 'adapter', '=', 'ctx', '.', 'obj', '[', "'adapter'", ']', 'start_inserting', '=', 'datetime', '.', 'now', '(', ')', 'try', ':', 'nr_inserted', '=', 'update_database', '(', 'adapter', '=', 'adapter', ',', 'variant_file', '=', 'variant_path', ',', 'sv_file', '=', 'variant_sv_path', ',', 'family_file', '=', 'family_file', ',', 'family_type', '=', 'family_type', ',', 'skip_case_id', '=', 'skip_case_id', ',', 'case_id', '=', 'case_id', ',', 'gq_treshold', '=', 'gq_treshold', ',', 'max_window', '=', 'max_window', ',', ')', 'except', '(', 'SyntaxError', ',', 'CaseError', ',', 'IOError', ',', 'VcfError', ')', 'as', 'error', ':', 'LOG', '.', 'warning', '(', 'error', ')', 'ctx', '.', 'abort', '(', ')', 'LOG', '.', 'info', '(', '"Nr variants inserted: %s"', ',', 'nr_inserted', ')', 'LOG', '.', 'info', '(', '"Time to insert variants: {0}"', '.', 'format', '(', 'datetime', '.', 'now', '(', ')', '-', 'start_inserting', ')', ')', 'if', 'ensure_index', ':', 'adapter', '.', 'ensure_indexes', '(', ')', 'else', ':', 'adapter', '.', 'check_indexes', '(', ')'] | Load the variants of a case
A variant is loaded if it is observed in any individual of a case
If no family file is provided all individuals in vcf file will be considered. | ['Load', 'the', 'variants', 'of', 'a', 'case'] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/update.py#L62-L114 |
6,005 | PaulHancock/Aegean | AegeanTools/regions.py | Region.without | def without(self, other):
"""
Subtract another Region by performing a difference operation on their pixlists.
Requires both regions to have the same maxdepth.
Parameters
----------
other : :class:`AegeanTools.regions.Region`
The region to be combined.
"""
# work only on the lowest level
# TODO: Allow this to be done for regions with different depths.
if not (self.maxdepth == other.maxdepth): raise AssertionError("Regions must have the same maxdepth")
self._demote_all()
opd = set(other.get_demoted())
self.pixeldict[self.maxdepth].difference_update(opd)
self._renorm()
return | python | def without(self, other):
"""
Subtract another Region by performing a difference operation on their pixlists.
Requires both regions to have the same maxdepth.
Parameters
----------
other : :class:`AegeanTools.regions.Region`
The region to be combined.
"""
# work only on the lowest level
# TODO: Allow this to be done for regions with different depths.
if not (self.maxdepth == other.maxdepth): raise AssertionError("Regions must have the same maxdepth")
self._demote_all()
opd = set(other.get_demoted())
self.pixeldict[self.maxdepth].difference_update(opd)
self._renorm()
return | ['def', 'without', '(', 'self', ',', 'other', ')', ':', '# work only on the lowest level', '# TODO: Allow this to be done for regions with different depths.', 'if', 'not', '(', 'self', '.', 'maxdepth', '==', 'other', '.', 'maxdepth', ')', ':', 'raise', 'AssertionError', '(', '"Regions must have the same maxdepth"', ')', 'self', '.', '_demote_all', '(', ')', 'opd', '=', 'set', '(', 'other', '.', 'get_demoted', '(', ')', ')', 'self', '.', 'pixeldict', '[', 'self', '.', 'maxdepth', ']', '.', 'difference_update', '(', 'opd', ')', 'self', '.', '_renorm', '(', ')', 'return'] | Subtract another Region by performing a difference operation on their pixlists.
Requires both regions to have the same maxdepth.
Parameters
----------
other : :class:`AegeanTools.regions.Region`
The region to be combined. | ['Subtract', 'another', 'Region', 'by', 'performing', 'a', 'difference', 'operation', 'on', 'their', 'pixlists', '.'] | train | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/regions.py#L287-L305 |
6,006 | etingof/pyasn1 | pyasn1/type/univ.py | SequenceAndSetBase.setComponentByPosition | def setComponentByPosition(self, idx, value=noValue,
verifyConstraints=True,
matchTags=True,
matchConstraints=True):
"""Assign |ASN.1| type component by position.
Equivalent to Python sequence item assignment operation (e.g. `[]`).
Parameters
----------
idx : :class:`int`
Component index (zero-based). Must either refer to existing
component (if *componentType* is set) or to N+1 component
otherwise. In the latter case a new component of given ASN.1
type gets instantiated and appended to |ASN.1| sequence.
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints : :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
"""
componentType = self.componentType
componentTypeLen = self._componentTypeLen
try:
currentValue = self._componentValues[idx]
except IndexError:
currentValue = noValue
if componentTypeLen:
if componentTypeLen < idx:
raise error.PyAsn1Error('component index out of range')
self._componentValues = [noValue] * componentTypeLen
if value is noValue:
if componentTypeLen:
value = componentType.getTypeByPosition(idx)
if isinstance(value, base.AbstractConstructedAsn1Item):
value = value.clone(cloneValueFlag=componentType[idx].isDefaulted)
elif currentValue is noValue:
raise error.PyAsn1Error('Component type not defined')
elif not isinstance(value, base.Asn1Item):
if componentTypeLen:
subComponentType = componentType.getTypeByPosition(idx)
if isinstance(subComponentType, base.AbstractSimpleAsn1Item):
value = subComponentType.clone(value=value)
else:
raise error.PyAsn1Error('%s can cast only scalar values' % componentType.__class__.__name__)
elif currentValue is not noValue and isinstance(currentValue, base.AbstractSimpleAsn1Item):
value = currentValue.clone(value=value)
else:
raise error.PyAsn1Error('%s undefined component type' % componentType.__class__.__name__)
elif (matchTags or matchConstraints) and componentTypeLen:
subComponentType = componentType.getTypeByPosition(idx)
if subComponentType is not noValue:
subtypeChecker = (self.strictConstraints and
subComponentType.isSameTypeWith or
subComponentType.isSuperTypeOf)
if not subtypeChecker(value, matchTags, matchConstraints):
if not componentType[idx].openType:
raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))
if verifyConstraints and value.isValue:
try:
self.subtypeSpec(value, idx)
except error.PyAsn1Error:
exType, exValue, exTb = sys.exc_info()
raise exType('%s at %s' % (exValue, self.__class__.__name__))
if componentTypeLen or idx in self._dynamicNames:
self._componentValues[idx] = value
elif len(self._componentValues) == idx:
self._componentValues.append(value)
self._dynamicNames.addField(idx)
else:
raise error.PyAsn1Error('Component index out of range')
return self | python | def setComponentByPosition(self, idx, value=noValue,
verifyConstraints=True,
matchTags=True,
matchConstraints=True):
"""Assign |ASN.1| type component by position.
Equivalent to Python sequence item assignment operation (e.g. `[]`).
Parameters
----------
idx : :class:`int`
Component index (zero-based). Must either refer to existing
component (if *componentType* is set) or to N+1 component
otherwise. In the latter case a new component of given ASN.1
type gets instantiated and appended to |ASN.1| sequence.
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints : :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
"""
componentType = self.componentType
componentTypeLen = self._componentTypeLen
try:
currentValue = self._componentValues[idx]
except IndexError:
currentValue = noValue
if componentTypeLen:
if componentTypeLen < idx:
raise error.PyAsn1Error('component index out of range')
self._componentValues = [noValue] * componentTypeLen
if value is noValue:
if componentTypeLen:
value = componentType.getTypeByPosition(idx)
if isinstance(value, base.AbstractConstructedAsn1Item):
value = value.clone(cloneValueFlag=componentType[idx].isDefaulted)
elif currentValue is noValue:
raise error.PyAsn1Error('Component type not defined')
elif not isinstance(value, base.Asn1Item):
if componentTypeLen:
subComponentType = componentType.getTypeByPosition(idx)
if isinstance(subComponentType, base.AbstractSimpleAsn1Item):
value = subComponentType.clone(value=value)
else:
raise error.PyAsn1Error('%s can cast only scalar values' % componentType.__class__.__name__)
elif currentValue is not noValue and isinstance(currentValue, base.AbstractSimpleAsn1Item):
value = currentValue.clone(value=value)
else:
raise error.PyAsn1Error('%s undefined component type' % componentType.__class__.__name__)
elif (matchTags or matchConstraints) and componentTypeLen:
subComponentType = componentType.getTypeByPosition(idx)
if subComponentType is not noValue:
subtypeChecker = (self.strictConstraints and
subComponentType.isSameTypeWith or
subComponentType.isSuperTypeOf)
if not subtypeChecker(value, matchTags, matchConstraints):
if not componentType[idx].openType:
raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))
if verifyConstraints and value.isValue:
try:
self.subtypeSpec(value, idx)
except error.PyAsn1Error:
exType, exValue, exTb = sys.exc_info()
raise exType('%s at %s' % (exValue, self.__class__.__name__))
if componentTypeLen or idx in self._dynamicNames:
self._componentValues[idx] = value
elif len(self._componentValues) == idx:
self._componentValues.append(value)
self._dynamicNames.addField(idx)
else:
raise error.PyAsn1Error('Component index out of range')
return self | ['def', 'setComponentByPosition', '(', 'self', ',', 'idx', ',', 'value', '=', 'noValue', ',', 'verifyConstraints', '=', 'True', ',', 'matchTags', '=', 'True', ',', 'matchConstraints', '=', 'True', ')', ':', 'componentType', '=', 'self', '.', 'componentType', 'componentTypeLen', '=', 'self', '.', '_componentTypeLen', 'try', ':', 'currentValue', '=', 'self', '.', '_componentValues', '[', 'idx', ']', 'except', 'IndexError', ':', 'currentValue', '=', 'noValue', 'if', 'componentTypeLen', ':', 'if', 'componentTypeLen', '<', 'idx', ':', 'raise', 'error', '.', 'PyAsn1Error', '(', "'component index out of range'", ')', 'self', '.', '_componentValues', '=', '[', 'noValue', ']', '*', 'componentTypeLen', 'if', 'value', 'is', 'noValue', ':', 'if', 'componentTypeLen', ':', 'value', '=', 'componentType', '.', 'getTypeByPosition', '(', 'idx', ')', 'if', 'isinstance', '(', 'value', ',', 'base', '.', 'AbstractConstructedAsn1Item', ')', ':', 'value', '=', 'value', '.', 'clone', '(', 'cloneValueFlag', '=', 'componentType', '[', 'idx', ']', '.', 'isDefaulted', ')', 'elif', 'currentValue', 'is', 'noValue', ':', 'raise', 'error', '.', 'PyAsn1Error', '(', "'Component type not defined'", ')', 'elif', 'not', 'isinstance', '(', 'value', ',', 'base', '.', 'Asn1Item', ')', ':', 'if', 'componentTypeLen', ':', 'subComponentType', '=', 'componentType', '.', 'getTypeByPosition', '(', 'idx', ')', 'if', 'isinstance', '(', 'subComponentType', ',', 'base', '.', 'AbstractSimpleAsn1Item', ')', ':', 'value', '=', 'subComponentType', '.', 'clone', '(', 'value', '=', 'value', ')', 'else', ':', 'raise', 'error', '.', 'PyAsn1Error', '(', "'%s can cast only scalar values'", '%', 'componentType', '.', '__class__', '.', '__name__', ')', 'elif', 'currentValue', 'is', 'not', 'noValue', 'and', 'isinstance', '(', 'currentValue', ',', 'base', '.', 'AbstractSimpleAsn1Item', ')', ':', 'value', '=', 'currentValue', '.', 'clone', '(', 'value', '=', 'value', ')', 'else', ':', 'raise', 'error', '.', 'PyAsn1Error', '(', "'%s undefined component type'", '%', 'componentType', '.', '__class__', '.', '__name__', ')', 'elif', '(', 'matchTags', 'or', 'matchConstraints', ')', 'and', 'componentTypeLen', ':', 'subComponentType', '=', 'componentType', '.', 'getTypeByPosition', '(', 'idx', ')', 'if', 'subComponentType', 'is', 'not', 'noValue', ':', 'subtypeChecker', '=', '(', 'self', '.', 'strictConstraints', 'and', 'subComponentType', '.', 'isSameTypeWith', 'or', 'subComponentType', '.', 'isSuperTypeOf', ')', 'if', 'not', 'subtypeChecker', '(', 'value', ',', 'matchTags', ',', 'matchConstraints', ')', ':', 'if', 'not', 'componentType', '[', 'idx', ']', '.', 'openType', ':', 'raise', 'error', '.', 'PyAsn1Error', '(', "'Component value is tag-incompatible: %r vs %r'", '%', '(', 'value', ',', 'componentType', ')', ')', 'if', 'verifyConstraints', 'and', 'value', '.', 'isValue', ':', 'try', ':', 'self', '.', 'subtypeSpec', '(', 'value', ',', 'idx', ')', 'except', 'error', '.', 'PyAsn1Error', ':', 'exType', ',', 'exValue', ',', 'exTb', '=', 'sys', '.', 'exc_info', '(', ')', 'raise', 'exType', '(', "'%s at %s'", '%', '(', 'exValue', ',', 'self', '.', '__class__', '.', '__name__', ')', ')', 'if', 'componentTypeLen', 'or', 'idx', 'in', 'self', '.', '_dynamicNames', ':', 'self', '.', '_componentValues', '[', 'idx', ']', '=', 'value', 'elif', 'len', '(', 'self', '.', '_componentValues', ')', '==', 'idx', ':', 'self', '.', '_componentValues', '.', 'append', '(', 'value', ')', 'self', '.', '_dynamicNames', '.', 'addField', '(', 'idx', ')', 'else', ':', 'raise', 'error', '.', 'PyAsn1Error', '(', "'Component index out of range'", ')', 'return', 'self'] | Assign |ASN.1| type component by position.
Equivalent to Python sequence item assignment operation (e.g. `[]`).
Parameters
----------
idx : :class:`int`
Component index (zero-based). Must either refer to existing
component (if *componentType* is set) or to N+1 component
otherwise. In the latter case a new component of given ASN.1
type gets instantiated and appended to |ASN.1| sequence.
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints : :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self | ['Assign', '|ASN', '.', '1|', 'type', 'component', 'by', 'position', '.'] | train | https://github.com/etingof/pyasn1/blob/25cf116ef8d11bb0e08454c0f3635c9f4002c2d6/pyasn1/type/univ.py#L2299-L2401 |
6,007 | bodylabs/lace | lace/geometry.py | MeshMixin.reorient | def reorient(self, up, look):
'''
Reorient the mesh by specifying two vectors.
up: The foot-to-head direction.
look: The direction the body is facing.
In the result, the up will end up along +y, and look along +z
(i.e. facing towards a default OpenGL camera).
'''
from blmath.geometry.transform import rotation_from_up_and_look
from blmath.numerics import as_numeric_array
up = as_numeric_array(up, (3,))
look = as_numeric_array(look, (3,))
if self.v is not None:
self.v = np.dot(rotation_from_up_and_look(up, look), self.v.T).T | python | def reorient(self, up, look):
'''
Reorient the mesh by specifying two vectors.
up: The foot-to-head direction.
look: The direction the body is facing.
In the result, the up will end up along +y, and look along +z
(i.e. facing towards a default OpenGL camera).
'''
from blmath.geometry.transform import rotation_from_up_and_look
from blmath.numerics import as_numeric_array
up = as_numeric_array(up, (3,))
look = as_numeric_array(look, (3,))
if self.v is not None:
self.v = np.dot(rotation_from_up_and_look(up, look), self.v.T).T | ['def', 'reorient', '(', 'self', ',', 'up', ',', 'look', ')', ':', 'from', 'blmath', '.', 'geometry', '.', 'transform', 'import', 'rotation_from_up_and_look', 'from', 'blmath', '.', 'numerics', 'import', 'as_numeric_array', 'up', '=', 'as_numeric_array', '(', 'up', ',', '(', '3', ',', ')', ')', 'look', '=', 'as_numeric_array', '(', 'look', ',', '(', '3', ',', ')', ')', 'if', 'self', '.', 'v', 'is', 'not', 'None', ':', 'self', '.', 'v', '=', 'np', '.', 'dot', '(', 'rotation_from_up_and_look', '(', 'up', ',', 'look', ')', ',', 'self', '.', 'v', '.', 'T', ')', '.', 'T'] | Reorient the mesh by specifying two vectors.
up: The foot-to-head direction.
look: The direction the body is facing.
In the result, the up will end up along +y, and look along +z
(i.e. facing towards a default OpenGL camera). | ['Reorient', 'the', 'mesh', 'by', 'specifying', 'two', 'vectors', '.'] | train | https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/geometry.py#L80-L98 |
6,008 | hydpy-dev/hydpy | hydpy/models/dam/dam_model.py | calc_requiredremotesupply_v1 | def calc_requiredremotesupply_v1(self):
"""Calculate the required maximum supply from another location
that can be discharged into the dam.
Required control parameters:
|HighestRemoteSupply|
|WaterLevelSupplyThreshold|
Required derived parameter:
|WaterLevelSupplySmoothPar|
Required aide sequence:
|WaterLevel|
Calculated flux sequence:
|RequiredRemoteSupply|
Basic equation:
:math:`RequiredRemoteSupply = HighestRemoteSupply \\cdot
smooth_{logistic1}(WaterLevelSupplyThreshold-WaterLevel,
WaterLevelSupplySmoothPar)`
Used auxiliary method:
|smooth_logistic1|
Examples:
Method |calc_requiredremotesupply_v1| is functionally identical
with method |calc_allowedremoterelieve_v2|. Hence the following
examples serve for testing purposes only (see the documentation
on function |calc_allowedremoterelieve_v2| for more detailed
information):
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> highestremotesupply(_11_1_12=1.0, _03_31_12=1.0,
... _04_1_12=2.0, _10_31_12=2.0)
>>> waterlevelsupplythreshold(_11_1_12=3.0, _03_31_12=2.0,
... _04_1_12=4.0, _10_31_12=4.0)
>>> waterlevelsupplytolerance(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=1.0, _10_31_12=1.0)
>>> derived.waterlevelsupplysmoothpar.update()
>>> derived.toy.update()
>>> from hydpy import UnitTest
>>> test = UnitTest(model,
... model.calc_requiredremotesupply_v1,
... last_example=9,
... parseqs=(aides.waterlevel,
... fluxes.requiredremotesupply))
>>> test.nexts.waterlevel = range(9)
>>> model.idx_sim = pub.timegrids.init['2001.03.30']
>>> test(first_example=2, last_example=6)
| ex. | waterlevel | requiredremotesupply |
-------------------------------------------
| 3 | 1.0 | 1.0 |
| 4 | 2.0 | 1.0 |
| 5 | 3.0 | 0.0 |
| 6 | 4.0 | 0.0 |
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | waterlevel | requiredremotesupply |
-------------------------------------------
| 1 | 0.0 | 2.0 |
| 2 | 1.0 | 1.999998 |
| 3 | 2.0 | 1.999796 |
| 4 | 3.0 | 1.98 |
| 5 | 4.0 | 1.0 |
| 6 | 5.0 | 0.02 |
| 7 | 6.0 | 0.000204 |
| 8 | 7.0 | 0.000002 |
| 9 | 8.0 | 0.0 |
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
aid = self.sequences.aides.fastaccess
toy = der.toy[self.idx_sim]
flu.requiredremotesupply = (
con.highestremotesupply[toy] *
smoothutils.smooth_logistic1(
con.waterlevelsupplythreshold[toy]-aid.waterlevel,
der.waterlevelsupplysmoothpar[toy])) | python | def calc_requiredremotesupply_v1(self):
"""Calculate the required maximum supply from another location
that can be discharged into the dam.
Required control parameters:
|HighestRemoteSupply|
|WaterLevelSupplyThreshold|
Required derived parameter:
|WaterLevelSupplySmoothPar|
Required aide sequence:
|WaterLevel|
Calculated flux sequence:
|RequiredRemoteSupply|
Basic equation:
:math:`RequiredRemoteSupply = HighestRemoteSupply \\cdot
smooth_{logistic1}(WaterLevelSupplyThreshold-WaterLevel,
WaterLevelSupplySmoothPar)`
Used auxiliary method:
|smooth_logistic1|
Examples:
Method |calc_requiredremotesupply_v1| is functionally identical
with method |calc_allowedremoterelieve_v2|. Hence the following
examples serve for testing purposes only (see the documentation
on function |calc_allowedremoterelieve_v2| for more detailed
information):
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> highestremotesupply(_11_1_12=1.0, _03_31_12=1.0,
... _04_1_12=2.0, _10_31_12=2.0)
>>> waterlevelsupplythreshold(_11_1_12=3.0, _03_31_12=2.0,
... _04_1_12=4.0, _10_31_12=4.0)
>>> waterlevelsupplytolerance(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=1.0, _10_31_12=1.0)
>>> derived.waterlevelsupplysmoothpar.update()
>>> derived.toy.update()
>>> from hydpy import UnitTest
>>> test = UnitTest(model,
... model.calc_requiredremotesupply_v1,
... last_example=9,
... parseqs=(aides.waterlevel,
... fluxes.requiredremotesupply))
>>> test.nexts.waterlevel = range(9)
>>> model.idx_sim = pub.timegrids.init['2001.03.30']
>>> test(first_example=2, last_example=6)
| ex. | waterlevel | requiredremotesupply |
-------------------------------------------
| 3 | 1.0 | 1.0 |
| 4 | 2.0 | 1.0 |
| 5 | 3.0 | 0.0 |
| 6 | 4.0 | 0.0 |
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | waterlevel | requiredremotesupply |
-------------------------------------------
| 1 | 0.0 | 2.0 |
| 2 | 1.0 | 1.999998 |
| 3 | 2.0 | 1.999796 |
| 4 | 3.0 | 1.98 |
| 5 | 4.0 | 1.0 |
| 6 | 5.0 | 0.02 |
| 7 | 6.0 | 0.000204 |
| 8 | 7.0 | 0.000002 |
| 9 | 8.0 | 0.0 |
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
aid = self.sequences.aides.fastaccess
toy = der.toy[self.idx_sim]
flu.requiredremotesupply = (
con.highestremotesupply[toy] *
smoothutils.smooth_logistic1(
con.waterlevelsupplythreshold[toy]-aid.waterlevel,
der.waterlevelsupplysmoothpar[toy])) | ['def', 'calc_requiredremotesupply_v1', '(', 'self', ')', ':', 'con', '=', 'self', '.', 'parameters', '.', 'control', '.', 'fastaccess', 'der', '=', 'self', '.', 'parameters', '.', 'derived', '.', 'fastaccess', 'flu', '=', 'self', '.', 'sequences', '.', 'fluxes', '.', 'fastaccess', 'aid', '=', 'self', '.', 'sequences', '.', 'aides', '.', 'fastaccess', 'toy', '=', 'der', '.', 'toy', '[', 'self', '.', 'idx_sim', ']', 'flu', '.', 'requiredremotesupply', '=', '(', 'con', '.', 'highestremotesupply', '[', 'toy', ']', '*', 'smoothutils', '.', 'smooth_logistic1', '(', 'con', '.', 'waterlevelsupplythreshold', '[', 'toy', ']', '-', 'aid', '.', 'waterlevel', ',', 'der', '.', 'waterlevelsupplysmoothpar', '[', 'toy', ']', ')', ')'] | Calculate the required maximum supply from another location
that can be discharged into the dam.
Required control parameters:
|HighestRemoteSupply|
|WaterLevelSupplyThreshold|
Required derived parameter:
|WaterLevelSupplySmoothPar|
Required aide sequence:
|WaterLevel|
Calculated flux sequence:
|RequiredRemoteSupply|
Basic equation:
:math:`RequiredRemoteSupply = HighestRemoteSupply \\cdot
smooth_{logistic1}(WaterLevelSupplyThreshold-WaterLevel,
WaterLevelSupplySmoothPar)`
Used auxiliary method:
|smooth_logistic1|
Examples:
Method |calc_requiredremotesupply_v1| is functionally identical
with method |calc_allowedremoterelieve_v2|. Hence the following
examples serve for testing purposes only (see the documentation
on function |calc_allowedremoterelieve_v2| for more detailed
information):
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> highestremotesupply(_11_1_12=1.0, _03_31_12=1.0,
... _04_1_12=2.0, _10_31_12=2.0)
>>> waterlevelsupplythreshold(_11_1_12=3.0, _03_31_12=2.0,
... _04_1_12=4.0, _10_31_12=4.0)
>>> waterlevelsupplytolerance(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=1.0, _10_31_12=1.0)
>>> derived.waterlevelsupplysmoothpar.update()
>>> derived.toy.update()
>>> from hydpy import UnitTest
>>> test = UnitTest(model,
... model.calc_requiredremotesupply_v1,
... last_example=9,
... parseqs=(aides.waterlevel,
... fluxes.requiredremotesupply))
>>> test.nexts.waterlevel = range(9)
>>> model.idx_sim = pub.timegrids.init['2001.03.30']
>>> test(first_example=2, last_example=6)
| ex. | waterlevel | requiredremotesupply |
-------------------------------------------
| 3 | 1.0 | 1.0 |
| 4 | 2.0 | 1.0 |
| 5 | 3.0 | 0.0 |
| 6 | 4.0 | 0.0 |
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | waterlevel | requiredremotesupply |
-------------------------------------------
| 1 | 0.0 | 2.0 |
| 2 | 1.0 | 1.999998 |
| 3 | 2.0 | 1.999796 |
| 4 | 3.0 | 1.98 |
| 5 | 4.0 | 1.0 |
| 6 | 5.0 | 0.02 |
| 7 | 6.0 | 0.000204 |
| 8 | 7.0 | 0.000002 |
| 9 | 8.0 | 0.0 | | ['Calculate', 'the', 'required', 'maximum', 'supply', 'from', 'another', 'location', 'that', 'can', 'be', 'discharged', 'into', 'the', 'dam', '.'] | train | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/dam/dam_model.py#L296-L379 |
6,009 | tkf/python-epc | epc/handler.py | EPCHandler.call | def call(self, name, *args, **kwds):
"""
Call method connected to this handler.
:type name: str
:arg name: Method name to call.
:type args: list
:arg args: Arguments for remote method to call.
:type callback: callable
:arg callback: A function to be called with returned value of
the remote method.
:type errback: callable
:arg errback: A function to be called with an error occurred
in the remote method. It is either an instance
of :class:`ReturnError` or :class:`EPCError`.
"""
self.callmanager.call(self, name, *args, **kwds) | python | def call(self, name, *args, **kwds):
"""
Call method connected to this handler.
:type name: str
:arg name: Method name to call.
:type args: list
:arg args: Arguments for remote method to call.
:type callback: callable
:arg callback: A function to be called with returned value of
the remote method.
:type errback: callable
:arg errback: A function to be called with an error occurred
in the remote method. It is either an instance
of :class:`ReturnError` or :class:`EPCError`.
"""
self.callmanager.call(self, name, *args, **kwds) | ['def', 'call', '(', 'self', ',', 'name', ',', '*', 'args', ',', '*', '*', 'kwds', ')', ':', 'self', '.', 'callmanager', '.', 'call', '(', 'self', ',', 'name', ',', '*', 'args', ',', '*', '*', 'kwds', ')'] | Call method connected to this handler.
:type name: str
:arg name: Method name to call.
:type args: list
:arg args: Arguments for remote method to call.
:type callback: callable
:arg callback: A function to be called with returned value of
the remote method.
:type errback: callable
:arg errback: A function to be called with an error occurred
in the remote method. It is either an instance
of :class:`ReturnError` or :class:`EPCError`. | ['Call', 'method', 'connected', 'to', 'this', 'handler', '.'] | train | https://github.com/tkf/python-epc/blob/f3673ae5c35f20a0f71546ab34c28e3dde3595c1/epc/handler.py#L362-L379 |
6,010 | merll/docker-map | dockermap/map/state/base.py | AbstractDependencyStateGenerator.get_states | def get_states(self, config_ids):
"""
Generates state information for the selected container and its dependencies / dependents.
:param config_ids: MapConfigId tuples.
:type config_ids: list[dockermap.map.input.MapConfigId]
:return: Iterable of configuration states.
:rtype: collections.Iterable[dockermap.map.state.ConfigState]
"""
input_paths = [
(config_id, list(self.get_dependency_path(config_id)))
for config_id in config_ids
]
log.debug("Dependency paths from input: %s", input_paths)
dependency_paths = merge_dependency_paths(input_paths)
log.debug("Merged dependency paths: %s", dependency_paths)
return itertools.chain.from_iterable(self._get_all_states(config_id, dependency_path)
for config_id, dependency_path in dependency_paths) | python | def get_states(self, config_ids):
"""
Generates state information for the selected container and its dependencies / dependents.
:param config_ids: MapConfigId tuples.
:type config_ids: list[dockermap.map.input.MapConfigId]
:return: Iterable of configuration states.
:rtype: collections.Iterable[dockermap.map.state.ConfigState]
"""
input_paths = [
(config_id, list(self.get_dependency_path(config_id)))
for config_id in config_ids
]
log.debug("Dependency paths from input: %s", input_paths)
dependency_paths = merge_dependency_paths(input_paths)
log.debug("Merged dependency paths: %s", dependency_paths)
return itertools.chain.from_iterable(self._get_all_states(config_id, dependency_path)
for config_id, dependency_path in dependency_paths) | ['def', 'get_states', '(', 'self', ',', 'config_ids', ')', ':', 'input_paths', '=', '[', '(', 'config_id', ',', 'list', '(', 'self', '.', 'get_dependency_path', '(', 'config_id', ')', ')', ')', 'for', 'config_id', 'in', 'config_ids', ']', 'log', '.', 'debug', '(', '"Dependency paths from input: %s"', ',', 'input_paths', ')', 'dependency_paths', '=', 'merge_dependency_paths', '(', 'input_paths', ')', 'log', '.', 'debug', '(', '"Merged dependency paths: %s"', ',', 'dependency_paths', ')', 'return', 'itertools', '.', 'chain', '.', 'from_iterable', '(', 'self', '.', '_get_all_states', '(', 'config_id', ',', 'dependency_path', ')', 'for', 'config_id', ',', 'dependency_path', 'in', 'dependency_paths', ')'] | Generates state information for the selected container and its dependencies / dependents.
:param config_ids: MapConfigId tuples.
:type config_ids: list[dockermap.map.input.MapConfigId]
:return: Iterable of configuration states.
:rtype: collections.Iterable[dockermap.map.state.ConfigState] | ['Generates', 'state', 'information', 'for', 'the', 'selected', 'container', 'and', 'its', 'dependencies', '/', 'dependents', '.'] | train | https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/state/base.py#L416-L433 |
6,011 | aouyar/PyMunin | pysysinfo/asterisk.py | AsteriskInfo.hasChannelType | def hasChannelType(self, chan):
"""Returns True if chan is among the supported channel types.
@param app: Module name.
@return: Boolean
"""
if self._chantypes is None:
self._initChannelTypesList()
return chan in self._chantypes | python | def hasChannelType(self, chan):
"""Returns True if chan is among the supported channel types.
@param app: Module name.
@return: Boolean
"""
if self._chantypes is None:
self._initChannelTypesList()
return chan in self._chantypes | ['def', 'hasChannelType', '(', 'self', ',', 'chan', ')', ':', 'if', 'self', '.', '_chantypes', 'is', 'None', ':', 'self', '.', '_initChannelTypesList', '(', ')', 'return', 'chan', 'in', 'self', '.', '_chantypes'] | Returns True if chan is among the supported channel types.
@param app: Module name.
@return: Boolean | ['Returns', 'True', 'if', 'chan', 'is', 'among', 'the', 'supported', 'channel', 'types', '.'] | train | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/asterisk.py#L350-L359 |
6,012 | paylogic/halogen | halogen/schema.py | Attr.deserialize | def deserialize(self, value, **kwargs):
"""Deserialize the attribute from a HAL structure.
Get the value from the HAL structure from the attribute's compartment
using the attribute's name as a key, convert it using the attribute's
type. Schema will either return it to parent schema or will assign
to the output value if specified using the attribute's accessor setter.
:param value: HAL structure to get the value from.
:return: Deserialized attribute value.
:raises: ValidationError.
"""
compartment = value
if self.compartment is not None:
compartment = value[self.compartment]
try:
value = self.accessor.get(compartment, **kwargs)
except (KeyError, AttributeError):
if not hasattr(self, "default") and self.required:
raise
return self.default() if callable(self.default) else self.default
return self.attr_type.deserialize(value, **kwargs) | python | def deserialize(self, value, **kwargs):
"""Deserialize the attribute from a HAL structure.
Get the value from the HAL structure from the attribute's compartment
using the attribute's name as a key, convert it using the attribute's
type. Schema will either return it to parent schema or will assign
to the output value if specified using the attribute's accessor setter.
:param value: HAL structure to get the value from.
:return: Deserialized attribute value.
:raises: ValidationError.
"""
compartment = value
if self.compartment is not None:
compartment = value[self.compartment]
try:
value = self.accessor.get(compartment, **kwargs)
except (KeyError, AttributeError):
if not hasattr(self, "default") and self.required:
raise
return self.default() if callable(self.default) else self.default
return self.attr_type.deserialize(value, **kwargs) | ['def', 'deserialize', '(', 'self', ',', 'value', ',', '*', '*', 'kwargs', ')', ':', 'compartment', '=', 'value', 'if', 'self', '.', 'compartment', 'is', 'not', 'None', ':', 'compartment', '=', 'value', '[', 'self', '.', 'compartment', ']', 'try', ':', 'value', '=', 'self', '.', 'accessor', '.', 'get', '(', 'compartment', ',', '*', '*', 'kwargs', ')', 'except', '(', 'KeyError', ',', 'AttributeError', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', '"default"', ')', 'and', 'self', '.', 'required', ':', 'raise', 'return', 'self', '.', 'default', '(', ')', 'if', 'callable', '(', 'self', '.', 'default', ')', 'else', 'self', '.', 'default', 'return', 'self', '.', 'attr_type', '.', 'deserialize', '(', 'value', ',', '*', '*', 'kwargs', ')'] | Deserialize the attribute from a HAL structure.
Get the value from the HAL structure from the attribute's compartment
using the attribute's name as a key, convert it using the attribute's
type. Schema will either return it to parent schema or will assign
to the output value if specified using the attribute's accessor setter.
:param value: HAL structure to get the value from.
:return: Deserialized attribute value.
:raises: ValidationError. | ['Deserialize', 'the', 'attribute', 'from', 'a', 'HAL', 'structure', '.'] | train | https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/schema.py#L185-L209 |
6,013 | Azure/azure-event-hubs-python | azure/eventprocessorhost/partition_manager.py | PartitionManager.attempt_renew_lease | def attempt_renew_lease(self, lease_task, owned_by_others_q, lease_manager):
"""
Make attempt_renew_lease async call sync.
"""
loop = asyncio.new_event_loop()
loop.run_until_complete(self.attempt_renew_lease_async(lease_task, owned_by_others_q, lease_manager)) | python | def attempt_renew_lease(self, lease_task, owned_by_others_q, lease_manager):
"""
Make attempt_renew_lease async call sync.
"""
loop = asyncio.new_event_loop()
loop.run_until_complete(self.attempt_renew_lease_async(lease_task, owned_by_others_q, lease_manager)) | ['def', 'attempt_renew_lease', '(', 'self', ',', 'lease_task', ',', 'owned_by_others_q', ',', 'lease_manager', ')', ':', 'loop', '=', 'asyncio', '.', 'new_event_loop', '(', ')', 'loop', '.', 'run_until_complete', '(', 'self', '.', 'attempt_renew_lease_async', '(', 'lease_task', ',', 'owned_by_others_q', ',', 'lease_manager', ')', ')'] | Make attempt_renew_lease async call sync. | ['Make', 'attempt_renew_lease', 'async', 'call', 'sync', '.'] | train | https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/partition_manager.py#L320-L325 |
6,014 | dariusbakunas/rawdisk | rawdisk/plugins/filesystems/ntfs/bootsector.py | BootSector.mft_mirror_offset | def mft_mirror_offset(self):
"""
Returns:
int: Mirror MFT Table offset from the beginning of the partition \
in bytes
"""
return self.bpb.bytes_per_sector * \
self.bpb.sectors_per_cluster * self.extended_bpb.mft_mirror_cluster | python | def mft_mirror_offset(self):
"""
Returns:
int: Mirror MFT Table offset from the beginning of the partition \
in bytes
"""
return self.bpb.bytes_per_sector * \
self.bpb.sectors_per_cluster * self.extended_bpb.mft_mirror_cluster | ['def', 'mft_mirror_offset', '(', 'self', ')', ':', 'return', 'self', '.', 'bpb', '.', 'bytes_per_sector', '*', 'self', '.', 'bpb', '.', 'sectors_per_cluster', '*', 'self', '.', 'extended_bpb', '.', 'mft_mirror_cluster'] | Returns:
int: Mirror MFT Table offset from the beginning of the partition \
in bytes | ['Returns', ':', 'int', ':', 'Mirror', 'MFT', 'Table', 'offset', 'from', 'the', 'beginning', 'of', 'the', 'partition', '\\', 'in', 'bytes'] | train | https://github.com/dariusbakunas/rawdisk/blob/1dc9d0b377fe5da3c406ccec4abc238c54167403/rawdisk/plugins/filesystems/ntfs/bootsector.py#L72-L79 |
6,015 | BlackEarth/bf | bf/css.py | CSS.selector_to_xpath | def selector_to_xpath(cls, selector, xmlns=None):
"""convert a css selector into an xpath expression.
xmlns is option single-item dict with namespace prefix and href
"""
selector = selector.replace(' .', ' *.')
if selector[0] == '.':
selector = '*' + selector
log.debug(selector)
if '#' in selector:
selector = selector.replace('#', '*#')
log.debug(selector)
if xmlns is not None:
prefix = list(xmlns.keys())[0]
href = xmlns[prefix]
selector = ' '.join([
(n.strip() != '>' and prefix + '|' + n.strip() or n.strip())
for n in selector.split(' ')
])
log.debug(selector)
path = cssselect.GenericTranslator().css_to_xpath(selector)
path = path.replace("descendant-or-self::", "")
path = path.replace("/descendant::", "//")
path = path.replace('/*/', '//')
log.debug(' ==> %s' % path)
return path | python | def selector_to_xpath(cls, selector, xmlns=None):
"""convert a css selector into an xpath expression.
xmlns is option single-item dict with namespace prefix and href
"""
selector = selector.replace(' .', ' *.')
if selector[0] == '.':
selector = '*' + selector
log.debug(selector)
if '#' in selector:
selector = selector.replace('#', '*#')
log.debug(selector)
if xmlns is not None:
prefix = list(xmlns.keys())[0]
href = xmlns[prefix]
selector = ' '.join([
(n.strip() != '>' and prefix + '|' + n.strip() or n.strip())
for n in selector.split(' ')
])
log.debug(selector)
path = cssselect.GenericTranslator().css_to_xpath(selector)
path = path.replace("descendant-or-self::", "")
path = path.replace("/descendant::", "//")
path = path.replace('/*/', '//')
log.debug(' ==> %s' % path)
return path | ['def', 'selector_to_xpath', '(', 'cls', ',', 'selector', ',', 'xmlns', '=', 'None', ')', ':', 'selector', '=', 'selector', '.', 'replace', '(', "' .'", ',', "' *.'", ')', 'if', 'selector', '[', '0', ']', '==', "'.'", ':', 'selector', '=', "'*'", '+', 'selector', 'log', '.', 'debug', '(', 'selector', ')', 'if', "'#'", 'in', 'selector', ':', 'selector', '=', 'selector', '.', 'replace', '(', "'#'", ',', "'*#'", ')', 'log', '.', 'debug', '(', 'selector', ')', 'if', 'xmlns', 'is', 'not', 'None', ':', 'prefix', '=', 'list', '(', 'xmlns', '.', 'keys', '(', ')', ')', '[', '0', ']', 'href', '=', 'xmlns', '[', 'prefix', ']', 'selector', '=', "' '", '.', 'join', '(', '[', '(', 'n', '.', 'strip', '(', ')', '!=', "'>'", 'and', 'prefix', '+', "'|'", '+', 'n', '.', 'strip', '(', ')', 'or', 'n', '.', 'strip', '(', ')', ')', 'for', 'n', 'in', 'selector', '.', 'split', '(', "' '", ')', ']', ')', 'log', '.', 'debug', '(', 'selector', ')', 'path', '=', 'cssselect', '.', 'GenericTranslator', '(', ')', '.', 'css_to_xpath', '(', 'selector', ')', 'path', '=', 'path', '.', 'replace', '(', '"descendant-or-self::"', ',', '""', ')', 'path', '=', 'path', '.', 'replace', '(', '"/descendant::"', ',', '"//"', ')', 'path', '=', 'path', '.', 'replace', '(', "'/*/'", ',', "'//'", ')', 'log', '.', 'debug', '(', "' ==> %s'", '%', 'path', ')', 'return', 'path'] | convert a css selector into an xpath expression.
xmlns is option single-item dict with namespace prefix and href | ['convert', 'a', 'css', 'selector', 'into', 'an', 'xpath', 'expression', '.', 'xmlns', 'is', 'option', 'single', '-', 'item', 'dict', 'with', 'namespace', 'prefix', 'and', 'href'] | train | https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/css.py#L102-L131 |
6,016 | projecthamster/hamster | src/hamster/lib/graphics.py | Scene.start_drag | def start_drag(self, sprite, cursor_x = None, cursor_y = None):
"""start dragging given sprite"""
cursor_x, cursor_y = cursor_x or sprite.x, cursor_y or sprite.y
self._mouse_down_sprite = self._drag_sprite = sprite
sprite.drag_x, sprite.drag_y = self._drag_sprite.x, self._drag_sprite.y
self.__drag_start_x, self.__drag_start_y = cursor_x, cursor_y
self.__drag_started = True | python | def start_drag(self, sprite, cursor_x = None, cursor_y = None):
"""start dragging given sprite"""
cursor_x, cursor_y = cursor_x or sprite.x, cursor_y or sprite.y
self._mouse_down_sprite = self._drag_sprite = sprite
sprite.drag_x, sprite.drag_y = self._drag_sprite.x, self._drag_sprite.y
self.__drag_start_x, self.__drag_start_y = cursor_x, cursor_y
self.__drag_started = True | ['def', 'start_drag', '(', 'self', ',', 'sprite', ',', 'cursor_x', '=', 'None', ',', 'cursor_y', '=', 'None', ')', ':', 'cursor_x', ',', 'cursor_y', '=', 'cursor_x', 'or', 'sprite', '.', 'x', ',', 'cursor_y', 'or', 'sprite', '.', 'y', 'self', '.', '_mouse_down_sprite', '=', 'self', '.', '_drag_sprite', '=', 'sprite', 'sprite', '.', 'drag_x', ',', 'sprite', '.', 'drag_y', '=', 'self', '.', '_drag_sprite', '.', 'x', ',', 'self', '.', '_drag_sprite', '.', 'y', 'self', '.', '__drag_start_x', ',', 'self', '.', '__drag_start_y', '=', 'cursor_x', ',', 'cursor_y', 'self', '.', '__drag_started', '=', 'True'] | start dragging given sprite | ['start', 'dragging', 'given', 'sprite'] | train | https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/graphics.py#L2158-L2165 |
6,017 | saltstack/salt | salt/states/rbenv.py | installed | def installed(name, default=False, user=None):
'''
Verify that the specified ruby is installed with rbenv. Rbenv is
installed if necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rbenv as.
.. versionadded:: 0.17.0
.. versionadded:: 0.16.0
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
rbenv_installed_ret = copy.deepcopy(ret)
if name.startswith('ruby-'):
name = re.sub(r'^ruby-', '', name)
if __opts__['test']:
ret = _ruby_installed(ret, name, user=user)
if not ret['result']:
ret['comment'] = 'Ruby {0} is set to be installed'.format(name)
else:
ret['comment'] = 'Ruby {0} is already installed'.format(name)
return ret
rbenv_installed_ret = _check_and_install_rbenv(rbenv_installed_ret, user)
if rbenv_installed_ret['result'] is False:
ret['result'] = False
ret['comment'] = 'Rbenv failed to install'
return ret
else:
return _check_and_install_ruby(ret, name, default, user=user) | python | def installed(name, default=False, user=None):
'''
Verify that the specified ruby is installed with rbenv. Rbenv is
installed if necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rbenv as.
.. versionadded:: 0.17.0
.. versionadded:: 0.16.0
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
rbenv_installed_ret = copy.deepcopy(ret)
if name.startswith('ruby-'):
name = re.sub(r'^ruby-', '', name)
if __opts__['test']:
ret = _ruby_installed(ret, name, user=user)
if not ret['result']:
ret['comment'] = 'Ruby {0} is set to be installed'.format(name)
else:
ret['comment'] = 'Ruby {0} is already installed'.format(name)
return ret
rbenv_installed_ret = _check_and_install_rbenv(rbenv_installed_ret, user)
if rbenv_installed_ret['result'] is False:
ret['result'] = False
ret['comment'] = 'Rbenv failed to install'
return ret
else:
return _check_and_install_ruby(ret, name, default, user=user) | ['def', 'installed', '(', 'name', ',', 'default', '=', 'False', ',', 'user', '=', 'None', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'result'", ':', 'None', ',', "'comment'", ':', "''", ',', "'changes'", ':', '{', '}', '}', 'rbenv_installed_ret', '=', 'copy', '.', 'deepcopy', '(', 'ret', ')', 'if', 'name', '.', 'startswith', '(', "'ruby-'", ')', ':', 'name', '=', 're', '.', 'sub', '(', "r'^ruby-'", ',', "''", ',', 'name', ')', 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '=', '_ruby_installed', '(', 'ret', ',', 'name', ',', 'user', '=', 'user', ')', 'if', 'not', 'ret', '[', "'result'", ']', ':', 'ret', '[', "'comment'", ']', '=', "'Ruby {0} is set to be installed'", '.', 'format', '(', 'name', ')', 'else', ':', 'ret', '[', "'comment'", ']', '=', "'Ruby {0} is already installed'", '.', 'format', '(', 'name', ')', 'return', 'ret', 'rbenv_installed_ret', '=', '_check_and_install_rbenv', '(', 'rbenv_installed_ret', ',', 'user', ')', 'if', 'rbenv_installed_ret', '[', "'result'", ']', 'is', 'False', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', "'Rbenv failed to install'", 'return', 'ret', 'else', ':', 'return', '_check_and_install_ruby', '(', 'ret', ',', 'name', ',', 'default', ',', 'user', '=', 'user', ')'] | Verify that the specified ruby is installed with rbenv. Rbenv is
installed if necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rbenv as.
.. versionadded:: 0.17.0
.. versionadded:: 0.16.0 | ['Verify', 'that', 'the', 'specified', 'ruby', 'is', 'installed', 'with', 'rbenv', '.', 'Rbenv', 'is', 'installed', 'if', 'necessary', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rbenv.py#L109-L147 |
6,018 | WebarchivCZ/WA-KAT | src/wa_kat/templates/static/js/Lib/site-packages/components/input_controller.py | InputController._set_typeahead | def _set_typeahead(cls, el, value):
"""
Convert given `el` to typeahead input and set it to `value`.
This method also sets the dropdown icons and descriptors.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
value (list): List of dicts with two keys: ``source`` and ``val``.
"""
PlaceholderHandler.reset_placeholder_dropdown(el)
# if there is no elements, show alert icon in glyph
if not value and not el.value:
DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-alert")
return
# if there is only one element, don't use typeahead, just put the
# information to the input, set different dropdown glyph and put source
# to the dropdown
if len(value) == 1:
source = value[0]["source"].strip()
dropdown_el = DropdownHandler.set_dropdown_glyph(
el.id,
"glyphicon-eye-open"
)
dropdown_content = "<span class='gray_text'> (%s)</span>"
# save the source to the dropdown menu
if source:
dropdown_el.html = dropdown_content % source[::-1]
el.value = value[0]["val"]
return
# get reference to parent element
parent_id = el.parent.id
if "typeahead" not in parent_id.lower():
parent_id = el.parent.parent.id
if parent_id in cls._set_by_typeahead:
window.destroy_typeahead_tag("#" + parent_id)
# if there are multiple elements, put them to the typeahead and show
# dropdown glyph
window.make_typeahead_tag("#" + parent_id, value)
DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-menu-down")
PlaceholderHandler.set_placeholder_dropdown(el)
cls._set_by_typeahead.add(parent_id) | python | def _set_typeahead(cls, el, value):
"""
Convert given `el` to typeahead input and set it to `value`.
This method also sets the dropdown icons and descriptors.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
value (list): List of dicts with two keys: ``source`` and ``val``.
"""
PlaceholderHandler.reset_placeholder_dropdown(el)
# if there is no elements, show alert icon in glyph
if not value and not el.value:
DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-alert")
return
# if there is only one element, don't use typeahead, just put the
# information to the input, set different dropdown glyph and put source
# to the dropdown
if len(value) == 1:
source = value[0]["source"].strip()
dropdown_el = DropdownHandler.set_dropdown_glyph(
el.id,
"glyphicon-eye-open"
)
dropdown_content = "<span class='gray_text'> (%s)</span>"
# save the source to the dropdown menu
if source:
dropdown_el.html = dropdown_content % source[::-1]
el.value = value[0]["val"]
return
# get reference to parent element
parent_id = el.parent.id
if "typeahead" not in parent_id.lower():
parent_id = el.parent.parent.id
if parent_id in cls._set_by_typeahead:
window.destroy_typeahead_tag("#" + parent_id)
# if there are multiple elements, put them to the typeahead and show
# dropdown glyph
window.make_typeahead_tag("#" + parent_id, value)
DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-menu-down")
PlaceholderHandler.set_placeholder_dropdown(el)
cls._set_by_typeahead.add(parent_id) | ['def', '_set_typeahead', '(', 'cls', ',', 'el', ',', 'value', ')', ':', 'PlaceholderHandler', '.', 'reset_placeholder_dropdown', '(', 'el', ')', '# if there is no elements, show alert icon in glyph', 'if', 'not', 'value', 'and', 'not', 'el', '.', 'value', ':', 'DropdownHandler', '.', 'set_dropdown_glyph', '(', 'el', '.', 'id', ',', '"glyphicon-alert"', ')', 'return', "# if there is only one element, don't use typeahead, just put the", '# information to the input, set different dropdown glyph and put source', '# to the dropdown', 'if', 'len', '(', 'value', ')', '==', '1', ':', 'source', '=', 'value', '[', '0', ']', '[', '"source"', ']', '.', 'strip', '(', ')', 'dropdown_el', '=', 'DropdownHandler', '.', 'set_dropdown_glyph', '(', 'el', '.', 'id', ',', '"glyphicon-eye-open"', ')', 'dropdown_content', '=', '"<span class=\'gray_text\'> (%s)</span>"', '# save the source to the dropdown menu', 'if', 'source', ':', 'dropdown_el', '.', 'html', '=', 'dropdown_content', '%', 'source', '[', ':', ':', '-', '1', ']', 'el', '.', 'value', '=', 'value', '[', '0', ']', '[', '"val"', ']', 'return', '# get reference to parent element', 'parent_id', '=', 'el', '.', 'parent', '.', 'id', 'if', '"typeahead"', 'not', 'in', 'parent_id', '.', 'lower', '(', ')', ':', 'parent_id', '=', 'el', '.', 'parent', '.', 'parent', '.', 'id', 'if', 'parent_id', 'in', 'cls', '.', '_set_by_typeahead', ':', 'window', '.', 'destroy_typeahead_tag', '(', '"#"', '+', 'parent_id', ')', '# if there are multiple elements, put them to the typeahead and show', '# dropdown glyph', 'window', '.', 'make_typeahead_tag', '(', '"#"', '+', 'parent_id', ',', 'value', ')', 'DropdownHandler', '.', 'set_dropdown_glyph', '(', 'el', '.', 'id', ',', '"glyphicon-menu-down"', ')', 'PlaceholderHandler', '.', 'set_placeholder_dropdown', '(', 'el', ')', 'cls', '.', '_set_by_typeahead', '.', 'add', '(', 'parent_id', ')'] | Convert given `el` to typeahead input and set it to `value`.
This method also sets the dropdown icons and descriptors.
Args:
el (obj): Element reference to the input you want to convert to
typeahead.
value (list): List of dicts with two keys: ``source`` and ``val``. | ['Convert', 'given', 'el', 'to', 'typeahead', 'input', 'and', 'set', 'it', 'to', 'value', '.'] | train | https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/input_controller.py#L65-L114 |
6,019 | kpdyer/regex2dfa | third_party/re2/lib/codereview/codereview.py | EncodeMultipartFormData | def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
typecheck(key, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
typecheck(key, str)
typecheck(filename, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body | python | def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
typecheck(key, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
typecheck(key, str)
typecheck(filename, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body | ['def', 'EncodeMultipartFormData', '(', 'fields', ',', 'files', ')', ':', 'BOUNDARY', '=', "'-M-A-G-I-C---B-O-U-N-D-A-R-Y-'", 'CRLF', '=', "'\\r\\n'", 'lines', '=', '[', ']', 'for', '(', 'key', ',', 'value', ')', 'in', 'fields', ':', 'typecheck', '(', 'key', ',', 'str', ')', 'typecheck', '(', 'value', ',', 'str', ')', 'lines', '.', 'append', '(', "'--'", '+', 'BOUNDARY', ')', 'lines', '.', 'append', '(', '\'Content-Disposition: form-data; name="%s"\'', '%', 'key', ')', 'lines', '.', 'append', '(', "''", ')', 'lines', '.', 'append', '(', 'value', ')', 'for', '(', 'key', ',', 'filename', ',', 'value', ')', 'in', 'files', ':', 'typecheck', '(', 'key', ',', 'str', ')', 'typecheck', '(', 'filename', ',', 'str', ')', 'typecheck', '(', 'value', ',', 'str', ')', 'lines', '.', 'append', '(', "'--'", '+', 'BOUNDARY', ')', 'lines', '.', 'append', '(', '\'Content-Disposition: form-data; name="%s"; filename="%s"\'', '%', '(', 'key', ',', 'filename', ')', ')', 'lines', '.', 'append', '(', "'Content-Type: %s'", '%', 'GetContentType', '(', 'filename', ')', ')', 'lines', '.', 'append', '(', "''", ')', 'lines', '.', 'append', '(', 'value', ')', 'lines', '.', 'append', '(', "'--'", '+', 'BOUNDARY', '+', "'--'", ')', 'lines', '.', 'append', '(', "''", ')', 'body', '=', 'CRLF', '.', 'join', '(', 'lines', ')', 'content_type', '=', "'multipart/form-data; boundary=%s'", '%', 'BOUNDARY', 'return', 'content_type', ',', 'body'] | Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306 | ['Encode', 'form', 'fields', 'for', 'multipart', '/', 'form', '-', 'data', '.'] | train | https://github.com/kpdyer/regex2dfa/blob/109f877e60ef0dfcb430f11516d215930b7b9936/third_party/re2/lib/codereview/codereview.py#L3094-L3130 |
6,020 | wright-group/WrightTools | WrightTools/diagrams/WMEL.py | Artist.label_rows | def label_rows(self, labels, font_size=15, text_buffer=1.5):
"""Label rows.
Parameters
----------
labels : list of strings
Labels.
font_size : number (optional)
Font size. Default is 15.
text_buffer : number
Buffer around text. Default is 1.5.
"""
for i in range(len(self.subplots)):
plot = self.subplots[i][-1]
plot.text(
text_buffer,
0.5,
labels[i],
fontsize=font_size,
verticalalignment="center",
horizontalalignment="center",
) | python | def label_rows(self, labels, font_size=15, text_buffer=1.5):
"""Label rows.
Parameters
----------
labels : list of strings
Labels.
font_size : number (optional)
Font size. Default is 15.
text_buffer : number
Buffer around text. Default is 1.5.
"""
for i in range(len(self.subplots)):
plot = self.subplots[i][-1]
plot.text(
text_buffer,
0.5,
labels[i],
fontsize=font_size,
verticalalignment="center",
horizontalalignment="center",
) | ['def', 'label_rows', '(', 'self', ',', 'labels', ',', 'font_size', '=', '15', ',', 'text_buffer', '=', '1.5', ')', ':', 'for', 'i', 'in', 'range', '(', 'len', '(', 'self', '.', 'subplots', ')', ')', ':', 'plot', '=', 'self', '.', 'subplots', '[', 'i', ']', '[', '-', '1', ']', 'plot', '.', 'text', '(', 'text_buffer', ',', '0.5', ',', 'labels', '[', 'i', ']', ',', 'fontsize', '=', 'font_size', ',', 'verticalalignment', '=', '"center"', ',', 'horizontalalignment', '=', '"center"', ',', ')'] | Label rows.
Parameters
----------
labels : list of strings
Labels.
font_size : number (optional)
Font size. Default is 15.
text_buffer : number
Buffer around text. Default is 1.5. | ['Label', 'rows', '.'] | train | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/diagrams/WMEL.py#L261-L282 |
6,021 | Iotic-Labs/py-IoticAgent | src/IoticAgent/Core/RateLimiter.py | RateLimiter.throttle | def throttle(self):
"""Uses time.monotonic() (or time.sleep() if not available) to limit to the desired rate. Should be called once
per iteration of action which is to be throttled. Returns None unless a custom wait_cmd was specified in the
constructor in which case its return value is used if a wait was required.
"""
iterations = self.__iterations
timestamp = monotonic()
outdated_threshold = timestamp - self.__interval
with self.__lock:
# remove any iterations older than interval
try:
while iterations[0] < outdated_threshold:
iterations.popleft()
except IndexError:
pass
# apply throttling if rate would be exceeded
if len(iterations) <= self.__max_iterations:
iterations.append(timestamp)
retval = None
else:
# wait until oldest sample is too old
delay = max(0, iterations[0] + self.__interval - timestamp)
# only notify user about longer delays
if delay > 1:
logger.warning('Send throttling delay (interval=%d, max_iterations=%d): %.2fs', self.__interval,
self.__max_iterations, delay)
retval = self.__wait_cmd(delay)
# log actual addition time
iterations.append(monotonic())
return retval | python | def throttle(self):
"""Uses time.monotonic() (or time.sleep() if not available) to limit to the desired rate. Should be called once
per iteration of action which is to be throttled. Returns None unless a custom wait_cmd was specified in the
constructor in which case its return value is used if a wait was required.
"""
iterations = self.__iterations
timestamp = monotonic()
outdated_threshold = timestamp - self.__interval
with self.__lock:
# remove any iterations older than interval
try:
while iterations[0] < outdated_threshold:
iterations.popleft()
except IndexError:
pass
# apply throttling if rate would be exceeded
if len(iterations) <= self.__max_iterations:
iterations.append(timestamp)
retval = None
else:
# wait until oldest sample is too old
delay = max(0, iterations[0] + self.__interval - timestamp)
# only notify user about longer delays
if delay > 1:
logger.warning('Send throttling delay (interval=%d, max_iterations=%d): %.2fs', self.__interval,
self.__max_iterations, delay)
retval = self.__wait_cmd(delay)
# log actual addition time
iterations.append(monotonic())
return retval | ['def', 'throttle', '(', 'self', ')', ':', 'iterations', '=', 'self', '.', '__iterations', 'timestamp', '=', 'monotonic', '(', ')', 'outdated_threshold', '=', 'timestamp', '-', 'self', '.', '__interval', 'with', 'self', '.', '__lock', ':', '# remove any iterations older than interval', 'try', ':', 'while', 'iterations', '[', '0', ']', '<', 'outdated_threshold', ':', 'iterations', '.', 'popleft', '(', ')', 'except', 'IndexError', ':', 'pass', '# apply throttling if rate would be exceeded', 'if', 'len', '(', 'iterations', ')', '<=', 'self', '.', '__max_iterations', ':', 'iterations', '.', 'append', '(', 'timestamp', ')', 'retval', '=', 'None', 'else', ':', '# wait until oldest sample is too old', 'delay', '=', 'max', '(', '0', ',', 'iterations', '[', '0', ']', '+', 'self', '.', '__interval', '-', 'timestamp', ')', '# only notify user about longer delays', 'if', 'delay', '>', '1', ':', 'logger', '.', 'warning', '(', "'Send throttling delay (interval=%d, max_iterations=%d): %.2fs'", ',', 'self', '.', '__interval', ',', 'self', '.', '__max_iterations', ',', 'delay', ')', 'retval', '=', 'self', '.', '__wait_cmd', '(', 'delay', ')', '# log actual addition time', 'iterations', '.', 'append', '(', 'monotonic', '(', ')', ')', 'return', 'retval'] | Uses time.monotonic() (or time.sleep() if not available) to limit to the desired rate. Should be called once
per iteration of action which is to be throttled. Returns None unless a custom wait_cmd was specified in the
constructor in which case its return value is used if a wait was required. | ['Uses', 'time', '.', 'monotonic', '()', '(', 'or', 'time', '.', 'sleep', '()', 'if', 'not', 'available', ')', 'to', 'limit', 'to', 'the', 'desired', 'rate', '.', 'Should', 'be', 'called', 'once', 'per', 'iteration', 'of', 'action', 'which', 'is', 'to', 'be', 'throttled', '.', 'Returns', 'None', 'unless', 'a', 'custom', 'wait_cmd', 'was', 'specified', 'in', 'the', 'constructor', 'in', 'which', 'case', 'its', 'return', 'value', 'is', 'used', 'if', 'a', 'wait', 'was', 'required', '.'] | train | https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/RateLimiter.py#L54-L85 |
6,022 | DLR-RM/RAFCON | source/rafcon/gui/controllers/execution_history.py | ExecutionHistoryTreeController.notification_selected_sm_changed | def notification_selected_sm_changed(self, model, prop_name, info):
"""If a new state machine is selected, make sure expansion state is stored and tree updated"""
selected_state_machine_id = self.model.selected_state_machine_id
if selected_state_machine_id is None:
return
self.update() | python | def notification_selected_sm_changed(self, model, prop_name, info):
"""If a new state machine is selected, make sure expansion state is stored and tree updated"""
selected_state_machine_id = self.model.selected_state_machine_id
if selected_state_machine_id is None:
return
self.update() | ['def', 'notification_selected_sm_changed', '(', 'self', ',', 'model', ',', 'prop_name', ',', 'info', ')', ':', 'selected_state_machine_id', '=', 'self', '.', 'model', '.', 'selected_state_machine_id', 'if', 'selected_state_machine_id', 'is', 'None', ':', 'return', 'self', '.', 'update', '(', ')'] | If a new state machine is selected, make sure expansion state is stored and tree updated | ['If', 'a', 'new', 'state', 'machine', 'is', 'selected', 'make', 'sure', 'expansion', 'state', 'is', 'stored', 'and', 'tree', 'updated'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/execution_history.py#L325-L330 |
6,023 | oczkers/pyllegro | pyllegro/core.py | Allegro.getOrders | def getOrders(self, auction_ids):
"""Return orders details."""
orders = {}
# chunk list (only 25 auction_ids per request)
for chunk in chunked(auction_ids, 25):
# auctions = [{'item': auction_id} for auction_id in chunk] # TODO?: is it needed?
auctions = self.ArrayOfLong(chunk)
rc = self.__ask__('doGetPostBuyData', itemsArray=auctions)
for auction in rc:
orders_auction = []
bids = self.getBids(auction['itemId'])
# get orders details
# for i in auction.get('usersPostBuyData', ()):
if not auction['usersPostBuyData']: # empty
continue
for i in auction['usersPostBuyData']['item']:
i = i['userData']
if i['userId'] not in bids: # temporary(?) webapi bug fix
continue
orders_auction.append({
'allegro_aid': auction['itemId'],
'allegro_uid': i['userId'],
'allegro_login': magicDecode(i['userLogin']),
'name': magicDecode(i['userFirstName']),
'surname': magicDecode(i['userLastName']),
'company': magicDecode(i['userCompany']),
'postcode': magicDecode(i['userPostcode']),
'city': magicDecode(i['userCity']),
'address': magicDecode(i['userAddress']),
'email': magicDecode(i['userEmail']),
'phone': i['userPhone'],
'price': bids[i['userId']]['price'],
'quantity': bids[i['userId']]['quantity'],
'date_buy': bids[i['userId']]['date_buy']
})
orders[auction['itemId']] = orders_auction
return orders | python | def getOrders(self, auction_ids):
"""Return orders details."""
orders = {}
# chunk list (only 25 auction_ids per request)
for chunk in chunked(auction_ids, 25):
# auctions = [{'item': auction_id} for auction_id in chunk] # TODO?: is it needed?
auctions = self.ArrayOfLong(chunk)
rc = self.__ask__('doGetPostBuyData', itemsArray=auctions)
for auction in rc:
orders_auction = []
bids = self.getBids(auction['itemId'])
# get orders details
# for i in auction.get('usersPostBuyData', ()):
if not auction['usersPostBuyData']: # empty
continue
for i in auction['usersPostBuyData']['item']:
i = i['userData']
if i['userId'] not in bids: # temporary(?) webapi bug fix
continue
orders_auction.append({
'allegro_aid': auction['itemId'],
'allegro_uid': i['userId'],
'allegro_login': magicDecode(i['userLogin']),
'name': magicDecode(i['userFirstName']),
'surname': magicDecode(i['userLastName']),
'company': magicDecode(i['userCompany']),
'postcode': magicDecode(i['userPostcode']),
'city': magicDecode(i['userCity']),
'address': magicDecode(i['userAddress']),
'email': magicDecode(i['userEmail']),
'phone': i['userPhone'],
'price': bids[i['userId']]['price'],
'quantity': bids[i['userId']]['quantity'],
'date_buy': bids[i['userId']]['date_buy']
})
orders[auction['itemId']] = orders_auction
return orders | ['def', 'getOrders', '(', 'self', ',', 'auction_ids', ')', ':', 'orders', '=', '{', '}', '# chunk list (only 25 auction_ids per request)', 'for', 'chunk', 'in', 'chunked', '(', 'auction_ids', ',', '25', ')', ':', "# auctions = [{'item': auction_id} for auction_id in chunk] # TODO?: is it needed?", 'auctions', '=', 'self', '.', 'ArrayOfLong', '(', 'chunk', ')', 'rc', '=', 'self', '.', '__ask__', '(', "'doGetPostBuyData'", ',', 'itemsArray', '=', 'auctions', ')', 'for', 'auction', 'in', 'rc', ':', 'orders_auction', '=', '[', ']', 'bids', '=', 'self', '.', 'getBids', '(', 'auction', '[', "'itemId'", ']', ')', '# get orders details', "# for i in auction.get('usersPostBuyData', ()):", 'if', 'not', 'auction', '[', "'usersPostBuyData'", ']', ':', '# empty', 'continue', 'for', 'i', 'in', 'auction', '[', "'usersPostBuyData'", ']', '[', "'item'", ']', ':', 'i', '=', 'i', '[', "'userData'", ']', 'if', 'i', '[', "'userId'", ']', 'not', 'in', 'bids', ':', '# temporary(?) webapi bug fix', 'continue', 'orders_auction', '.', 'append', '(', '{', "'allegro_aid'", ':', 'auction', '[', "'itemId'", ']', ',', "'allegro_uid'", ':', 'i', '[', "'userId'", ']', ',', "'allegro_login'", ':', 'magicDecode', '(', 'i', '[', "'userLogin'", ']', ')', ',', "'name'", ':', 'magicDecode', '(', 'i', '[', "'userFirstName'", ']', ')', ',', "'surname'", ':', 'magicDecode', '(', 'i', '[', "'userLastName'", ']', ')', ',', "'company'", ':', 'magicDecode', '(', 'i', '[', "'userCompany'", ']', ')', ',', "'postcode'", ':', 'magicDecode', '(', 'i', '[', "'userPostcode'", ']', ')', ',', "'city'", ':', 'magicDecode', '(', 'i', '[', "'userCity'", ']', ')', ',', "'address'", ':', 'magicDecode', '(', 'i', '[', "'userAddress'", ']', ')', ',', "'email'", ':', 'magicDecode', '(', 'i', '[', "'userEmail'", ']', ')', ',', "'phone'", ':', 'i', '[', "'userPhone'", ']', ',', "'price'", ':', 'bids', '[', 'i', '[', "'userId'", ']', ']', '[', "'price'", ']', ',', "'quantity'", ':', 'bids', '[', 'i', '[', "'userId'", ']', ']', '[', "'quantity'", ']', ',', "'date_buy'", ':', 'bids', '[', 'i', '[', "'userId'", ']', ']', '[', "'date_buy'", ']', '}', ')', 'orders', '[', 'auction', '[', "'itemId'", ']', ']', '=', 'orders_auction', 'return', 'orders'] | Return orders details. | ['Return', 'orders', 'details', '.'] | train | https://github.com/oczkers/pyllegro/blob/c6d7090560cb9e579f7f769a9eec131a3db2c258/pyllegro/core.py#L181-L217 |
6,024 | stitchfix/pyxley | pyxley/charts/mg/graphic.py | Graphic.custom_line_color_map | def custom_line_color_map(self, values):
"""Set the custom line color map.
Args:
values (list): list of colors.
Raises:
TypeError: Custom line color map must be a list.
"""
if not isinstance(values, list):
raise TypeError("custom_line_color_map must be a list")
self.options["custom_line_color_map"] = values | python | def custom_line_color_map(self, values):
"""Set the custom line color map.
Args:
values (list): list of colors.
Raises:
TypeError: Custom line color map must be a list.
"""
if not isinstance(values, list):
raise TypeError("custom_line_color_map must be a list")
self.options["custom_line_color_map"] = values | ['def', 'custom_line_color_map', '(', 'self', ',', 'values', ')', ':', 'if', 'not', 'isinstance', '(', 'values', ',', 'list', ')', ':', 'raise', 'TypeError', '(', '"custom_line_color_map must be a list"', ')', 'self', '.', 'options', '[', '"custom_line_color_map"', ']', '=', 'values'] | Set the custom line color map.
Args:
values (list): list of colors.
Raises:
TypeError: Custom line color map must be a list. | ['Set', 'the', 'custom', 'line', 'color', 'map', '.'] | train | https://github.com/stitchfix/pyxley/blob/2dab00022d977d986169cd8a629b3a2f91be893f/pyxley/charts/mg/graphic.py#L89-L101 |
6,025 | noahbenson/neuropythy | neuropythy/hcp/files.py | auto_download | def auto_download(status,
credentials=None, subjects_path=None, overwrite=False, release='HCP_1200',
database='hcp-openaccess', retinotopy_path=None, retinotopy_cache=True):
'''
auto_download(True) enables automatic downloading of HCP subject data when the subject ID
is requested. The optional arguments are identical to those required for the function
download(), and they are passed to download() when auto-downloading occurs.
auto_download(False) disables automatic downloading.
Automatic downloading is disabled by default unless the environment variable
HCP_AUTO_DOWNLOAD is set to true. In this case, the database and release are derived from
the environment variables HCP_AUTO_DATABASE and HCP_AUTO_RELEASE, and the variable
HCP_AUTO_PATH can be used to override the default subjects path.
'''
global _auto_download_options, _retinotopy_path
status = (['structure','retinotopy'] if status is True else
[] if status is False else
[status] if pimms.is_str(status) else
status)
_auto_download_options = {'structure':False, 'retinotopy':False}
for s in status:
if s.lower() == 'structure':
if s3fs is None:
raise RuntimeError(
's3fs was not successfully loaded, so downloads may not occur; check'
' your Python configuration to make sure that s3fs is installed. See'
' http://s3fs.readthedocs.io/en/latest/install.html for details.')
if credentials is None: credentials = config['hcp_credentials']
if credentials is None: raise ValueError('No HCP credentials detected or found')
(s3fs_key, s3fs_secret) = to_credentials(credentials)
if subjects_path is None:
sdirs = config['hcp_subject_paths']
subjects_path = next((sd for sd in sdirs if os.path.isdir(sd)), None)
if subjects_path is None: raise ValueError('No subjects path given or found')
else: subjects_path = os.path.expanduser(subjects_path)
fs = s3fs.S3FileSystem(key=s3fs_key, secret=s3fs_secret)
hcpbase = '/'.join([database, release])
if not fs.exists(hcpbase):
raise ValueError('database/release (%s/%s) not found' % (database, release))
sids = set([])
for f in fs.ls(hcpbase):
f = os.path.split(f)[-1]
if len(f) == 6 and f[0] != '0':
try: sids.add(int(f))
except Exception: pass
_auto_download_options['structure'] = True
_auto_download_options['subjects_path'] = subjects_path
_auto_download_options['overwrite'] = overwrite
_auto_download_options['release'] = release
_auto_download_options['database'] = database
_auto_download_options['subject_ids'] = frozenset(sids)
_auto_download_options['s3fs'] = fs
elif s.lower() == 'retinotopy':
if retinotopy_path is None:
dirs = config['hcp_subject_paths']
if subjects_path is not None: dirs = [subjects_path] + list(dirs)
if _retinotopy_path is not None: dirs = [_retinotopy_path] + list(dirs)
retinotopy_path = next((sd for sd in dirs if os.path.isdir(sd)), None)
if retinotopy_path is None: raise ValueError('No retinotopy path given or found')
else: retinotopy_path = os.path.expanduser(retinotopy_path)
_auto_download_options['retinotopy'] = True
_auto_download_options['retinotopy_path'] = retinotopy_path
_auto_download_options['retinotopy_cache'] = retinotopy_cache
else: raise ValueError('unrecognized auto_download argument: %s' % s)
if all(v is False for v in six.itervalues(_auto_download_options)):
_auto_download_options = None | python | def auto_download(status,
credentials=None, subjects_path=None, overwrite=False, release='HCP_1200',
database='hcp-openaccess', retinotopy_path=None, retinotopy_cache=True):
'''
auto_download(True) enables automatic downloading of HCP subject data when the subject ID
is requested. The optional arguments are identical to those required for the function
download(), and they are passed to download() when auto-downloading occurs.
auto_download(False) disables automatic downloading.
Automatic downloading is disabled by default unless the environment variable
HCP_AUTO_DOWNLOAD is set to true. In this case, the database and release are derived from
the environment variables HCP_AUTO_DATABASE and HCP_AUTO_RELEASE, and the variable
HCP_AUTO_PATH can be used to override the default subjects path.
'''
global _auto_download_options, _retinotopy_path
status = (['structure','retinotopy'] if status is True else
[] if status is False else
[status] if pimms.is_str(status) else
status)
_auto_download_options = {'structure':False, 'retinotopy':False}
for s in status:
if s.lower() == 'structure':
if s3fs is None:
raise RuntimeError(
's3fs was not successfully loaded, so downloads may not occur; check'
' your Python configuration to make sure that s3fs is installed. See'
' http://s3fs.readthedocs.io/en/latest/install.html for details.')
if credentials is None: credentials = config['hcp_credentials']
if credentials is None: raise ValueError('No HCP credentials detected or found')
(s3fs_key, s3fs_secret) = to_credentials(credentials)
if subjects_path is None:
sdirs = config['hcp_subject_paths']
subjects_path = next((sd for sd in sdirs if os.path.isdir(sd)), None)
if subjects_path is None: raise ValueError('No subjects path given or found')
else: subjects_path = os.path.expanduser(subjects_path)
fs = s3fs.S3FileSystem(key=s3fs_key, secret=s3fs_secret)
hcpbase = '/'.join([database, release])
if not fs.exists(hcpbase):
raise ValueError('database/release (%s/%s) not found' % (database, release))
sids = set([])
for f in fs.ls(hcpbase):
f = os.path.split(f)[-1]
if len(f) == 6 and f[0] != '0':
try: sids.add(int(f))
except Exception: pass
_auto_download_options['structure'] = True
_auto_download_options['subjects_path'] = subjects_path
_auto_download_options['overwrite'] = overwrite
_auto_download_options['release'] = release
_auto_download_options['database'] = database
_auto_download_options['subject_ids'] = frozenset(sids)
_auto_download_options['s3fs'] = fs
elif s.lower() == 'retinotopy':
if retinotopy_path is None:
dirs = config['hcp_subject_paths']
if subjects_path is not None: dirs = [subjects_path] + list(dirs)
if _retinotopy_path is not None: dirs = [_retinotopy_path] + list(dirs)
retinotopy_path = next((sd for sd in dirs if os.path.isdir(sd)), None)
if retinotopy_path is None: raise ValueError('No retinotopy path given or found')
else: retinotopy_path = os.path.expanduser(retinotopy_path)
_auto_download_options['retinotopy'] = True
_auto_download_options['retinotopy_path'] = retinotopy_path
_auto_download_options['retinotopy_cache'] = retinotopy_cache
else: raise ValueError('unrecognized auto_download argument: %s' % s)
if all(v is False for v in six.itervalues(_auto_download_options)):
_auto_download_options = None | ['def', 'auto_download', '(', 'status', ',', 'credentials', '=', 'None', ',', 'subjects_path', '=', 'None', ',', 'overwrite', '=', 'False', ',', 'release', '=', "'HCP_1200'", ',', 'database', '=', "'hcp-openaccess'", ',', 'retinotopy_path', '=', 'None', ',', 'retinotopy_cache', '=', 'True', ')', ':', 'global', '_auto_download_options', ',', '_retinotopy_path', 'status', '=', '(', '[', "'structure'", ',', "'retinotopy'", ']', 'if', 'status', 'is', 'True', 'else', '[', ']', 'if', 'status', 'is', 'False', 'else', '[', 'status', ']', 'if', 'pimms', '.', 'is_str', '(', 'status', ')', 'else', 'status', ')', '_auto_download_options', '=', '{', "'structure'", ':', 'False', ',', "'retinotopy'", ':', 'False', '}', 'for', 's', 'in', 'status', ':', 'if', 's', '.', 'lower', '(', ')', '==', "'structure'", ':', 'if', 's3fs', 'is', 'None', ':', 'raise', 'RuntimeError', '(', "'s3fs was not successfully loaded, so downloads may not occur; check'", "' your Python configuration to make sure that s3fs is installed. See'", "' http://s3fs.readthedocs.io/en/latest/install.html for details.'", ')', 'if', 'credentials', 'is', 'None', ':', 'credentials', '=', 'config', '[', "'hcp_credentials'", ']', 'if', 'credentials', 'is', 'None', ':', 'raise', 'ValueError', '(', "'No HCP credentials detected or found'", ')', '(', 's3fs_key', ',', 's3fs_secret', ')', '=', 'to_credentials', '(', 'credentials', ')', 'if', 'subjects_path', 'is', 'None', ':', 'sdirs', '=', 'config', '[', "'hcp_subject_paths'", ']', 'subjects_path', '=', 'next', '(', '(', 'sd', 'for', 'sd', 'in', 'sdirs', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'sd', ')', ')', ',', 'None', ')', 'if', 'subjects_path', 'is', 'None', ':', 'raise', 'ValueError', '(', "'No subjects path given or found'", ')', 'else', ':', 'subjects_path', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'subjects_path', ')', 'fs', '=', 's3fs', '.', 'S3FileSystem', '(', 'key', '=', 's3fs_key', ',', 'secret', '=', 's3fs_secret', ')', 'hcpbase', '=', "'/'", '.', 'join', '(', '[', 'database', ',', 'release', ']', ')', 'if', 'not', 'fs', '.', 'exists', '(', 'hcpbase', ')', ':', 'raise', 'ValueError', '(', "'database/release (%s/%s) not found'", '%', '(', 'database', ',', 'release', ')', ')', 'sids', '=', 'set', '(', '[', ']', ')', 'for', 'f', 'in', 'fs', '.', 'ls', '(', 'hcpbase', ')', ':', 'f', '=', 'os', '.', 'path', '.', 'split', '(', 'f', ')', '[', '-', '1', ']', 'if', 'len', '(', 'f', ')', '==', '6', 'and', 'f', '[', '0', ']', '!=', "'0'", ':', 'try', ':', 'sids', '.', 'add', '(', 'int', '(', 'f', ')', ')', 'except', 'Exception', ':', 'pass', '_auto_download_options', '[', "'structure'", ']', '=', 'True', '_auto_download_options', '[', "'subjects_path'", ']', '=', 'subjects_path', '_auto_download_options', '[', "'overwrite'", ']', '=', 'overwrite', '_auto_download_options', '[', "'release'", ']', '=', 'release', '_auto_download_options', '[', "'database'", ']', '=', 'database', '_auto_download_options', '[', "'subject_ids'", ']', '=', 'frozenset', '(', 'sids', ')', '_auto_download_options', '[', "'s3fs'", ']', '=', 'fs', 'elif', 's', '.', 'lower', '(', ')', '==', "'retinotopy'", ':', 'if', 'retinotopy_path', 'is', 'None', ':', 'dirs', '=', 'config', '[', "'hcp_subject_paths'", ']', 'if', 'subjects_path', 'is', 'not', 'None', ':', 'dirs', '=', '[', 'subjects_path', ']', '+', 'list', '(', 'dirs', ')', 'if', '_retinotopy_path', 'is', 'not', 'None', ':', 'dirs', '=', '[', '_retinotopy_path', ']', '+', 'list', '(', 'dirs', ')', 'retinotopy_path', '=', 'next', '(', '(', 'sd', 'for', 'sd', 'in', 'dirs', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'sd', ')', ')', ',', 'None', ')', 'if', 'retinotopy_path', 'is', 'None', ':', 'raise', 'ValueError', '(', "'No retinotopy path given or found'", ')', 'else', ':', 'retinotopy_path', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'retinotopy_path', ')', '_auto_download_options', '[', "'retinotopy'", ']', '=', 'True', '_auto_download_options', '[', "'retinotopy_path'", ']', '=', 'retinotopy_path', '_auto_download_options', '[', "'retinotopy_cache'", ']', '=', 'retinotopy_cache', 'else', ':', 'raise', 'ValueError', '(', "'unrecognized auto_download argument: %s'", '%', 's', ')', 'if', 'all', '(', 'v', 'is', 'False', 'for', 'v', 'in', 'six', '.', 'itervalues', '(', '_auto_download_options', ')', ')', ':', '_auto_download_options', '=', 'None'] | auto_download(True) enables automatic downloading of HCP subject data when the subject ID
is requested. The optional arguments are identical to those required for the function
download(), and they are passed to download() when auto-downloading occurs.
auto_download(False) disables automatic downloading.
Automatic downloading is disabled by default unless the environment variable
HCP_AUTO_DOWNLOAD is set to true. In this case, the database and release are derived from
the environment variables HCP_AUTO_DATABASE and HCP_AUTO_RELEASE, and the variable
HCP_AUTO_PATH can be used to override the default subjects path. | ['auto_download', '(', 'True', ')', 'enables', 'automatic', 'downloading', 'of', 'HCP', 'subject', 'data', 'when', 'the', 'subject', 'ID', 'is', 'requested', '.', 'The', 'optional', 'arguments', 'are', 'identical', 'to', 'those', 'required', 'for', 'the', 'function', 'download', '()', 'and', 'they', 'are', 'passed', 'to', 'download', '()', 'when', 'auto', '-', 'downloading', 'occurs', '.', 'auto_download', '(', 'False', ')', 'disables', 'automatic', 'downloading', '.'] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/hcp/files.py#L1833-L1898 |
6,026 | mjirik/imtools | imtools/tools.py | sliding_window_3d | def sliding_window_3d(image, step_size, window_size, mask=None, only_whole=True, include_last=False):
"""
Creates generator of sliding windows.
:param image: input image
:param step_size: number of pixels we are going to skip in both the (x, y) direction
:param window_size: the width and height of the window we are going to extract
:param mask: region of interest, if None it will slide through the whole image
:param only_whole: if True - produces only windows of the given window_size
:return: generator that produce upper left corner of the window, center of the window and the sliding window itself
"""
if not isinstance(step_size, tuple):
step_size = (step_size, step_size, step_size)
if image.ndim == 2:
image = np.expand_dims(image, 0)
window_size = (1, window_size[0], window_size[1])
if mask is not None:
mask = np.expand_dims(mask, 0)
if mask is None:
mask = np.ones(image.shape, dtype=np.bool)
# slide a window across the image
for z in xrange(0, image.shape[0], step_size[0]):
# c_z = z + window_size[0] / 2.
for y in xrange(0, image.shape[1], step_size[1]):
# c_y = y + window_size[2] / 2.
for x in xrange(0, image.shape[2], step_size[2]):
# c_x = x + window_size[1] / 2.
# if c_z < mask.shape[0] and c_x < mask.shape[2] and c_y < mask.shape[1] and mask[c_z, c_y, c_x]:
# yield the current window
end_x = x + window_size[1]
end_y = y + window_size[2]
end_z = z + window_size[0]
if only_whole and (end_z > image.shape[0] or end_x > image.shape[2] or end_y > image.shape[1]):
# if only_whole:
continue
# elif include_last:
# mask_out = np.zeros(image.shape, dtype=np.bool)
# x = image.shape[2] - window_size[1]
# y = image.shape[1] - window_size[2]
# z = image.shape[0] - window_size[0]
# end_x = image.shape[2]
# end_y = image.shape[1]
# end_z = image.shape[0]
#
# mask_out[z:end_z, y:end_y, x:end_x] = True
# yield (x, y, z, mask_out, image[z:end_z, y:end_y, x:end_x])
else:
mask_out = np.zeros(image.shape, dtype=np.bool)
mask_out[z:end_z, y:end_y, x:end_x] = True
yield (x, y, z, mask_out, image[z:end_z, y:end_y, x:end_x]) | python | def sliding_window_3d(image, step_size, window_size, mask=None, only_whole=True, include_last=False):
"""
Creates generator of sliding windows.
:param image: input image
:param step_size: number of pixels we are going to skip in both the (x, y) direction
:param window_size: the width and height of the window we are going to extract
:param mask: region of interest, if None it will slide through the whole image
:param only_whole: if True - produces only windows of the given window_size
:return: generator that produce upper left corner of the window, center of the window and the sliding window itself
"""
if not isinstance(step_size, tuple):
step_size = (step_size, step_size, step_size)
if image.ndim == 2:
image = np.expand_dims(image, 0)
window_size = (1, window_size[0], window_size[1])
if mask is not None:
mask = np.expand_dims(mask, 0)
if mask is None:
mask = np.ones(image.shape, dtype=np.bool)
# slide a window across the image
for z in xrange(0, image.shape[0], step_size[0]):
# c_z = z + window_size[0] / 2.
for y in xrange(0, image.shape[1], step_size[1]):
# c_y = y + window_size[2] / 2.
for x in xrange(0, image.shape[2], step_size[2]):
# c_x = x + window_size[1] / 2.
# if c_z < mask.shape[0] and c_x < mask.shape[2] and c_y < mask.shape[1] and mask[c_z, c_y, c_x]:
# yield the current window
end_x = x + window_size[1]
end_y = y + window_size[2]
end_z = z + window_size[0]
if only_whole and (end_z > image.shape[0] or end_x > image.shape[2] or end_y > image.shape[1]):
# if only_whole:
continue
# elif include_last:
# mask_out = np.zeros(image.shape, dtype=np.bool)
# x = image.shape[2] - window_size[1]
# y = image.shape[1] - window_size[2]
# z = image.shape[0] - window_size[0]
# end_x = image.shape[2]
# end_y = image.shape[1]
# end_z = image.shape[0]
#
# mask_out[z:end_z, y:end_y, x:end_x] = True
# yield (x, y, z, mask_out, image[z:end_z, y:end_y, x:end_x])
else:
mask_out = np.zeros(image.shape, dtype=np.bool)
mask_out[z:end_z, y:end_y, x:end_x] = True
yield (x, y, z, mask_out, image[z:end_z, y:end_y, x:end_x]) | ['def', 'sliding_window_3d', '(', 'image', ',', 'step_size', ',', 'window_size', ',', 'mask', '=', 'None', ',', 'only_whole', '=', 'True', ',', 'include_last', '=', 'False', ')', ':', 'if', 'not', 'isinstance', '(', 'step_size', ',', 'tuple', ')', ':', 'step_size', '=', '(', 'step_size', ',', 'step_size', ',', 'step_size', ')', 'if', 'image', '.', 'ndim', '==', '2', ':', 'image', '=', 'np', '.', 'expand_dims', '(', 'image', ',', '0', ')', 'window_size', '=', '(', '1', ',', 'window_size', '[', '0', ']', ',', 'window_size', '[', '1', ']', ')', 'if', 'mask', 'is', 'not', 'None', ':', 'mask', '=', 'np', '.', 'expand_dims', '(', 'mask', ',', '0', ')', 'if', 'mask', 'is', 'None', ':', 'mask', '=', 'np', '.', 'ones', '(', 'image', '.', 'shape', ',', 'dtype', '=', 'np', '.', 'bool', ')', '# slide a window across the image', 'for', 'z', 'in', 'xrange', '(', '0', ',', 'image', '.', 'shape', '[', '0', ']', ',', 'step_size', '[', '0', ']', ')', ':', '# c_z = z + window_size[0] / 2.', 'for', 'y', 'in', 'xrange', '(', '0', ',', 'image', '.', 'shape', '[', '1', ']', ',', 'step_size', '[', '1', ']', ')', ':', '# c_y = y + window_size[2] / 2.', 'for', 'x', 'in', 'xrange', '(', '0', ',', 'image', '.', 'shape', '[', '2', ']', ',', 'step_size', '[', '2', ']', ')', ':', '# c_x = x + window_size[1] / 2.', '# if c_z < mask.shape[0] and c_x < mask.shape[2] and c_y < mask.shape[1] and mask[c_z, c_y, c_x]:', '# yield the current window', 'end_x', '=', 'x', '+', 'window_size', '[', '1', ']', 'end_y', '=', 'y', '+', 'window_size', '[', '2', ']', 'end_z', '=', 'z', '+', 'window_size', '[', '0', ']', 'if', 'only_whole', 'and', '(', 'end_z', '>', 'image', '.', 'shape', '[', '0', ']', 'or', 'end_x', '>', 'image', '.', 'shape', '[', '2', ']', 'or', 'end_y', '>', 'image', '.', 'shape', '[', '1', ']', ')', ':', '# if only_whole:', 'continue', '# elif include_last:', '# mask_out = np.zeros(image.shape, dtype=np.bool)', '# x = image.shape[2] - window_size[1]', '# y = image.shape[1] - window_size[2]', '# z = image.shape[0] - window_size[0]', '# end_x = image.shape[2]', '# end_y = image.shape[1]', '# end_z = image.shape[0]', '#', '# mask_out[z:end_z, y:end_y, x:end_x] = True', '# yield (x, y, z, mask_out, image[z:end_z, y:end_y, x:end_x])', 'else', ':', 'mask_out', '=', 'np', '.', 'zeros', '(', 'image', '.', 'shape', ',', 'dtype', '=', 'np', '.', 'bool', ')', 'mask_out', '[', 'z', ':', 'end_z', ',', 'y', ':', 'end_y', ',', 'x', ':', 'end_x', ']', '=', 'True', 'yield', '(', 'x', ',', 'y', ',', 'z', ',', 'mask_out', ',', 'image', '[', 'z', ':', 'end_z', ',', 'y', ':', 'end_y', ',', 'x', ':', 'end_x', ']', ')'] | Creates generator of sliding windows.
:param image: input image
:param step_size: number of pixels we are going to skip in both the (x, y) direction
:param window_size: the width and height of the window we are going to extract
:param mask: region of interest, if None it will slide through the whole image
:param only_whole: if True - produces only windows of the given window_size
:return: generator that produce upper left corner of the window, center of the window and the sliding window itself | ['Creates', 'generator', 'of', 'sliding', 'windows', '.', ':', 'param', 'image', ':', 'input', 'image', ':', 'param', 'step_size', ':', 'number', 'of', 'pixels', 'we', 'are', 'going', 'to', 'skip', 'in', 'both', 'the', '(', 'x', 'y', ')', 'direction', ':', 'param', 'window_size', ':', 'the', 'width', 'and', 'height', 'of', 'the', 'window', 'we', 'are', 'going', 'to', 'extract', ':', 'param', 'mask', ':', 'region', 'of', 'interest', 'if', 'None', 'it', 'will', 'slide', 'through', 'the', 'whole', 'image', ':', 'param', 'only_whole', ':', 'if', 'True', '-', 'produces', 'only', 'windows', 'of', 'the', 'given', 'window_size', ':', 'return', ':', 'generator', 'that', 'produce', 'upper', 'left', 'corner', 'of', 'the', 'window', 'center', 'of', 'the', 'window', 'and', 'the', 'sliding', 'window', 'itself'] | train | https://github.com/mjirik/imtools/blob/eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a/imtools/tools.py#L1325-L1373 |
6,027 | Karaage-Cluster/python-tldap | tldap/backend/fake_transactions.py | LDAPwrapper.rename | def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:
"""
rename a dn in the ldap database; see ldap module. doesn't return a
result if transactions enabled.
"""
_debug("rename", self, dn, new_rdn, new_base_dn)
# split up the parameters
split_dn = tldap.dn.str2dn(dn)
split_newrdn = tldap.dn.str2dn(new_rdn)
assert(len(split_newrdn) == 1)
# make dn unqualified
rdn = tldap.dn.dn2str(split_dn[0:1])
# make newrdn fully qualified dn
tmplist = [split_newrdn[0]]
if new_base_dn is not None:
tmplist.extend(tldap.dn.str2dn(new_base_dn))
old_base_dn = tldap.dn.dn2str(split_dn[1:])
else:
tmplist.extend(split_dn[1:])
old_base_dn = None
newdn = tldap.dn.dn2str(tmplist)
_debug("--> commit ", self, dn, new_rdn, new_base_dn)
_debug("--> rollback", self, newdn, rdn, old_base_dn)
# on commit carry out action; on rollback reverse rename
def on_commit(obj):
obj.modify_dn(dn, new_rdn, new_superior=new_base_dn)
def on_rollback(obj):
obj.modify_dn(newdn, rdn, new_superior=old_base_dn)
return self._process(on_commit, on_rollback) | python | def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:
"""
rename a dn in the ldap database; see ldap module. doesn't return a
result if transactions enabled.
"""
_debug("rename", self, dn, new_rdn, new_base_dn)
# split up the parameters
split_dn = tldap.dn.str2dn(dn)
split_newrdn = tldap.dn.str2dn(new_rdn)
assert(len(split_newrdn) == 1)
# make dn unqualified
rdn = tldap.dn.dn2str(split_dn[0:1])
# make newrdn fully qualified dn
tmplist = [split_newrdn[0]]
if new_base_dn is not None:
tmplist.extend(tldap.dn.str2dn(new_base_dn))
old_base_dn = tldap.dn.dn2str(split_dn[1:])
else:
tmplist.extend(split_dn[1:])
old_base_dn = None
newdn = tldap.dn.dn2str(tmplist)
_debug("--> commit ", self, dn, new_rdn, new_base_dn)
_debug("--> rollback", self, newdn, rdn, old_base_dn)
# on commit carry out action; on rollback reverse rename
def on_commit(obj):
obj.modify_dn(dn, new_rdn, new_superior=new_base_dn)
def on_rollback(obj):
obj.modify_dn(newdn, rdn, new_superior=old_base_dn)
return self._process(on_commit, on_rollback) | ['def', 'rename', '(', 'self', ',', 'dn', ':', 'str', ',', 'new_rdn', ':', 'str', ',', 'new_base_dn', ':', 'Optional', '[', 'str', ']', '=', 'None', ')', '->', 'None', ':', '_debug', '(', '"rename"', ',', 'self', ',', 'dn', ',', 'new_rdn', ',', 'new_base_dn', ')', '# split up the parameters', 'split_dn', '=', 'tldap', '.', 'dn', '.', 'str2dn', '(', 'dn', ')', 'split_newrdn', '=', 'tldap', '.', 'dn', '.', 'str2dn', '(', 'new_rdn', ')', 'assert', '(', 'len', '(', 'split_newrdn', ')', '==', '1', ')', '# make dn unqualified', 'rdn', '=', 'tldap', '.', 'dn', '.', 'dn2str', '(', 'split_dn', '[', '0', ':', '1', ']', ')', '# make newrdn fully qualified dn', 'tmplist', '=', '[', 'split_newrdn', '[', '0', ']', ']', 'if', 'new_base_dn', 'is', 'not', 'None', ':', 'tmplist', '.', 'extend', '(', 'tldap', '.', 'dn', '.', 'str2dn', '(', 'new_base_dn', ')', ')', 'old_base_dn', '=', 'tldap', '.', 'dn', '.', 'dn2str', '(', 'split_dn', '[', '1', ':', ']', ')', 'else', ':', 'tmplist', '.', 'extend', '(', 'split_dn', '[', '1', ':', ']', ')', 'old_base_dn', '=', 'None', 'newdn', '=', 'tldap', '.', 'dn', '.', 'dn2str', '(', 'tmplist', ')', '_debug', '(', '"--> commit "', ',', 'self', ',', 'dn', ',', 'new_rdn', ',', 'new_base_dn', ')', '_debug', '(', '"--> rollback"', ',', 'self', ',', 'newdn', ',', 'rdn', ',', 'old_base_dn', ')', '# on commit carry out action; on rollback reverse rename', 'def', 'on_commit', '(', 'obj', ')', ':', 'obj', '.', 'modify_dn', '(', 'dn', ',', 'new_rdn', ',', 'new_superior', '=', 'new_base_dn', ')', 'def', 'on_rollback', '(', 'obj', ')', ':', 'obj', '.', 'modify_dn', '(', 'newdn', ',', 'rdn', ',', 'new_superior', '=', 'old_base_dn', ')', 'return', 'self', '.', '_process', '(', 'on_commit', ',', 'on_rollback', ')'] | rename a dn in the ldap database; see ldap module. doesn't return a
result if transactions enabled. | ['rename', 'a', 'dn', 'in', 'the', 'ldap', 'database', ';', 'see', 'ldap', 'module', '.', 'doesn', 't', 'return', 'a', 'result', 'if', 'transactions', 'enabled', '.'] | train | https://github.com/Karaage-Cluster/python-tldap/blob/61f1af74a3648cb6491e7eeb1ee2eb395d67bf59/tldap/backend/fake_transactions.py#L349-L385 |
6,028 | googleads/googleads-python-lib | examples/adwords/v201809/shopping/add_shopping_campaign_for_showcase_ads.py | ProductPartitionHelper.CreateUnit | def CreateUnit(self, parent=None, value=None, bid_amount=None):
"""Creates a unit node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
bid_amount: The amount to bid for matching products, in micros.
Returns:
A new unit node.
"""
unit = {
'xsi_type': 'ProductPartition',
'partitionType': 'UNIT'
}
# The root node has neither a parent nor a value.
if parent is not None:
unit['parentCriterionId'] = parent['id']
unit['caseValue'] = value
if bid_amount is not None and bid_amount > 0:
# Note: Showcase ads require that the campaign has a ManualCpc
# BiddingStrategyConfiguration.
bidding_strategy_configuration = {
'bids': [{
'xsi_type': 'CpcBid',
'bid': {
'xsi_type': 'Money',
'microAmount': str(bid_amount)
}
}]
}
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'biddingStrategyConfiguration': bidding_strategy_configuration
}
else:
adgroup_criterion = {
'xsi_type': 'NegativeAdGroupCriterion'
}
adgroup_criterion['adGroupId'] = self.adgroup_id
adgroup_criterion['criterion'] = unit
self.CreateAddOperation(adgroup_criterion)
return unit | python | def CreateUnit(self, parent=None, value=None, bid_amount=None):
"""Creates a unit node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
bid_amount: The amount to bid for matching products, in micros.
Returns:
A new unit node.
"""
unit = {
'xsi_type': 'ProductPartition',
'partitionType': 'UNIT'
}
# The root node has neither a parent nor a value.
if parent is not None:
unit['parentCriterionId'] = parent['id']
unit['caseValue'] = value
if bid_amount is not None and bid_amount > 0:
# Note: Showcase ads require that the campaign has a ManualCpc
# BiddingStrategyConfiguration.
bidding_strategy_configuration = {
'bids': [{
'xsi_type': 'CpcBid',
'bid': {
'xsi_type': 'Money',
'microAmount': str(bid_amount)
}
}]
}
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'biddingStrategyConfiguration': bidding_strategy_configuration
}
else:
adgroup_criterion = {
'xsi_type': 'NegativeAdGroupCriterion'
}
adgroup_criterion['adGroupId'] = self.adgroup_id
adgroup_criterion['criterion'] = unit
self.CreateAddOperation(adgroup_criterion)
return unit | ['def', 'CreateUnit', '(', 'self', ',', 'parent', '=', 'None', ',', 'value', '=', 'None', ',', 'bid_amount', '=', 'None', ')', ':', 'unit', '=', '{', "'xsi_type'", ':', "'ProductPartition'", ',', "'partitionType'", ':', "'UNIT'", '}', '# The root node has neither a parent nor a value.', 'if', 'parent', 'is', 'not', 'None', ':', 'unit', '[', "'parentCriterionId'", ']', '=', 'parent', '[', "'id'", ']', 'unit', '[', "'caseValue'", ']', '=', 'value', 'if', 'bid_amount', 'is', 'not', 'None', 'and', 'bid_amount', '>', '0', ':', '# Note: Showcase ads require that the campaign has a ManualCpc', '# BiddingStrategyConfiguration.', 'bidding_strategy_configuration', '=', '{', "'bids'", ':', '[', '{', "'xsi_type'", ':', "'CpcBid'", ',', "'bid'", ':', '{', "'xsi_type'", ':', "'Money'", ',', "'microAmount'", ':', 'str', '(', 'bid_amount', ')', '}', '}', ']', '}', 'adgroup_criterion', '=', '{', "'xsi_type'", ':', "'BiddableAdGroupCriterion'", ',', "'biddingStrategyConfiguration'", ':', 'bidding_strategy_configuration', '}', 'else', ':', 'adgroup_criterion', '=', '{', "'xsi_type'", ':', "'NegativeAdGroupCriterion'", '}', 'adgroup_criterion', '[', "'adGroupId'", ']', '=', 'self', '.', 'adgroup_id', 'adgroup_criterion', '[', "'criterion'", ']', '=', 'unit', 'self', '.', 'CreateAddOperation', '(', 'adgroup_criterion', ')', 'return', 'unit'] | Creates a unit node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
bid_amount: The amount to bid for matching products, in micros.
Returns:
A new unit node. | ['Creates', 'a', 'unit', 'node', '.'] | train | https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/examples/adwords/v201809/shopping/add_shopping_campaign_for_showcase_ads.py#L91-L138 |
6,029 | opendatateam/udata | udata/harvest/backends/base.py | BaseBackend.get_dataset | def get_dataset(self, remote_id):
'''Get or create a dataset given its remote ID (and its source)
We first try to match `source_id` to be source domain independent
'''
dataset = Dataset.objects(__raw__={
'extras.harvest:remote_id': remote_id,
'$or': [
{'extras.harvest:domain': self.source.domain},
{'extras.harvest:source_id': str(self.source.id)},
],
}).first()
return dataset or Dataset() | python | def get_dataset(self, remote_id):
'''Get or create a dataset given its remote ID (and its source)
We first try to match `source_id` to be source domain independent
'''
dataset = Dataset.objects(__raw__={
'extras.harvest:remote_id': remote_id,
'$or': [
{'extras.harvest:domain': self.source.domain},
{'extras.harvest:source_id': str(self.source.id)},
],
}).first()
return dataset or Dataset() | ['def', 'get_dataset', '(', 'self', ',', 'remote_id', ')', ':', 'dataset', '=', 'Dataset', '.', 'objects', '(', '__raw__', '=', '{', "'extras.harvest:remote_id'", ':', 'remote_id', ',', "'$or'", ':', '[', '{', "'extras.harvest:domain'", ':', 'self', '.', 'source', '.', 'domain', '}', ',', '{', "'extras.harvest:source_id'", ':', 'str', '(', 'self', '.', 'source', '.', 'id', ')', '}', ',', ']', ',', '}', ')', '.', 'first', '(', ')', 'return', 'dataset', 'or', 'Dataset', '(', ')'] | Get or create a dataset given its remote ID (and its source)
We first try to match `source_id` to be source domain independent | ['Get', 'or', 'create', 'a', 'dataset', 'given', 'its', 'remote', 'ID', '(', 'and', 'its', 'source', ')', 'We', 'first', 'try', 'to', 'match', 'source_id', 'to', 'be', 'source', 'domain', 'independent'] | train | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/backends/base.py#L252-L263 |
6,030 | saltstack/salt | salt/fileserver/hgfs.py | envs | def envs(ignore_cache=False):
'''
Return a list of refs that can be used as environments
'''
if not ignore_cache:
env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p')
cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
if cache_match is not None:
return cache_match
ret = set()
for repo in init():
repo['repo'].open()
if repo['branch_method'] in ('branches', 'mixed'):
for branch in _all_branches(repo['repo']):
branch_name = branch[0]
if branch_name == repo['base']:
branch_name = 'base'
ret.add(branch_name)
if repo['branch_method'] in ('bookmarks', 'mixed'):
for bookmark in _all_bookmarks(repo['repo']):
bookmark_name = bookmark[0]
if bookmark_name == repo['base']:
bookmark_name = 'base'
ret.add(bookmark_name)
ret.update([x[0] for x in _all_tags(repo['repo'])])
repo['repo'].close()
return [x for x in sorted(ret) if _env_is_exposed(x)] | python | def envs(ignore_cache=False):
'''
Return a list of refs that can be used as environments
'''
if not ignore_cache:
env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p')
cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
if cache_match is not None:
return cache_match
ret = set()
for repo in init():
repo['repo'].open()
if repo['branch_method'] in ('branches', 'mixed'):
for branch in _all_branches(repo['repo']):
branch_name = branch[0]
if branch_name == repo['base']:
branch_name = 'base'
ret.add(branch_name)
if repo['branch_method'] in ('bookmarks', 'mixed'):
for bookmark in _all_bookmarks(repo['repo']):
bookmark_name = bookmark[0]
if bookmark_name == repo['base']:
bookmark_name = 'base'
ret.add(bookmark_name)
ret.update([x[0] for x in _all_tags(repo['repo'])])
repo['repo'].close()
return [x for x in sorted(ret) if _env_is_exposed(x)] | ['def', 'envs', '(', 'ignore_cache', '=', 'False', ')', ':', 'if', 'not', 'ignore_cache', ':', 'env_cache', '=', 'os', '.', 'path', '.', 'join', '(', '__opts__', '[', "'cachedir'", ']', ',', "'hgfs/envs.p'", ')', 'cache_match', '=', 'salt', '.', 'fileserver', '.', 'check_env_cache', '(', '__opts__', ',', 'env_cache', ')', 'if', 'cache_match', 'is', 'not', 'None', ':', 'return', 'cache_match', 'ret', '=', 'set', '(', ')', 'for', 'repo', 'in', 'init', '(', ')', ':', 'repo', '[', "'repo'", ']', '.', 'open', '(', ')', 'if', 'repo', '[', "'branch_method'", ']', 'in', '(', "'branches'", ',', "'mixed'", ')', ':', 'for', 'branch', 'in', '_all_branches', '(', 'repo', '[', "'repo'", ']', ')', ':', 'branch_name', '=', 'branch', '[', '0', ']', 'if', 'branch_name', '==', 'repo', '[', "'base'", ']', ':', 'branch_name', '=', "'base'", 'ret', '.', 'add', '(', 'branch_name', ')', 'if', 'repo', '[', "'branch_method'", ']', 'in', '(', "'bookmarks'", ',', "'mixed'", ')', ':', 'for', 'bookmark', 'in', '_all_bookmarks', '(', 'repo', '[', "'repo'", ']', ')', ':', 'bookmark_name', '=', 'bookmark', '[', '0', ']', 'if', 'bookmark_name', '==', 'repo', '[', "'base'", ']', ':', 'bookmark_name', '=', "'base'", 'ret', '.', 'add', '(', 'bookmark_name', ')', 'ret', '.', 'update', '(', '[', 'x', '[', '0', ']', 'for', 'x', 'in', '_all_tags', '(', 'repo', '[', "'repo'", ']', ')', ']', ')', 'repo', '[', "'repo'", ']', '.', 'close', '(', ')', 'return', '[', 'x', 'for', 'x', 'in', 'sorted', '(', 'ret', ')', 'if', '_env_is_exposed', '(', 'x', ')', ']'] | Return a list of refs that can be used as environments | ['Return', 'a', 'list', 'of', 'refs', 'that', 'can', 'be', 'used', 'as', 'environments'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L610-L636 |
6,031 | thunder-project/thunder | thunder/series/series.py | Series.convolve | def convolve(self, signal, mode='full'):
"""
Convolve series data against another signal.
Parameters
----------
signal : array
Signal to convolve with (must be 1D)
mode : str, optional, default='full'
Mode of convolution, options are 'full', 'same', and 'valid'
"""
from numpy import convolve
s = asarray(signal)
n = size(self.index)
m = size(s)
# use expected lengths to make a new index
if mode == 'same':
newmax = max(n, m)
elif mode == 'valid':
newmax = max(m, n) - min(m, n) + 1
else:
newmax = n+m-1
newindex = arange(0, newmax)
return self.map(lambda x: convolve(x, signal, mode), index=newindex) | python | def convolve(self, signal, mode='full'):
"""
Convolve series data against another signal.
Parameters
----------
signal : array
Signal to convolve with (must be 1D)
mode : str, optional, default='full'
Mode of convolution, options are 'full', 'same', and 'valid'
"""
from numpy import convolve
s = asarray(signal)
n = size(self.index)
m = size(s)
# use expected lengths to make a new index
if mode == 'same':
newmax = max(n, m)
elif mode == 'valid':
newmax = max(m, n) - min(m, n) + 1
else:
newmax = n+m-1
newindex = arange(0, newmax)
return self.map(lambda x: convolve(x, signal, mode), index=newindex) | ['def', 'convolve', '(', 'self', ',', 'signal', ',', 'mode', '=', "'full'", ')', ':', 'from', 'numpy', 'import', 'convolve', 's', '=', 'asarray', '(', 'signal', ')', 'n', '=', 'size', '(', 'self', '.', 'index', ')', 'm', '=', 'size', '(', 's', ')', '# use expected lengths to make a new index', 'if', 'mode', '==', "'same'", ':', 'newmax', '=', 'max', '(', 'n', ',', 'm', ')', 'elif', 'mode', '==', "'valid'", ':', 'newmax', '=', 'max', '(', 'm', ',', 'n', ')', '-', 'min', '(', 'm', ',', 'n', ')', '+', '1', 'else', ':', 'newmax', '=', 'n', '+', 'm', '-', '1', 'newindex', '=', 'arange', '(', '0', ',', 'newmax', ')', 'return', 'self', '.', 'map', '(', 'lambda', 'x', ':', 'convolve', '(', 'x', ',', 'signal', ',', 'mode', ')', ',', 'index', '=', 'newindex', ')'] | Convolve series data against another signal.
Parameters
----------
signal : array
Signal to convolve with (must be 1D)
mode : str, optional, default='full'
Mode of convolution, options are 'full', 'same', and 'valid' | ['Convolve', 'series', 'data', 'against', 'another', 'signal', '.'] | train | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L914-L943 |
6,032 | pyQode/pyqode.core | examples/notepad/notepad/main_window.py | MainWindow.on_save_as | def on_save_as(self):
"""
Save the current editor document as.
"""
self.tabWidget.save_current_as()
self._update_status_bar(self.tabWidget.current_widget()) | python | def on_save_as(self):
"""
Save the current editor document as.
"""
self.tabWidget.save_current_as()
self._update_status_bar(self.tabWidget.current_widget()) | ['def', 'on_save_as', '(', 'self', ')', ':', 'self', '.', 'tabWidget', '.', 'save_current_as', '(', ')', 'self', '.', '_update_status_bar', '(', 'self', '.', 'tabWidget', '.', 'current_widget', '(', ')', ')'] | Save the current editor document as. | ['Save', 'the', 'current', 'editor', 'document', 'as', '.'] | train | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/examples/notepad/notepad/main_window.py#L181-L186 |
6,033 | saltstack/salt | salt/modules/nfs3.py | reload_exports | def reload_exports():
'''
Trigger a reload of the exports file to apply changes
CLI Example:
.. code-block:: bash
salt '*' nfs3.reload_exports
'''
ret = {}
command = 'exportfs -r'
output = __salt__['cmd.run_all'](command)
ret['stdout'] = output['stdout']
ret['stderr'] = output['stderr']
# exportfs always returns 0, so retcode is useless
# We will consider it an error if stderr is nonempty
ret['result'] = output['stderr'] == ''
return ret | python | def reload_exports():
'''
Trigger a reload of the exports file to apply changes
CLI Example:
.. code-block:: bash
salt '*' nfs3.reload_exports
'''
ret = {}
command = 'exportfs -r'
output = __salt__['cmd.run_all'](command)
ret['stdout'] = output['stdout']
ret['stderr'] = output['stderr']
# exportfs always returns 0, so retcode is useless
# We will consider it an error if stderr is nonempty
ret['result'] = output['stderr'] == ''
return ret | ['def', 'reload_exports', '(', ')', ':', 'ret', '=', '{', '}', 'command', '=', "'exportfs -r'", 'output', '=', '__salt__', '[', "'cmd.run_all'", ']', '(', 'command', ')', 'ret', '[', "'stdout'", ']', '=', 'output', '[', "'stdout'", ']', 'ret', '[', "'stderr'", ']', '=', 'output', '[', "'stderr'", ']', '# exportfs always returns 0, so retcode is useless', '# We will consider it an error if stderr is nonempty', 'ret', '[', "'result'", ']', '=', 'output', '[', "'stderr'", ']', '==', "''", 'return', 'ret'] | Trigger a reload of the exports file to apply changes
CLI Example:
.. code-block:: bash
salt '*' nfs3.reload_exports | ['Trigger', 'a', 'reload', 'of', 'the', 'exports', 'file', 'to', 'apply', 'changes'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nfs3.py#L133-L154 |
6,034 | Azure/azure-cosmos-python | azure/cosmos/consistent_hash_ring.py | _ConsistentHashRing._GetSerializedPartitionList | def _GetSerializedPartitionList(self):
"""Gets the serialized version of the ConsistentRing.
Added this helper for the test code.
"""
partition_list = list()
for part in self.partitions:
partition_list.append((part.node, unpack("<L", part.hash_value)[0]))
return partition_list | python | def _GetSerializedPartitionList(self):
"""Gets the serialized version of the ConsistentRing.
Added this helper for the test code.
"""
partition_list = list()
for part in self.partitions:
partition_list.append((part.node, unpack("<L", part.hash_value)[0]))
return partition_list | ['def', '_GetSerializedPartitionList', '(', 'self', ')', ':', 'partition_list', '=', 'list', '(', ')', 'for', 'part', 'in', 'self', '.', 'partitions', ':', 'partition_list', '.', 'append', '(', '(', 'part', '.', 'node', ',', 'unpack', '(', '"<L"', ',', 'part', '.', 'hash_value', ')', '[', '0', ']', ')', ')', 'return', 'partition_list'] | Gets the serialized version of the ConsistentRing.
Added this helper for the test code. | ['Gets', 'the', 'serialized', 'version', 'of', 'the', 'ConsistentRing', '.', 'Added', 'this', 'helper', 'for', 'the', 'test', 'code', '.'] | train | https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/consistent_hash_ring.py#L99-L108 |
6,035 | jkwill87/mapi | mapi/providers.py | Provider._year_expand | def _year_expand(s):
""" Parses a year or dash-delimeted year range
"""
regex = r"^((?:19|20)\d{2})?(\s*-\s*)?((?:19|20)\d{2})?$"
try:
start, dash, end = match(regex, ustr(s)).groups()
start = start or 1900
end = end or 2099
except AttributeError:
return 1900, 2099
return (int(start), int(end)) if dash else (int(start), int(start)) | python | def _year_expand(s):
""" Parses a year or dash-delimeted year range
"""
regex = r"^((?:19|20)\d{2})?(\s*-\s*)?((?:19|20)\d{2})?$"
try:
start, dash, end = match(regex, ustr(s)).groups()
start = start or 1900
end = end or 2099
except AttributeError:
return 1900, 2099
return (int(start), int(end)) if dash else (int(start), int(start)) | ['def', '_year_expand', '(', 's', ')', ':', 'regex', '=', 'r"^((?:19|20)\\d{2})?(\\s*-\\s*)?((?:19|20)\\d{2})?$"', 'try', ':', 'start', ',', 'dash', ',', 'end', '=', 'match', '(', 'regex', ',', 'ustr', '(', 's', ')', ')', '.', 'groups', '(', ')', 'start', '=', 'start', 'or', '1900', 'end', '=', 'end', 'or', '2099', 'except', 'AttributeError', ':', 'return', '1900', ',', '2099', 'return', '(', 'int', '(', 'start', ')', ',', 'int', '(', 'end', ')', ')', 'if', 'dash', 'else', '(', 'int', '(', 'start', ')', ',', 'int', '(', 'start', ')', ')'] | Parses a year or dash-delimeted year range | ['Parses', 'a', 'year', 'or', 'dash', '-', 'delimeted', 'year', 'range'] | train | https://github.com/jkwill87/mapi/blob/730bf57c12aecaf49e18c15bf2b35af7f554b3cc/mapi/providers.py#L68-L78 |
6,036 | hover2pi/svo_filters | svo_filters/svo.py | Filter.rsr | def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr | python | def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr | ['def', 'rsr', '(', 'self', ')', ':', 'arr', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'wave', '.', 'value', ',', 'self', '.', 'throughput', ']', ')', '.', 'swapaxes', '(', '0', ',', '1', ')', 'return', 'arr'] | A getter for the relative spectral response (rsr) curve | ['A', 'getter', 'for', 'the', 'relative', 'spectral', 'response', '(', 'rsr', ')', 'curve'] | train | https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L680-L684 |
6,037 | jopohl/urh | src/urh/controller/GeneratorTabController.py | GeneratorTabController.bootstrap_modulator | def bootstrap_modulator(self, protocol: ProtocolAnalyzer):
"""
Set initial parameters for default modulator if it was not edited by user previously
:return:
"""
if len(self.modulators) != 1 or len(self.table_model.protocol.messages) == 0:
return
modulator = self.modulators[0]
modulator.samples_per_bit = protocol.messages[0].bit_len
if protocol.signal:
modulator.sample_rate = protocol.signal.sample_rate
modulator.modulation_type = protocol.signal.modulation_type
auto_freq = modulator.estimate_carrier_frequency(protocol.signal, protocol)
if auto_freq is not None and auto_freq != 0:
modulator.carrier_freq_hz = auto_freq
self.show_modulation_info() | python | def bootstrap_modulator(self, protocol: ProtocolAnalyzer):
"""
Set initial parameters for default modulator if it was not edited by user previously
:return:
"""
if len(self.modulators) != 1 or len(self.table_model.protocol.messages) == 0:
return
modulator = self.modulators[0]
modulator.samples_per_bit = protocol.messages[0].bit_len
if protocol.signal:
modulator.sample_rate = protocol.signal.sample_rate
modulator.modulation_type = protocol.signal.modulation_type
auto_freq = modulator.estimate_carrier_frequency(protocol.signal, protocol)
if auto_freq is not None and auto_freq != 0:
modulator.carrier_freq_hz = auto_freq
self.show_modulation_info() | ['def', 'bootstrap_modulator', '(', 'self', ',', 'protocol', ':', 'ProtocolAnalyzer', ')', ':', 'if', 'len', '(', 'self', '.', 'modulators', ')', '!=', '1', 'or', 'len', '(', 'self', '.', 'table_model', '.', 'protocol', '.', 'messages', ')', '==', '0', ':', 'return', 'modulator', '=', 'self', '.', 'modulators', '[', '0', ']', 'modulator', '.', 'samples_per_bit', '=', 'protocol', '.', 'messages', '[', '0', ']', '.', 'bit_len', 'if', 'protocol', '.', 'signal', ':', 'modulator', '.', 'sample_rate', '=', 'protocol', '.', 'signal', '.', 'sample_rate', 'modulator', '.', 'modulation_type', '=', 'protocol', '.', 'signal', '.', 'modulation_type', 'auto_freq', '=', 'modulator', '.', 'estimate_carrier_frequency', '(', 'protocol', '.', 'signal', ',', 'protocol', ')', 'if', 'auto_freq', 'is', 'not', 'None', 'and', 'auto_freq', '!=', '0', ':', 'modulator', '.', 'carrier_freq_hz', '=', 'auto_freq', 'self', '.', 'show_modulation_info', '(', ')'] | Set initial parameters for default modulator if it was not edited by user previously
:return: | ['Set', 'initial', 'parameters', 'for', 'default', 'modulator', 'if', 'it', 'was', 'not', 'edited', 'by', 'user', 'previously', ':', 'return', ':'] | train | https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/controller/GeneratorTabController.py#L206-L224 |
6,038 | KE-works/pykechain | pykechain/client.py | Client.activity | def activity(self, *args, **kwargs):
# type: (*Any, **Any) -> Activity
"""Search for a single activity.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:param pk: id (primary key) of the activity to retrieve
:type pk: basestring or None
:param name: filter the activities by name
:type name: basestring or None
:param scope: filter by scope id
:type scope: basestring or None
:return: a single :class:`models.Activity`
:raises NotFoundError: When no `Activity` is found
:raises MultipleFoundError: When more than a single `Activity` is found
"""
_activities = self.activities(*args, **kwargs)
if len(_activities) == 0:
raise NotFoundError("No activity fits criteria")
if len(_activities) != 1:
raise MultipleFoundError("Multiple activities fit criteria")
return _activities[0] | python | def activity(self, *args, **kwargs):
# type: (*Any, **Any) -> Activity
"""Search for a single activity.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:param pk: id (primary key) of the activity to retrieve
:type pk: basestring or None
:param name: filter the activities by name
:type name: basestring or None
:param scope: filter by scope id
:type scope: basestring or None
:return: a single :class:`models.Activity`
:raises NotFoundError: When no `Activity` is found
:raises MultipleFoundError: When more than a single `Activity` is found
"""
_activities = self.activities(*args, **kwargs)
if len(_activities) == 0:
raise NotFoundError("No activity fits criteria")
if len(_activities) != 1:
raise MultipleFoundError("Multiple activities fit criteria")
return _activities[0] | ['def', 'activity', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# type: (*Any, **Any) -> Activity', '_activities', '=', 'self', '.', 'activities', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'len', '(', '_activities', ')', '==', '0', ':', 'raise', 'NotFoundError', '(', '"No activity fits criteria"', ')', 'if', 'len', '(', '_activities', ')', '!=', '1', ':', 'raise', 'MultipleFoundError', '(', '"Multiple activities fit criteria"', ')', 'return', '_activities', '[', '0', ']'] | Search for a single activity.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:param pk: id (primary key) of the activity to retrieve
:type pk: basestring or None
:param name: filter the activities by name
:type name: basestring or None
:param scope: filter by scope id
:type scope: basestring or None
:return: a single :class:`models.Activity`
:raises NotFoundError: When no `Activity` is found
:raises MultipleFoundError: When more than a single `Activity` is found | ['Search', 'for', 'a', 'single', 'activity', '.'] | train | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/client.py#L427-L451 |
6,039 | rosenbrockc/fortpy | fortpy/interop/converter.py | FileTemplate._line | def _line(self, element):
"""Parses the XML element as a single line entry in the input file."""
for v in _get_xml_version(element):
if "id" in element.attrib:
tline = TemplateLine(element, None, self.versions[v].comment)
self.versions[v].entries[tline.identifier] = tline
self.versions[v].order.append(tline.identifier)
else:
msg.warn("no id element in {}. Ignored. (_line)".format(element)) | python | def _line(self, element):
"""Parses the XML element as a single line entry in the input file."""
for v in _get_xml_version(element):
if "id" in element.attrib:
tline = TemplateLine(element, None, self.versions[v].comment)
self.versions[v].entries[tline.identifier] = tline
self.versions[v].order.append(tline.identifier)
else:
msg.warn("no id element in {}. Ignored. (_line)".format(element)) | ['def', '_line', '(', 'self', ',', 'element', ')', ':', 'for', 'v', 'in', '_get_xml_version', '(', 'element', ')', ':', 'if', '"id"', 'in', 'element', '.', 'attrib', ':', 'tline', '=', 'TemplateLine', '(', 'element', ',', 'None', ',', 'self', '.', 'versions', '[', 'v', ']', '.', 'comment', ')', 'self', '.', 'versions', '[', 'v', ']', '.', 'entries', '[', 'tline', '.', 'identifier', ']', '=', 'tline', 'self', '.', 'versions', '[', 'v', ']', '.', 'order', '.', 'append', '(', 'tline', '.', 'identifier', ')', 'else', ':', 'msg', '.', 'warn', '(', '"no id element in {}. Ignored. (_line)"', '.', 'format', '(', 'element', ')', ')'] | Parses the XML element as a single line entry in the input file. | ['Parses', 'the', 'XML', 'element', 'as', 'a', 'single', 'line', 'entry', 'in', 'the', 'input', 'file', '.'] | train | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/interop/converter.py#L359-L367 |
6,040 | Shapeways/coyote_framework | coyote_framework/mixins/URLValidator.py | validate_urls | def validate_urls(urls, allowed_response_codes=None):
"""Validates that a list of urls can be opened and each responds with an allowed response code
urls -- the list of urls to ping
allowed_response_codes -- a list of response codes that the validator will ignore
"""
for url in urls:
validate_url(url, allowed_response_codes=allowed_response_codes)
return True | python | def validate_urls(urls, allowed_response_codes=None):
"""Validates that a list of urls can be opened and each responds with an allowed response code
urls -- the list of urls to ping
allowed_response_codes -- a list of response codes that the validator will ignore
"""
for url in urls:
validate_url(url, allowed_response_codes=allowed_response_codes)
return True | ['def', 'validate_urls', '(', 'urls', ',', 'allowed_response_codes', '=', 'None', ')', ':', 'for', 'url', 'in', 'urls', ':', 'validate_url', '(', 'url', ',', 'allowed_response_codes', '=', 'allowed_response_codes', ')', 'return', 'True'] | Validates that a list of urls can be opened and each responds with an allowed response code
urls -- the list of urls to ping
allowed_response_codes -- a list of response codes that the validator will ignore | ['Validates', 'that', 'a', 'list', 'of', 'urls', 'can', 'be', 'opened', 'and', 'each', 'responds', 'with', 'an', 'allowed', 'response', 'code'] | train | https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/mixins/URLValidator.py#L29-L38 |
6,041 | pypa/pipenv | pipenv/vendor/distlib/_backport/tarfile.py | TarFile.getmember | def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo | python | def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo | ['def', 'getmember', '(', 'self', ',', 'name', ')', ':', 'tarinfo', '=', 'self', '.', '_getmember', '(', 'name', ')', 'if', 'tarinfo', 'is', 'None', ':', 'raise', 'KeyError', '(', '"filename %r not found"', '%', 'name', ')', 'return', 'tarinfo'] | Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version. | ['Return', 'a', 'TarInfo', 'object', 'for', 'member', 'name', '.', 'If', 'name', 'can', 'not', 'be', 'found', 'in', 'the', 'archive', 'KeyError', 'is', 'raised', '.', 'If', 'a', 'member', 'occurs', 'more', 'than', 'once', 'in', 'the', 'archive', 'its', 'last', 'occurrence', 'is', 'assumed', 'to', 'be', 'the', 'most', 'up', '-', 'to', '-', 'date', 'version', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1884-L1893 |
6,042 | nerdvegas/rez | src/rez/utils/platform_.py | Platform.physical_cores | def physical_cores(self):
"""Return the number of physical cpu cores on the system."""
try:
return self._physical_cores_base()
except Exception as e:
from rez.utils.logging_ import print_error
print_error("Error detecting physical core count, defaulting to 1: %s"
% str(e))
return 1 | python | def physical_cores(self):
"""Return the number of physical cpu cores on the system."""
try:
return self._physical_cores_base()
except Exception as e:
from rez.utils.logging_ import print_error
print_error("Error detecting physical core count, defaulting to 1: %s"
% str(e))
return 1 | ['def', 'physical_cores', '(', 'self', ')', ':', 'try', ':', 'return', 'self', '.', '_physical_cores_base', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'from', 'rez', '.', 'utils', '.', 'logging_', 'import', 'print_error', 'print_error', '(', '"Error detecting physical core count, defaulting to 1: %s"', '%', 'str', '(', 'e', ')', ')', 'return', '1'] | Return the number of physical cpu cores on the system. | ['Return', 'the', 'number', 'of', 'physical', 'cpu', 'cores', 'on', 'the', 'system', '.'] | train | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/platform_.py#L82-L90 |
6,043 | tensorflow/datasets | tensorflow_datasets/core/api_utils.py | disallow_positional_args | def disallow_positional_args(wrapped=None, allowed=None):
"""Requires function to be called using keyword arguments."""
# See
# https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments
# for decorator pattern.
if wrapped is None:
return functools.partial(disallow_positional_args, allowed=allowed)
@wrapt.decorator
def disallow_positional_args_dec(fn, instance, args, kwargs):
ismethod = instance is not None
_check_no_positional(fn, args, ismethod, allowed=allowed)
_check_required(fn, kwargs)
return fn(*args, **kwargs)
return disallow_positional_args_dec(wrapped) | python | def disallow_positional_args(wrapped=None, allowed=None):
"""Requires function to be called using keyword arguments."""
# See
# https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments
# for decorator pattern.
if wrapped is None:
return functools.partial(disallow_positional_args, allowed=allowed)
@wrapt.decorator
def disallow_positional_args_dec(fn, instance, args, kwargs):
ismethod = instance is not None
_check_no_positional(fn, args, ismethod, allowed=allowed)
_check_required(fn, kwargs)
return fn(*args, **kwargs)
return disallow_positional_args_dec(wrapped) | ['def', 'disallow_positional_args', '(', 'wrapped', '=', 'None', ',', 'allowed', '=', 'None', ')', ':', '# See', '# https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments', '# for decorator pattern.', 'if', 'wrapped', 'is', 'None', ':', 'return', 'functools', '.', 'partial', '(', 'disallow_positional_args', ',', 'allowed', '=', 'allowed', ')', '@', 'wrapt', '.', 'decorator', 'def', 'disallow_positional_args_dec', '(', 'fn', ',', 'instance', ',', 'args', ',', 'kwargs', ')', ':', 'ismethod', '=', 'instance', 'is', 'not', 'None', '_check_no_positional', '(', 'fn', ',', 'args', ',', 'ismethod', ',', 'allowed', '=', 'allowed', ')', '_check_required', '(', 'fn', ',', 'kwargs', ')', 'return', 'fn', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'disallow_positional_args_dec', '(', 'wrapped', ')'] | Requires function to be called using keyword arguments. | ['Requires', 'function', 'to', 'be', 'called', 'using', 'keyword', 'arguments', '.'] | train | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/api_utils.py#L39-L54 |
6,044 | Gandi/gandi.cli | gandi/cli/modules/network.py | Iface._detach | def _detach(cls, iface_id):
""" Detach an iface from a vm. """
iface = cls._info(iface_id)
opers = []
vm_id = iface.get('vm_id')
if vm_id:
cls.echo('The iface is still attached to the vm %s.' % vm_id)
cls.echo('Will detach it.')
opers.append(cls.call('hosting.vm.iface_detach', vm_id, iface_id))
return opers | python | def _detach(cls, iface_id):
""" Detach an iface from a vm. """
iface = cls._info(iface_id)
opers = []
vm_id = iface.get('vm_id')
if vm_id:
cls.echo('The iface is still attached to the vm %s.' % vm_id)
cls.echo('Will detach it.')
opers.append(cls.call('hosting.vm.iface_detach', vm_id, iface_id))
return opers | ['def', '_detach', '(', 'cls', ',', 'iface_id', ')', ':', 'iface', '=', 'cls', '.', '_info', '(', 'iface_id', ')', 'opers', '=', '[', ']', 'vm_id', '=', 'iface', '.', 'get', '(', "'vm_id'", ')', 'if', 'vm_id', ':', 'cls', '.', 'echo', '(', "'The iface is still attached to the vm %s.'", '%', 'vm_id', ')', 'cls', '.', 'echo', '(', "'Will detach it.'", ')', 'opers', '.', 'append', '(', 'cls', '.', 'call', '(', "'hosting.vm.iface_detach'", ',', 'vm_id', ',', 'iface_id', ')', ')', 'return', 'opers'] | Detach an iface from a vm. | ['Detach', 'an', 'iface', 'from', 'a', 'vm', '.'] | train | https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/network.py#L390-L399 |
6,045 | rootpy/rootpy | rootpy/context.py | thread_specific_tmprootdir | def thread_specific_tmprootdir():
"""
Context manager which makes a thread specific gDirectory to avoid
interfering with the current file.
Use cases:
A TTree Draw function which doesn't want to interfere with whatever
gDirectory happens to be.
Multi-threading where there are two threads creating objects with the
same name which must reside in a directory. (again, this happens with
TTree draw)
"""
with preserve_current_directory():
dname = "rootpy-tmp/thread/{0}".format(
threading.current_thread().ident)
d = ROOT.gROOT.mkdir(dname)
if not d:
d = ROOT.gROOT.GetDirectory(dname)
assert d, "Unexpected failure, can't cd to tmpdir."
d.cd()
yield d | python | def thread_specific_tmprootdir():
"""
Context manager which makes a thread specific gDirectory to avoid
interfering with the current file.
Use cases:
A TTree Draw function which doesn't want to interfere with whatever
gDirectory happens to be.
Multi-threading where there are two threads creating objects with the
same name which must reside in a directory. (again, this happens with
TTree draw)
"""
with preserve_current_directory():
dname = "rootpy-tmp/thread/{0}".format(
threading.current_thread().ident)
d = ROOT.gROOT.mkdir(dname)
if not d:
d = ROOT.gROOT.GetDirectory(dname)
assert d, "Unexpected failure, can't cd to tmpdir."
d.cd()
yield d | ['def', 'thread_specific_tmprootdir', '(', ')', ':', 'with', 'preserve_current_directory', '(', ')', ':', 'dname', '=', '"rootpy-tmp/thread/{0}"', '.', 'format', '(', 'threading', '.', 'current_thread', '(', ')', '.', 'ident', ')', 'd', '=', 'ROOT', '.', 'gROOT', '.', 'mkdir', '(', 'dname', ')', 'if', 'not', 'd', ':', 'd', '=', 'ROOT', '.', 'gROOT', '.', 'GetDirectory', '(', 'dname', ')', 'assert', 'd', ',', '"Unexpected failure, can\'t cd to tmpdir."', 'd', '.', 'cd', '(', ')', 'yield', 'd'] | Context manager which makes a thread specific gDirectory to avoid
interfering with the current file.
Use cases:
A TTree Draw function which doesn't want to interfere with whatever
gDirectory happens to be.
Multi-threading where there are two threads creating objects with the
same name which must reside in a directory. (again, this happens with
TTree draw) | ['Context', 'manager', 'which', 'makes', 'a', 'thread', 'specific', 'gDirectory', 'to', 'avoid', 'interfering', 'with', 'the', 'current', 'file', '.'] | train | https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/context.py#L120-L142 |
6,046 | apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | xmlReg.regexpExec | def regexpExec(self, content):
"""Check if the regular expression generates the value """
ret = libxml2mod.xmlRegexpExec(self._o, content)
return ret | python | def regexpExec(self, content):
"""Check if the regular expression generates the value """
ret = libxml2mod.xmlRegexpExec(self._o, content)
return ret | ['def', 'regexpExec', '(', 'self', ',', 'content', ')', ':', 'ret', '=', 'libxml2mod', '.', 'xmlRegexpExec', '(', 'self', '.', '_o', ',', 'content', ')', 'return', 'ret'] | Check if the regular expression generates the value | ['Check', 'if', 'the', 'regular', 'expression', 'generates', 'the', 'value'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L6196-L6199 |
6,047 | rochacbruno/python-pagseguro | pagseguro/__init__.py | PagSeguro.check_notification | def check_notification(self, code):
""" check a notification by its code """
response = self.get(url=self.config.NOTIFICATION_URL % code)
return PagSeguroNotificationResponse(response.content, self.config) | python | def check_notification(self, code):
""" check a notification by its code """
response = self.get(url=self.config.NOTIFICATION_URL % code)
return PagSeguroNotificationResponse(response.content, self.config) | ['def', 'check_notification', '(', 'self', ',', 'code', ')', ':', 'response', '=', 'self', '.', 'get', '(', 'url', '=', 'self', '.', 'config', '.', 'NOTIFICATION_URL', '%', 'code', ')', 'return', 'PagSeguroNotificationResponse', '(', 'response', '.', 'content', ',', 'self', '.', 'config', ')'] | check a notification by its code | ['check', 'a', 'notification', 'by', 'its', 'code'] | train | https://github.com/rochacbruno/python-pagseguro/blob/18a9ca3301783cb323e838574b59f9ddffa9a593/pagseguro/__init__.py#L232-L235 |
6,048 | ambitioninc/django-db-mutex | db_mutex/db_mutex.py | db_mutex.start | def start(self):
"""
Acquires the db mutex lock. Takes the necessary steps to delete any stale locks.
Throws a DBMutexError if it can't acquire the lock.
"""
# Delete any expired locks first
self.delete_expired_locks()
try:
with transaction.atomic():
self.lock = DBMutex.objects.create(lock_id=self.lock_id)
except IntegrityError:
raise DBMutexError('Could not acquire lock: {0}'.format(self.lock_id)) | python | def start(self):
"""
Acquires the db mutex lock. Takes the necessary steps to delete any stale locks.
Throws a DBMutexError if it can't acquire the lock.
"""
# Delete any expired locks first
self.delete_expired_locks()
try:
with transaction.atomic():
self.lock = DBMutex.objects.create(lock_id=self.lock_id)
except IntegrityError:
raise DBMutexError('Could not acquire lock: {0}'.format(self.lock_id)) | ['def', 'start', '(', 'self', ')', ':', '# Delete any expired locks first', 'self', '.', 'delete_expired_locks', '(', ')', 'try', ':', 'with', 'transaction', '.', 'atomic', '(', ')', ':', 'self', '.', 'lock', '=', 'DBMutex', '.', 'objects', '.', 'create', '(', 'lock_id', '=', 'self', '.', 'lock_id', ')', 'except', 'IntegrityError', ':', 'raise', 'DBMutexError', '(', "'Could not acquire lock: {0}'", '.', 'format', '(', 'self', '.', 'lock_id', ')', ')'] | Acquires the db mutex lock. Takes the necessary steps to delete any stale locks.
Throws a DBMutexError if it can't acquire the lock. | ['Acquires', 'the', 'db', 'mutex', 'lock', '.', 'Takes', 'the', 'necessary', 'steps', 'to', 'delete', 'any', 'stale', 'locks', '.', 'Throws', 'a', 'DBMutexError', 'if', 'it', 'can', 't', 'acquire', 'the', 'lock', '.'] | train | https://github.com/ambitioninc/django-db-mutex/blob/7112ed202fa3135afbb280273c1bdc8dee4e8426/db_mutex/db_mutex.py#L98-L109 |
6,049 | blockstack/virtualchain | virtualchain/lib/blockchain/bitcoin_blockchain/bits.py | btc_tx_get_hash | def btc_tx_get_hash( tx_serialized, hashcode=None ):
"""
Make a transaction hash (txid) from a hex tx, optionally along with a sighash.
This DOES NOT WORK for segwit transactions
"""
if btc_tx_is_segwit(tx_serialized):
raise ValueError('Segwit transaction: {}'.format(tx_serialized))
tx_bin = binascii.unhexlify(tx_serialized)
if hashcode:
return binascii.hexlify( hashing.bin_double_sha256(tx_bin + encoding.encode(int(hashcode), 256, 4)[::-1]) )
else:
return binascii.hexlify( hashing.bin_double_sha256(tx_bin)[::-1] ) | python | def btc_tx_get_hash( tx_serialized, hashcode=None ):
"""
Make a transaction hash (txid) from a hex tx, optionally along with a sighash.
This DOES NOT WORK for segwit transactions
"""
if btc_tx_is_segwit(tx_serialized):
raise ValueError('Segwit transaction: {}'.format(tx_serialized))
tx_bin = binascii.unhexlify(tx_serialized)
if hashcode:
return binascii.hexlify( hashing.bin_double_sha256(tx_bin + encoding.encode(int(hashcode), 256, 4)[::-1]) )
else:
return binascii.hexlify( hashing.bin_double_sha256(tx_bin)[::-1] ) | ['def', 'btc_tx_get_hash', '(', 'tx_serialized', ',', 'hashcode', '=', 'None', ')', ':', 'if', 'btc_tx_is_segwit', '(', 'tx_serialized', ')', ':', 'raise', 'ValueError', '(', "'Segwit transaction: {}'", '.', 'format', '(', 'tx_serialized', ')', ')', 'tx_bin', '=', 'binascii', '.', 'unhexlify', '(', 'tx_serialized', ')', 'if', 'hashcode', ':', 'return', 'binascii', '.', 'hexlify', '(', 'hashing', '.', 'bin_double_sha256', '(', 'tx_bin', '+', 'encoding', '.', 'encode', '(', 'int', '(', 'hashcode', ')', ',', '256', ',', '4', ')', '[', ':', ':', '-', '1', ']', ')', ')', 'else', ':', 'return', 'binascii', '.', 'hexlify', '(', 'hashing', '.', 'bin_double_sha256', '(', 'tx_bin', ')', '[', ':', ':', '-', '1', ']', ')'] | Make a transaction hash (txid) from a hex tx, optionally along with a sighash.
This DOES NOT WORK for segwit transactions | ['Make', 'a', 'transaction', 'hash', '(', 'txid', ')', 'from', 'a', 'hex', 'tx', 'optionally', 'along', 'with', 'a', 'sighash', '.', 'This', 'DOES', 'NOT', 'WORK', 'for', 'segwit', 'transactions'] | train | https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/bits.py#L509-L522 |
6,050 | HumanCellAtlas/dcp-cli | hca/upload/lib/api_client.py | ApiClient.validation_statuses | def validation_statuses(self, area_uuid):
"""
Get count of validation statuses for all files in upload_area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict with key for each state and value being the count of files in that state
:rtype: dict
:raises UploadApiException: if information could not be obtained
"""
path = "/area/{uuid}/validations".format(uuid=area_uuid)
result = self._make_request('get', path)
return result.json() | python | def validation_statuses(self, area_uuid):
"""
Get count of validation statuses for all files in upload_area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict with key for each state and value being the count of files in that state
:rtype: dict
:raises UploadApiException: if information could not be obtained
"""
path = "/area/{uuid}/validations".format(uuid=area_uuid)
result = self._make_request('get', path)
return result.json() | ['def', 'validation_statuses', '(', 'self', ',', 'area_uuid', ')', ':', 'path', '=', '"/area/{uuid}/validations"', '.', 'format', '(', 'uuid', '=', 'area_uuid', ')', 'result', '=', 'self', '.', '_make_request', '(', "'get'", ',', 'path', ')', 'return', 'result', '.', 'json', '(', ')'] | Get count of validation statuses for all files in upload_area
:param str area_uuid: A RFC4122-compliant ID for the upload area
:return: a dict with key for each state and value being the count of files in that state
:rtype: dict
:raises UploadApiException: if information could not be obtained | ['Get', 'count', 'of', 'validation', 'statuses', 'for', 'all', 'files', 'in', 'upload_area'] | train | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/lib/api_client.py#L214-L225 |
6,051 | gwastro/pycbc | pycbc/inference/models/gaussian_noise.py | GaussianNoise._lognl | def _lognl(self):
"""Computes the log likelihood assuming the data is noise.
Since this is a constant for Gaussian noise, this is only computed once
then stored.
"""
try:
return self.__lognl
except AttributeError:
det_lognls = {}
for (det, d) in self._data.items():
kmin = self._kmin
kmax = self._kmax
det_lognls[det] = -0.5 * d[kmin:kmax].inner(d[kmin:kmax]).real
self.__det_lognls = det_lognls
self.__lognl = sum(det_lognls.values())
return self.__lognl | python | def _lognl(self):
"""Computes the log likelihood assuming the data is noise.
Since this is a constant for Gaussian noise, this is only computed once
then stored.
"""
try:
return self.__lognl
except AttributeError:
det_lognls = {}
for (det, d) in self._data.items():
kmin = self._kmin
kmax = self._kmax
det_lognls[det] = -0.5 * d[kmin:kmax].inner(d[kmin:kmax]).real
self.__det_lognls = det_lognls
self.__lognl = sum(det_lognls.values())
return self.__lognl | ['def', '_lognl', '(', 'self', ')', ':', 'try', ':', 'return', 'self', '.', '__lognl', 'except', 'AttributeError', ':', 'det_lognls', '=', '{', '}', 'for', '(', 'det', ',', 'd', ')', 'in', 'self', '.', '_data', '.', 'items', '(', ')', ':', 'kmin', '=', 'self', '.', '_kmin', 'kmax', '=', 'self', '.', '_kmax', 'det_lognls', '[', 'det', ']', '=', '-', '0.5', '*', 'd', '[', 'kmin', ':', 'kmax', ']', '.', 'inner', '(', 'd', '[', 'kmin', ':', 'kmax', ']', ')', '.', 'real', 'self', '.', '__det_lognls', '=', 'det_lognls', 'self', '.', '__lognl', '=', 'sum', '(', 'det_lognls', '.', 'values', '(', ')', ')', 'return', 'self', '.', '__lognl'] | Computes the log likelihood assuming the data is noise.
Since this is a constant for Gaussian noise, this is only computed once
then stored. | ['Computes', 'the', 'log', 'likelihood', 'assuming', 'the', 'data', 'is', 'noise', '.'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/gaussian_noise.py#L284-L300 |
6,052 | scikit-hep/uproot | uproot/rootio.py | TKey.get | def get(self, dismiss=True):
"""Extract the object this key points to.
Objects are not read or decompressed until this function is explicitly called.
"""
try:
return _classof(self._context, self._fClassName).read(self._source, self._cursor.copied(), self._context, self)
finally:
if dismiss:
self._source.dismiss() | python | def get(self, dismiss=True):
"""Extract the object this key points to.
Objects are not read or decompressed until this function is explicitly called.
"""
try:
return _classof(self._context, self._fClassName).read(self._source, self._cursor.copied(), self._context, self)
finally:
if dismiss:
self._source.dismiss() | ['def', 'get', '(', 'self', ',', 'dismiss', '=', 'True', ')', ':', 'try', ':', 'return', '_classof', '(', 'self', '.', '_context', ',', 'self', '.', '_fClassName', ')', '.', 'read', '(', 'self', '.', '_source', ',', 'self', '.', '_cursor', '.', 'copied', '(', ')', ',', 'self', '.', '_context', ',', 'self', ')', 'finally', ':', 'if', 'dismiss', ':', 'self', '.', '_source', '.', 'dismiss', '(', ')'] | Extract the object this key points to.
Objects are not read or decompressed until this function is explicitly called. | ['Extract', 'the', 'object', 'this', 'key', 'points', 'to', '.'] | train | https://github.com/scikit-hep/uproot/blob/fc406827e36ed87cfb1062806e118f53fd3a3b0a/uproot/rootio.py#L883-L893 |
6,053 | rytilahti/python-eq3bt | eq3bt/eq3cli.py | cli | def cli(ctx, mac, debug):
""" Tool to query and modify the state of EQ3 BT smart thermostat. """
if debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
thermostat = Thermostat(mac)
thermostat.update()
ctx.obj = thermostat
if ctx.invoked_subcommand is None:
ctx.invoke(state) | python | def cli(ctx, mac, debug):
""" Tool to query and modify the state of EQ3 BT smart thermostat. """
if debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
thermostat = Thermostat(mac)
thermostat.update()
ctx.obj = thermostat
if ctx.invoked_subcommand is None:
ctx.invoke(state) | ['def', 'cli', '(', 'ctx', ',', 'mac', ',', 'debug', ')', ':', 'if', 'debug', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'DEBUG', ')', 'else', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ')', 'thermostat', '=', 'Thermostat', '(', 'mac', ')', 'thermostat', '.', 'update', '(', ')', 'ctx', '.', 'obj', '=', 'thermostat', 'if', 'ctx', '.', 'invoked_subcommand', 'is', 'None', ':', 'ctx', '.', 'invoke', '(', 'state', ')'] | Tool to query and modify the state of EQ3 BT smart thermostat. | ['Tool', 'to', 'query', 'and', 'modify', 'the', 'state', 'of', 'EQ3', 'BT', 'smart', 'thermostat', '.'] | train | https://github.com/rytilahti/python-eq3bt/blob/595459d9885920cf13b7059a1edd2cf38cede1f0/eq3bt/eq3cli.py#L26-L38 |
6,054 | cognitect/transit-python | transit/decoder.py | Decoder.decode_string | def decode_string(self, string, cache, as_map_key):
"""Decode a string - arguments follow the same convention as the
top-level 'decode' function.
"""
if is_cache_key(string):
return self.parse_string(cache.decode(string, as_map_key),
cache, as_map_key)
if is_cacheable(string, as_map_key):
cache.encode(string, as_map_key)
return self.parse_string(string, cache, as_map_key) | python | def decode_string(self, string, cache, as_map_key):
"""Decode a string - arguments follow the same convention as the
top-level 'decode' function.
"""
if is_cache_key(string):
return self.parse_string(cache.decode(string, as_map_key),
cache, as_map_key)
if is_cacheable(string, as_map_key):
cache.encode(string, as_map_key)
return self.parse_string(string, cache, as_map_key) | ['def', 'decode_string', '(', 'self', ',', 'string', ',', 'cache', ',', 'as_map_key', ')', ':', 'if', 'is_cache_key', '(', 'string', ')', ':', 'return', 'self', '.', 'parse_string', '(', 'cache', '.', 'decode', '(', 'string', ',', 'as_map_key', ')', ',', 'cache', ',', 'as_map_key', ')', 'if', 'is_cacheable', '(', 'string', ',', 'as_map_key', ')', ':', 'cache', '.', 'encode', '(', 'string', ',', 'as_map_key', ')', 'return', 'self', '.', 'parse_string', '(', 'string', ',', 'cache', ',', 'as_map_key', ')'] | Decode a string - arguments follow the same convention as the
top-level 'decode' function. | ['Decode', 'a', 'string', '-', 'arguments', 'follow', 'the', 'same', 'convention', 'as', 'the', 'top', '-', 'level', 'decode', 'function', '.'] | train | https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/decoder.py#L123-L132 |
6,055 | pygobject/pgi | pgi/overrides/__init__.py | strip_boolean_result | def strip_boolean_result(method, exc_type=None, exc_str=None, fail_ret=None):
"""Translate method's return value for stripping off success flag.
There are a lot of methods which return a "success" boolean and have
several out arguments. Translate such a method to return the out arguments
on success and None on failure.
"""
@wraps(method)
def wrapped(*args, **kwargs):
ret = method(*args, **kwargs)
if ret[0]:
if len(ret) == 2:
return ret[1]
else:
return ret[1:]
else:
if exc_type:
raise exc_type(exc_str or 'call failed')
return fail_ret
return wrapped | python | def strip_boolean_result(method, exc_type=None, exc_str=None, fail_ret=None):
"""Translate method's return value for stripping off success flag.
There are a lot of methods which return a "success" boolean and have
several out arguments. Translate such a method to return the out arguments
on success and None on failure.
"""
@wraps(method)
def wrapped(*args, **kwargs):
ret = method(*args, **kwargs)
if ret[0]:
if len(ret) == 2:
return ret[1]
else:
return ret[1:]
else:
if exc_type:
raise exc_type(exc_str or 'call failed')
return fail_ret
return wrapped | ['def', 'strip_boolean_result', '(', 'method', ',', 'exc_type', '=', 'None', ',', 'exc_str', '=', 'None', ',', 'fail_ret', '=', 'None', ')', ':', '@', 'wraps', '(', 'method', ')', 'def', 'wrapped', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'ret', '=', 'method', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'ret', '[', '0', ']', ':', 'if', 'len', '(', 'ret', ')', '==', '2', ':', 'return', 'ret', '[', '1', ']', 'else', ':', 'return', 'ret', '[', '1', ':', ']', 'else', ':', 'if', 'exc_type', ':', 'raise', 'exc_type', '(', 'exc_str', 'or', "'call failed'", ')', 'return', 'fail_ret', 'return', 'wrapped'] | Translate method's return value for stripping off success flag.
There are a lot of methods which return a "success" boolean and have
several out arguments. Translate such a method to return the out arguments
on success and None on failure. | ['Translate', 'method', 's', 'return', 'value', 'for', 'stripping', 'off', 'success', 'flag', '.'] | train | https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/overrides/__init__.py#L308-L327 |
6,056 | Azure/azure-cosmos-table-python | azure-cosmosdb-table/azure/cosmosdb/table/_request.py | _merge_entity | def _merge_entity(entity, if_match, require_encryption=False, key_encryption_key=None):
'''
Constructs a merge entity request.
'''
_validate_not_none('if_match', if_match)
_validate_entity(entity)
_validate_encryption_unsupported(require_encryption, key_encryption_key)
request = HTTPRequest()
request.method = 'MERGE'
request.headers = {
_DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1],
_DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
'If-Match': _to_str(if_match)
}
request.body = _get_request_body(_convert_entity_to_json(entity))
return request | python | def _merge_entity(entity, if_match, require_encryption=False, key_encryption_key=None):
'''
Constructs a merge entity request.
'''
_validate_not_none('if_match', if_match)
_validate_entity(entity)
_validate_encryption_unsupported(require_encryption, key_encryption_key)
request = HTTPRequest()
request.method = 'MERGE'
request.headers = {
_DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1],
_DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1],
'If-Match': _to_str(if_match)
}
request.body = _get_request_body(_convert_entity_to_json(entity))
return request | ['def', '_merge_entity', '(', 'entity', ',', 'if_match', ',', 'require_encryption', '=', 'False', ',', 'key_encryption_key', '=', 'None', ')', ':', '_validate_not_none', '(', "'if_match'", ',', 'if_match', ')', '_validate_entity', '(', 'entity', ')', '_validate_encryption_unsupported', '(', 'require_encryption', ',', 'key_encryption_key', ')', 'request', '=', 'HTTPRequest', '(', ')', 'request', '.', 'method', '=', "'MERGE'", 'request', '.', 'headers', '=', '{', '_DEFAULT_CONTENT_TYPE_HEADER', '[', '0', ']', ':', '_DEFAULT_CONTENT_TYPE_HEADER', '[', '1', ']', ',', '_DEFAULT_ACCEPT_HEADER', '[', '0', ']', ':', '_DEFAULT_ACCEPT_HEADER', '[', '1', ']', ',', "'If-Match'", ':', '_to_str', '(', 'if_match', ')', '}', 'request', '.', 'body', '=', '_get_request_body', '(', '_convert_entity_to_json', '(', 'entity', ')', ')', 'return', 'request'] | Constructs a merge entity request. | ['Constructs', 'a', 'merge', 'entity', 'request', '.'] | train | https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/_request.py#L121-L138 |
6,057 | mishbahr/django-usersettings2 | usersettings/shortcuts.py | get_usersettings_model | def get_usersettings_model():
"""
Returns the ``UserSettings`` model that is active in this project.
"""
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
try:
app_label, model_name = settings.USERSETTINGS_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured('USERSETTINGS_MODEL must be of the '
'form "app_label.model_name"')
usersettings_model = get_model(app_label, model_name)
if usersettings_model is None:
raise ImproperlyConfigured('USERSETTINGS_MODEL refers to model "%s" that has '
'not been installed' % settings.USERSETTINGS_MODEL)
return usersettings_model | python | def get_usersettings_model():
"""
Returns the ``UserSettings`` model that is active in this project.
"""
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
try:
app_label, model_name = settings.USERSETTINGS_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured('USERSETTINGS_MODEL must be of the '
'form "app_label.model_name"')
usersettings_model = get_model(app_label, model_name)
if usersettings_model is None:
raise ImproperlyConfigured('USERSETTINGS_MODEL refers to model "%s" that has '
'not been installed' % settings.USERSETTINGS_MODEL)
return usersettings_model | ['def', 'get_usersettings_model', '(', ')', ':', 'try', ':', 'from', 'django', '.', 'apps', 'import', 'apps', 'get_model', '=', 'apps', '.', 'get_model', 'except', 'ImportError', ':', 'from', 'django', '.', 'db', '.', 'models', '.', 'loading', 'import', 'get_model', 'try', ':', 'app_label', ',', 'model_name', '=', 'settings', '.', 'USERSETTINGS_MODEL', '.', 'split', '(', "'.'", ')', 'except', 'ValueError', ':', 'raise', 'ImproperlyConfigured', '(', "'USERSETTINGS_MODEL must be of the '", '\'form "app_label.model_name"\'', ')', 'usersettings_model', '=', 'get_model', '(', 'app_label', ',', 'model_name', ')', 'if', 'usersettings_model', 'is', 'None', ':', 'raise', 'ImproperlyConfigured', '(', '\'USERSETTINGS_MODEL refers to model "%s" that has \'', "'not been installed'", '%', 'settings', '.', 'USERSETTINGS_MODEL', ')', 'return', 'usersettings_model'] | Returns the ``UserSettings`` model that is active in this project. | ['Returns', 'the', 'UserSettings', 'model', 'that', 'is', 'active', 'in', 'this', 'project', '.'] | train | https://github.com/mishbahr/django-usersettings2/blob/cbc2f4b2e01d5401bec8a3fa39151730cd2dcd2a/usersettings/shortcuts.py#L5-L24 |
6,058 | AltSchool/dynamic-rest | dynamic_rest/fields/common.py | WithRelationalFieldMixin._get_request_fields_from_parent | def _get_request_fields_from_parent(self):
"""Get request fields from the parent serializer."""
if not self.parent:
return None
if not getattr(self.parent, 'request_fields'):
return None
if not isinstance(self.parent.request_fields, dict):
return None
return self.parent.request_fields.get(self.field_name) | python | def _get_request_fields_from_parent(self):
"""Get request fields from the parent serializer."""
if not self.parent:
return None
if not getattr(self.parent, 'request_fields'):
return None
if not isinstance(self.parent.request_fields, dict):
return None
return self.parent.request_fields.get(self.field_name) | ['def', '_get_request_fields_from_parent', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'parent', ':', 'return', 'None', 'if', 'not', 'getattr', '(', 'self', '.', 'parent', ',', "'request_fields'", ')', ':', 'return', 'None', 'if', 'not', 'isinstance', '(', 'self', '.', 'parent', '.', 'request_fields', ',', 'dict', ')', ':', 'return', 'None', 'return', 'self', '.', 'parent', '.', 'request_fields', '.', 'get', '(', 'self', '.', 'field_name', ')'] | Get request fields from the parent serializer. | ['Get', 'request', 'fields', 'from', 'the', 'parent', 'serializer', '.'] | train | https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/fields/common.py#L6-L17 |
6,059 | csparpa/pyowm | pyowm/weatherapi25/stationhistory.py | StationHistory.to_XML | def to_XML(self, xml_declaration=True, xmlns=True):
"""
Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param XML_declaration: if ``True`` (default) prints a leading XML
declaration line
:type XML_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
"""
root_node = self._to_DOM()
if xmlns:
xmlutils.annotate_with_XMLNS(root_node,
STATION_HISTORY_XMLNS_PREFIX,
STATION_HISTORY_XMLNS_URL)
return xmlutils.DOM_node_to_XML(root_node, xml_declaration) | python | def to_XML(self, xml_declaration=True, xmlns=True):
"""
Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param XML_declaration: if ``True`` (default) prints a leading XML
declaration line
:type XML_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
"""
root_node = self._to_DOM()
if xmlns:
xmlutils.annotate_with_XMLNS(root_node,
STATION_HISTORY_XMLNS_PREFIX,
STATION_HISTORY_XMLNS_URL)
return xmlutils.DOM_node_to_XML(root_node, xml_declaration) | ['def', 'to_XML', '(', 'self', ',', 'xml_declaration', '=', 'True', ',', 'xmlns', '=', 'True', ')', ':', 'root_node', '=', 'self', '.', '_to_DOM', '(', ')', 'if', 'xmlns', ':', 'xmlutils', '.', 'annotate_with_XMLNS', '(', 'root_node', ',', 'STATION_HISTORY_XMLNS_PREFIX', ',', 'STATION_HISTORY_XMLNS_URL', ')', 'return', 'xmlutils', '.', 'DOM_node_to_XML', '(', 'root_node', ',', 'xml_declaration', ')'] | Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param XML_declaration: if ``True`` (default) prints a leading XML
declaration line
:type XML_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string | ['Dumps', 'object', 'fields', 'to', 'an', 'XML', '-', 'formatted', 'string', '.', 'The', 'xml_declaration', 'switch', 'enables', 'printing', 'of', 'a', 'leading', 'standard', 'XML', 'line', 'containing', 'XML', 'version', 'and', 'encoding', '.', 'The', 'xmlns', 'switch', 'enables', 'printing', 'of', 'qualified', 'XMLNS', 'prefixes', '.'] | train | https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/stationhistory.py#L123-L143 |
6,060 | ipfs/py-ipfs-api | ipfsapi/multipart.py | DirectoryStream._prepare | def _prepare(self):
"""Pre-formats the multipart HTTP request to transmit the directory."""
names = []
added_directories = set()
def add_directory(short_path):
# Do not continue if this directory has already been added
if short_path in added_directories:
return
# Scan for first super-directory that has already been added
dir_base = short_path
dir_parts = []
while dir_base:
dir_base, dir_name = os.path.split(dir_base)
dir_parts.append(dir_name)
if dir_base in added_directories:
break
# Add missing intermediate directory nodes in the right order
while dir_parts:
dir_base = os.path.join(dir_base, dir_parts.pop())
# Create an empty, fake file to represent the directory
mock_file = io.StringIO()
mock_file.write(u'')
# Add this directory to those that will be sent
names.append(('files',
(dir_base.replace(os.sep, '/'), mock_file, 'application/x-directory')))
# Remember that this directory has already been sent
added_directories.add(dir_base)
def add_file(short_path, full_path):
try:
# Always add files in wildcard directories
names.append(('files', (short_name.replace(os.sep, '/'),
open(full_path, 'rb'),
'application/octet-stream')))
except OSError:
# File might have disappeared between `os.walk()` and `open()`
pass
def match_short_path(short_path):
# Remove initial path component so that all files are based in
# the target directory itself (not one level above)
if os.sep in short_path:
path = short_path.split(os.sep, 1)[1]
else:
return False
# Convert all path seperators to POSIX style
path = path.replace(os.sep, '/')
# Do the matching and the simplified path
for pattern in self.patterns:
if pattern.match(path):
return True
return False
# Identify the unecessary portion of the relative path
truncate = os.path.dirname(self.directory)
# Traverse the filesystem downward from the target directory's uri
# Errors: `os.walk()` will simply return an empty generator if the
# target directory does not exist.
wildcard_directories = set()
for curr_dir, _, files in os.walk(self.directory):
# find the path relative to the directory being added
if len(truncate) > 0:
_, _, short_path = curr_dir.partition(truncate)
else:
short_path = curr_dir
# remove leading / or \ if it is present
if short_path.startswith(os.sep):
short_path = short_path[1:]
wildcard_directory = False
if os.path.split(short_path)[0] in wildcard_directories:
# Parent directory has matched a pattern, all sub-nodes should
# be added too
wildcard_directories.add(short_path)
wildcard_directory = True
else:
# Check if directory path matches one of the patterns
if match_short_path(short_path):
# Directory matched pattern and it should therefor
# be added along with all of its contents
wildcard_directories.add(short_path)
wildcard_directory = True
# Always add directories within wildcard directories - even if they
# are empty
if wildcard_directory:
add_directory(short_path)
# Iterate across the files in the current directory
for filename in files:
# Find the filename relative to the directory being added
short_name = os.path.join(short_path, filename)
filepath = os.path.join(curr_dir, filename)
if wildcard_directory:
# Always add files in wildcard directories
add_file(short_name, filepath)
else:
# Add file (and all missing intermediary directories)
# if it matches one of the patterns
if match_short_path(short_name):
add_directory(short_path)
add_file(short_name, filepath)
# Send the request and present the response body to the user
req = requests.Request("POST", 'http://localhost', files=names)
prep = req.prepare()
return prep | python | def _prepare(self):
"""Pre-formats the multipart HTTP request to transmit the directory."""
names = []
added_directories = set()
def add_directory(short_path):
# Do not continue if this directory has already been added
if short_path in added_directories:
return
# Scan for first super-directory that has already been added
dir_base = short_path
dir_parts = []
while dir_base:
dir_base, dir_name = os.path.split(dir_base)
dir_parts.append(dir_name)
if dir_base in added_directories:
break
# Add missing intermediate directory nodes in the right order
while dir_parts:
dir_base = os.path.join(dir_base, dir_parts.pop())
# Create an empty, fake file to represent the directory
mock_file = io.StringIO()
mock_file.write(u'')
# Add this directory to those that will be sent
names.append(('files',
(dir_base.replace(os.sep, '/'), mock_file, 'application/x-directory')))
# Remember that this directory has already been sent
added_directories.add(dir_base)
def add_file(short_path, full_path):
try:
# Always add files in wildcard directories
names.append(('files', (short_name.replace(os.sep, '/'),
open(full_path, 'rb'),
'application/octet-stream')))
except OSError:
# File might have disappeared between `os.walk()` and `open()`
pass
def match_short_path(short_path):
# Remove initial path component so that all files are based in
# the target directory itself (not one level above)
if os.sep in short_path:
path = short_path.split(os.sep, 1)[1]
else:
return False
# Convert all path seperators to POSIX style
path = path.replace(os.sep, '/')
# Do the matching and the simplified path
for pattern in self.patterns:
if pattern.match(path):
return True
return False
# Identify the unecessary portion of the relative path
truncate = os.path.dirname(self.directory)
# Traverse the filesystem downward from the target directory's uri
# Errors: `os.walk()` will simply return an empty generator if the
# target directory does not exist.
wildcard_directories = set()
for curr_dir, _, files in os.walk(self.directory):
# find the path relative to the directory being added
if len(truncate) > 0:
_, _, short_path = curr_dir.partition(truncate)
else:
short_path = curr_dir
# remove leading / or \ if it is present
if short_path.startswith(os.sep):
short_path = short_path[1:]
wildcard_directory = False
if os.path.split(short_path)[0] in wildcard_directories:
# Parent directory has matched a pattern, all sub-nodes should
# be added too
wildcard_directories.add(short_path)
wildcard_directory = True
else:
# Check if directory path matches one of the patterns
if match_short_path(short_path):
# Directory matched pattern and it should therefor
# be added along with all of its contents
wildcard_directories.add(short_path)
wildcard_directory = True
# Always add directories within wildcard directories - even if they
# are empty
if wildcard_directory:
add_directory(short_path)
# Iterate across the files in the current directory
for filename in files:
# Find the filename relative to the directory being added
short_name = os.path.join(short_path, filename)
filepath = os.path.join(curr_dir, filename)
if wildcard_directory:
# Always add files in wildcard directories
add_file(short_name, filepath)
else:
# Add file (and all missing intermediary directories)
# if it matches one of the patterns
if match_short_path(short_name):
add_directory(short_path)
add_file(short_name, filepath)
# Send the request and present the response body to the user
req = requests.Request("POST", 'http://localhost', files=names)
prep = req.prepare()
return prep | ['def', '_prepare', '(', 'self', ')', ':', 'names', '=', '[', ']', 'added_directories', '=', 'set', '(', ')', 'def', 'add_directory', '(', 'short_path', ')', ':', '# Do not continue if this directory has already been added', 'if', 'short_path', 'in', 'added_directories', ':', 'return', '# Scan for first super-directory that has already been added', 'dir_base', '=', 'short_path', 'dir_parts', '=', '[', ']', 'while', 'dir_base', ':', 'dir_base', ',', 'dir_name', '=', 'os', '.', 'path', '.', 'split', '(', 'dir_base', ')', 'dir_parts', '.', 'append', '(', 'dir_name', ')', 'if', 'dir_base', 'in', 'added_directories', ':', 'break', '# Add missing intermediate directory nodes in the right order', 'while', 'dir_parts', ':', 'dir_base', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_base', ',', 'dir_parts', '.', 'pop', '(', ')', ')', '# Create an empty, fake file to represent the directory', 'mock_file', '=', 'io', '.', 'StringIO', '(', ')', 'mock_file', '.', 'write', '(', "u''", ')', '# Add this directory to those that will be sent', 'names', '.', 'append', '(', '(', "'files'", ',', '(', 'dir_base', '.', 'replace', '(', 'os', '.', 'sep', ',', "'/'", ')', ',', 'mock_file', ',', "'application/x-directory'", ')', ')', ')', '# Remember that this directory has already been sent', 'added_directories', '.', 'add', '(', 'dir_base', ')', 'def', 'add_file', '(', 'short_path', ',', 'full_path', ')', ':', 'try', ':', '# Always add files in wildcard directories', 'names', '.', 'append', '(', '(', "'files'", ',', '(', 'short_name', '.', 'replace', '(', 'os', '.', 'sep', ',', "'/'", ')', ',', 'open', '(', 'full_path', ',', "'rb'", ')', ',', "'application/octet-stream'", ')', ')', ')', 'except', 'OSError', ':', '# File might have disappeared between `os.walk()` and `open()`', 'pass', 'def', 'match_short_path', '(', 'short_path', ')', ':', '# Remove initial path component so that all files are based in', '# the target directory itself (not one level above)', 'if', 'os', '.', 'sep', 'in', 'short_path', ':', 'path', '=', 'short_path', '.', 'split', '(', 'os', '.', 'sep', ',', '1', ')', '[', '1', ']', 'else', ':', 'return', 'False', '# Convert all path seperators to POSIX style', 'path', '=', 'path', '.', 'replace', '(', 'os', '.', 'sep', ',', "'/'", ')', '# Do the matching and the simplified path', 'for', 'pattern', 'in', 'self', '.', 'patterns', ':', 'if', 'pattern', '.', 'match', '(', 'path', ')', ':', 'return', 'True', 'return', 'False', '# Identify the unecessary portion of the relative path', 'truncate', '=', 'os', '.', 'path', '.', 'dirname', '(', 'self', '.', 'directory', ')', "# Traverse the filesystem downward from the target directory's uri", '# Errors: `os.walk()` will simply return an empty generator if the', '# target directory does not exist.', 'wildcard_directories', '=', 'set', '(', ')', 'for', 'curr_dir', ',', '_', ',', 'files', 'in', 'os', '.', 'walk', '(', 'self', '.', 'directory', ')', ':', '# find the path relative to the directory being added', 'if', 'len', '(', 'truncate', ')', '>', '0', ':', '_', ',', '_', ',', 'short_path', '=', 'curr_dir', '.', 'partition', '(', 'truncate', ')', 'else', ':', 'short_path', '=', 'curr_dir', '# remove leading / or \\ if it is present', 'if', 'short_path', '.', 'startswith', '(', 'os', '.', 'sep', ')', ':', 'short_path', '=', 'short_path', '[', '1', ':', ']', 'wildcard_directory', '=', 'False', 'if', 'os', '.', 'path', '.', 'split', '(', 'short_path', ')', '[', '0', ']', 'in', 'wildcard_directories', ':', '# Parent directory has matched a pattern, all sub-nodes should', '# be added too', 'wildcard_directories', '.', 'add', '(', 'short_path', ')', 'wildcard_directory', '=', 'True', 'else', ':', '# Check if directory path matches one of the patterns', 'if', 'match_short_path', '(', 'short_path', ')', ':', '# Directory matched pattern and it should therefor', '# be added along with all of its contents', 'wildcard_directories', '.', 'add', '(', 'short_path', ')', 'wildcard_directory', '=', 'True', '# Always add directories within wildcard directories - even if they', '# are empty', 'if', 'wildcard_directory', ':', 'add_directory', '(', 'short_path', ')', '# Iterate across the files in the current directory', 'for', 'filename', 'in', 'files', ':', '# Find the filename relative to the directory being added', 'short_name', '=', 'os', '.', 'path', '.', 'join', '(', 'short_path', ',', 'filename', ')', 'filepath', '=', 'os', '.', 'path', '.', 'join', '(', 'curr_dir', ',', 'filename', ')', 'if', 'wildcard_directory', ':', '# Always add files in wildcard directories', 'add_file', '(', 'short_name', ',', 'filepath', ')', 'else', ':', '# Add file (and all missing intermediary directories)', '# if it matches one of the patterns', 'if', 'match_short_path', '(', 'short_name', ')', ':', 'add_directory', '(', 'short_path', ')', 'add_file', '(', 'short_name', ',', 'filepath', ')', '# Send the request and present the response body to the user', 'req', '=', 'requests', '.', 'Request', '(', '"POST"', ',', "'http://localhost'", ',', 'files', '=', 'names', ')', 'prep', '=', 'req', '.', 'prepare', '(', ')', 'return', 'prep'] | Pre-formats the multipart HTTP request to transmit the directory. | ['Pre', '-', 'formats', 'the', 'multipart', 'HTTP', 'request', 'to', 'transmit', 'the', 'directory', '.'] | train | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L415-L528 |
6,061 | c-w/gutenberg | gutenberg/acquire/metadata.py | MetadataCache.open | def open(self):
"""Opens an existing cache.
"""
try:
self.graph.open(self.cache_uri, create=False)
self._add_namespaces(self.graph)
self.is_open = True
except Exception:
raise InvalidCacheException('The cache is invalid or not created') | python | def open(self):
"""Opens an existing cache.
"""
try:
self.graph.open(self.cache_uri, create=False)
self._add_namespaces(self.graph)
self.is_open = True
except Exception:
raise InvalidCacheException('The cache is invalid or not created') | ['def', 'open', '(', 'self', ')', ':', 'try', ':', 'self', '.', 'graph', '.', 'open', '(', 'self', '.', 'cache_uri', ',', 'create', '=', 'False', ')', 'self', '.', '_add_namespaces', '(', 'self', '.', 'graph', ')', 'self', '.', 'is_open', '=', 'True', 'except', 'Exception', ':', 'raise', 'InvalidCacheException', '(', "'The cache is invalid or not created'", ')'] | Opens an existing cache. | ['Opens', 'an', 'existing', 'cache', '.'] | train | https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L61-L70 |
6,062 | ArduPilot/MAVProxy | MAVProxy/modules/lib/grapher.py | MavGraph.show | def show(self, lenmavlist, block=True, xlim_pipe=None):
'''show graph'''
if xlim_pipe is not None:
xlim_pipe[0].close()
self.xlim_pipe = xlim_pipe
if self.labels is not None:
labels = self.labels.split(',')
if len(labels) != len(fields)*lenmavlist:
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(fields)*lenmavlist))
return
else:
labels = None
for fi in range(0, lenmavlist):
timeshift = 0
for i in range(0, len(self.x)):
if self.first_only[i] and fi != 0:
self.x[i] = []
self.y[i] = []
if labels:
lab = labels[fi*len(self.fields):(fi+1)*len(self.fields)]
else:
lab = self.fields[:]
if self.multi:
col = colors[:]
else:
col = colors[fi*len(self.fields):]
self.plotit(self.x, self.y, lab, colors=col, title=self.title)
for i in range(0, len(self.x)):
self.x[i] = []
self.y[i] = []
if self.xlim_pipe is not None:
import matplotlib.animation
self.ani = matplotlib.animation.FuncAnimation(self.fig, self.xlim_change_check,
frames=10, interval=20000,
repeat=True, blit=False)
threading.Timer(0.1, self.xlim_timer).start()
pylab.draw()
pylab.show(block=block) | python | def show(self, lenmavlist, block=True, xlim_pipe=None):
'''show graph'''
if xlim_pipe is not None:
xlim_pipe[0].close()
self.xlim_pipe = xlim_pipe
if self.labels is not None:
labels = self.labels.split(',')
if len(labels) != len(fields)*lenmavlist:
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(fields)*lenmavlist))
return
else:
labels = None
for fi in range(0, lenmavlist):
timeshift = 0
for i in range(0, len(self.x)):
if self.first_only[i] and fi != 0:
self.x[i] = []
self.y[i] = []
if labels:
lab = labels[fi*len(self.fields):(fi+1)*len(self.fields)]
else:
lab = self.fields[:]
if self.multi:
col = colors[:]
else:
col = colors[fi*len(self.fields):]
self.plotit(self.x, self.y, lab, colors=col, title=self.title)
for i in range(0, len(self.x)):
self.x[i] = []
self.y[i] = []
if self.xlim_pipe is not None:
import matplotlib.animation
self.ani = matplotlib.animation.FuncAnimation(self.fig, self.xlim_change_check,
frames=10, interval=20000,
repeat=True, blit=False)
threading.Timer(0.1, self.xlim_timer).start()
pylab.draw()
pylab.show(block=block) | ['def', 'show', '(', 'self', ',', 'lenmavlist', ',', 'block', '=', 'True', ',', 'xlim_pipe', '=', 'None', ')', ':', 'if', 'xlim_pipe', 'is', 'not', 'None', ':', 'xlim_pipe', '[', '0', ']', '.', 'close', '(', ')', 'self', '.', 'xlim_pipe', '=', 'xlim_pipe', 'if', 'self', '.', 'labels', 'is', 'not', 'None', ':', 'labels', '=', 'self', '.', 'labels', '.', 'split', '(', "','", ')', 'if', 'len', '(', 'labels', ')', '!=', 'len', '(', 'fields', ')', '*', 'lenmavlist', ':', 'print', '(', '"Number of labels (%u) must match number of fields (%u)"', '%', '(', 'len', '(', 'labels', ')', ',', 'len', '(', 'fields', ')', '*', 'lenmavlist', ')', ')', 'return', 'else', ':', 'labels', '=', 'None', 'for', 'fi', 'in', 'range', '(', '0', ',', 'lenmavlist', ')', ':', 'timeshift', '=', '0', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'self', '.', 'x', ')', ')', ':', 'if', 'self', '.', 'first_only', '[', 'i', ']', 'and', 'fi', '!=', '0', ':', 'self', '.', 'x', '[', 'i', ']', '=', '[', ']', 'self', '.', 'y', '[', 'i', ']', '=', '[', ']', 'if', 'labels', ':', 'lab', '=', 'labels', '[', 'fi', '*', 'len', '(', 'self', '.', 'fields', ')', ':', '(', 'fi', '+', '1', ')', '*', 'len', '(', 'self', '.', 'fields', ')', ']', 'else', ':', 'lab', '=', 'self', '.', 'fields', '[', ':', ']', 'if', 'self', '.', 'multi', ':', 'col', '=', 'colors', '[', ':', ']', 'else', ':', 'col', '=', 'colors', '[', 'fi', '*', 'len', '(', 'self', '.', 'fields', ')', ':', ']', 'self', '.', 'plotit', '(', 'self', '.', 'x', ',', 'self', '.', 'y', ',', 'lab', ',', 'colors', '=', 'col', ',', 'title', '=', 'self', '.', 'title', ')', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'self', '.', 'x', ')', ')', ':', 'self', '.', 'x', '[', 'i', ']', '=', '[', ']', 'self', '.', 'y', '[', 'i', ']', '=', '[', ']', 'if', 'self', '.', 'xlim_pipe', 'is', 'not', 'None', ':', 'import', 'matplotlib', '.', 'animation', 'self', '.', 'ani', '=', 'matplotlib', '.', 'animation', '.', 'FuncAnimation', '(', 'self', '.', 'fig', ',', 'self', '.', 'xlim_change_check', ',', 'frames', '=', '10', ',', 'interval', '=', '20000', ',', 'repeat', '=', 'True', ',', 'blit', '=', 'False', ')', 'threading', '.', 'Timer', '(', '0.1', ',', 'self', '.', 'xlim_timer', ')', '.', 'start', '(', ')', 'pylab', '.', 'draw', '(', ')', 'pylab', '.', 'show', '(', 'block', '=', 'block', ')'] | show graph | ['show', 'graph'] | train | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/grapher.py#L482-L523 |
6,063 | Cue/scales | src/greplin/scales/util.py | EWMA.tick | def tick(self):
"""Updates rates and decays"""
count = self._uncounted.getAndSet(0)
instantRate = float(count) / self.interval
if self._initialized:
self.rate += (self.alpha * (instantRate - self.rate))
else:
self.rate = instantRate
self._initialized = True | python | def tick(self):
"""Updates rates and decays"""
count = self._uncounted.getAndSet(0)
instantRate = float(count) / self.interval
if self._initialized:
self.rate += (self.alpha * (instantRate - self.rate))
else:
self.rate = instantRate
self._initialized = True | ['def', 'tick', '(', 'self', ')', ':', 'count', '=', 'self', '.', '_uncounted', '.', 'getAndSet', '(', '0', ')', 'instantRate', '=', 'float', '(', 'count', ')', '/', 'self', '.', 'interval', 'if', 'self', '.', '_initialized', ':', 'self', '.', 'rate', '+=', '(', 'self', '.', 'alpha', '*', '(', 'instantRate', '-', 'self', '.', 'rate', ')', ')', 'else', ':', 'self', '.', 'rate', '=', 'instantRate', 'self', '.', '_initialized', '=', 'True'] | Updates rates and decays | ['Updates', 'rates', 'and', 'decays'] | train | https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/util.py#L231-L240 |
6,064 | saltstack/salt | salt/modules/file.py | get_sum | def get_sum(path, form='sha256'):
'''
Return the checksum for the given file. The following checksum algorithms
are supported:
* md5
* sha1
* sha224
* sha256 **(default)**
* sha384
* sha512
path
path to the file or directory
form
desired sum format
CLI Example:
.. code-block:: bash
salt '*' file.get_sum /etc/passwd sha512
'''
path = os.path.expanduser(path)
if not os.path.isfile(path):
return 'File not found'
return salt.utils.hashutils.get_hash(path, form, 4096) | python | def get_sum(path, form='sha256'):
'''
Return the checksum for the given file. The following checksum algorithms
are supported:
* md5
* sha1
* sha224
* sha256 **(default)**
* sha384
* sha512
path
path to the file or directory
form
desired sum format
CLI Example:
.. code-block:: bash
salt '*' file.get_sum /etc/passwd sha512
'''
path = os.path.expanduser(path)
if not os.path.isfile(path):
return 'File not found'
return salt.utils.hashutils.get_hash(path, form, 4096) | ['def', 'get_sum', '(', 'path', ',', 'form', '=', "'sha256'", ')', ':', 'path', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'path', ')', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'path', ')', ':', 'return', "'File not found'", 'return', 'salt', '.', 'utils', '.', 'hashutils', '.', 'get_hash', '(', 'path', ',', 'form', ',', '4096', ')'] | Return the checksum for the given file. The following checksum algorithms
are supported:
* md5
* sha1
* sha224
* sha256 **(default)**
* sha384
* sha512
path
path to the file or directory
form
desired sum format
CLI Example:
.. code-block:: bash
salt '*' file.get_sum /etc/passwd sha512 | ['Return', 'the', 'checksum', 'for', 'the', 'given', 'file', '.', 'The', 'following', 'checksum', 'algorithms', 'are', 'supported', ':'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L651-L679 |
6,065 | pkgw/pwkit | pwkit/lmmin.py | Problem.p_side | def p_side(self, idx, sidedness):
"""Acceptable values for *sidedness* are "auto", "pos",
"neg", and "two"."""
dsideval = _dside_names.get(sidedness)
if dsideval is None:
raise ValueError('unrecognized sidedness "%s"' % sidedness)
p = self._pinfob
p[idx] = (p[idx] & ~PI_M_SIDE) | dsideval
return self | python | def p_side(self, idx, sidedness):
"""Acceptable values for *sidedness* are "auto", "pos",
"neg", and "two"."""
dsideval = _dside_names.get(sidedness)
if dsideval is None:
raise ValueError('unrecognized sidedness "%s"' % sidedness)
p = self._pinfob
p[idx] = (p[idx] & ~PI_M_SIDE) | dsideval
return self | ['def', 'p_side', '(', 'self', ',', 'idx', ',', 'sidedness', ')', ':', 'dsideval', '=', '_dside_names', '.', 'get', '(', 'sidedness', ')', 'if', 'dsideval', 'is', 'None', ':', 'raise', 'ValueError', '(', '\'unrecognized sidedness "%s"\'', '%', 'sidedness', ')', 'p', '=', 'self', '.', '_pinfob', 'p', '[', 'idx', ']', '=', '(', 'p', '[', 'idx', ']', '&', '~', 'PI_M_SIDE', ')', '|', 'dsideval', 'return', 'self'] | Acceptable values for *sidedness* are "auto", "pos",
"neg", and "two". | ['Acceptable', 'values', 'for', '*', 'sidedness', '*', 'are', 'auto', 'pos', 'neg', 'and', 'two', '.'] | train | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/lmmin.py#L1397-L1406 |
6,066 | saltstack/salt | salt/modules/container_resource.py | _validate | def _validate(wrapped):
'''
Decorator for common function argument validation
'''
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
container_type = kwargs.get('container_type')
exec_driver = kwargs.get('exec_driver')
valid_driver = {
'docker': ('lxc-attach', 'nsenter', 'docker-exec'),
'lxc': ('lxc-attach',),
'nspawn': ('nsenter',),
}
if container_type not in valid_driver:
raise SaltInvocationError(
'Invalid container type \'{0}\'. Valid types are: {1}'
.format(container_type, ', '.join(sorted(valid_driver)))
)
if exec_driver not in valid_driver[container_type]:
raise SaltInvocationError(
'Invalid command execution driver. Valid drivers are: {0}'
.format(', '.join(valid_driver[container_type]))
)
if exec_driver == 'lxc-attach' and not salt.utils.path.which('lxc-attach'):
raise SaltInvocationError(
'The \'lxc-attach\' execution driver has been chosen, but '
'lxc-attach is not available. LXC may not be installed.'
)
return wrapped(*args, **salt.utils.args.clean_kwargs(**kwargs))
return wrapper | python | def _validate(wrapped):
'''
Decorator for common function argument validation
'''
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
container_type = kwargs.get('container_type')
exec_driver = kwargs.get('exec_driver')
valid_driver = {
'docker': ('lxc-attach', 'nsenter', 'docker-exec'),
'lxc': ('lxc-attach',),
'nspawn': ('nsenter',),
}
if container_type not in valid_driver:
raise SaltInvocationError(
'Invalid container type \'{0}\'. Valid types are: {1}'
.format(container_type, ', '.join(sorted(valid_driver)))
)
if exec_driver not in valid_driver[container_type]:
raise SaltInvocationError(
'Invalid command execution driver. Valid drivers are: {0}'
.format(', '.join(valid_driver[container_type]))
)
if exec_driver == 'lxc-attach' and not salt.utils.path.which('lxc-attach'):
raise SaltInvocationError(
'The \'lxc-attach\' execution driver has been chosen, but '
'lxc-attach is not available. LXC may not be installed.'
)
return wrapped(*args, **salt.utils.args.clean_kwargs(**kwargs))
return wrapper | ['def', '_validate', '(', 'wrapped', ')', ':', '@', 'functools', '.', 'wraps', '(', 'wrapped', ')', 'def', 'wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'container_type', '=', 'kwargs', '.', 'get', '(', "'container_type'", ')', 'exec_driver', '=', 'kwargs', '.', 'get', '(', "'exec_driver'", ')', 'valid_driver', '=', '{', "'docker'", ':', '(', "'lxc-attach'", ',', "'nsenter'", ',', "'docker-exec'", ')', ',', "'lxc'", ':', '(', "'lxc-attach'", ',', ')', ',', "'nspawn'", ':', '(', "'nsenter'", ',', ')', ',', '}', 'if', 'container_type', 'not', 'in', 'valid_driver', ':', 'raise', 'SaltInvocationError', '(', "'Invalid container type \\'{0}\\'. Valid types are: {1}'", '.', 'format', '(', 'container_type', ',', "', '", '.', 'join', '(', 'sorted', '(', 'valid_driver', ')', ')', ')', ')', 'if', 'exec_driver', 'not', 'in', 'valid_driver', '[', 'container_type', ']', ':', 'raise', 'SaltInvocationError', '(', "'Invalid command execution driver. Valid drivers are: {0}'", '.', 'format', '(', "', '", '.', 'join', '(', 'valid_driver', '[', 'container_type', ']', ')', ')', ')', 'if', 'exec_driver', '==', "'lxc-attach'", 'and', 'not', 'salt', '.', 'utils', '.', 'path', '.', 'which', '(', "'lxc-attach'", ')', ':', 'raise', 'SaltInvocationError', '(', "'The \\'lxc-attach\\' execution driver has been chosen, but '", "'lxc-attach is not available. LXC may not be installed.'", ')', 'return', 'wrapped', '(', '*', 'args', ',', '*', '*', 'salt', '.', 'utils', '.', 'args', '.', 'clean_kwargs', '(', '*', '*', 'kwargs', ')', ')', 'return', 'wrapper'] | Decorator for common function argument validation | ['Decorator', 'for', 'common', 'function', 'argument', 'validation'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/container_resource.py#L35-L64 |
6,067 | google/grr | grr/server/grr_response_server/databases/mysql_hunts.py | MySQLDBHuntMixin.ReadHuntObjects | def ReadHuntObjects(self,
offset,
count,
with_creator=None,
created_after=None,
with_description_match=None,
cursor=None):
"""Reads multiple hunt objects from the database."""
query = "SELECT {columns} FROM hunts ".format(columns=_HUNT_COLUMNS_SELECT)
args = []
components = []
if with_creator is not None:
components.append("creator = %s ")
args.append(with_creator)
if created_after is not None:
components.append("create_timestamp > FROM_UNIXTIME(%s) ")
args.append(mysql_utils.RDFDatetimeToTimestamp(created_after))
if with_description_match is not None:
components.append("description LIKE %s")
args.append("%" + with_description_match + "%")
if components:
query += "WHERE " + " AND ".join(components)
query += " ORDER BY create_timestamp DESC LIMIT %s OFFSET %s"
args.append(count)
args.append(offset)
cursor.execute(query, args)
return [self._HuntObjectFromRow(row) for row in cursor.fetchall()] | python | def ReadHuntObjects(self,
offset,
count,
with_creator=None,
created_after=None,
with_description_match=None,
cursor=None):
"""Reads multiple hunt objects from the database."""
query = "SELECT {columns} FROM hunts ".format(columns=_HUNT_COLUMNS_SELECT)
args = []
components = []
if with_creator is not None:
components.append("creator = %s ")
args.append(with_creator)
if created_after is not None:
components.append("create_timestamp > FROM_UNIXTIME(%s) ")
args.append(mysql_utils.RDFDatetimeToTimestamp(created_after))
if with_description_match is not None:
components.append("description LIKE %s")
args.append("%" + with_description_match + "%")
if components:
query += "WHERE " + " AND ".join(components)
query += " ORDER BY create_timestamp DESC LIMIT %s OFFSET %s"
args.append(count)
args.append(offset)
cursor.execute(query, args)
return [self._HuntObjectFromRow(row) for row in cursor.fetchall()] | ['def', 'ReadHuntObjects', '(', 'self', ',', 'offset', ',', 'count', ',', 'with_creator', '=', 'None', ',', 'created_after', '=', 'None', ',', 'with_description_match', '=', 'None', ',', 'cursor', '=', 'None', ')', ':', 'query', '=', '"SELECT {columns} FROM hunts "', '.', 'format', '(', 'columns', '=', '_HUNT_COLUMNS_SELECT', ')', 'args', '=', '[', ']', 'components', '=', '[', ']', 'if', 'with_creator', 'is', 'not', 'None', ':', 'components', '.', 'append', '(', '"creator = %s "', ')', 'args', '.', 'append', '(', 'with_creator', ')', 'if', 'created_after', 'is', 'not', 'None', ':', 'components', '.', 'append', '(', '"create_timestamp > FROM_UNIXTIME(%s) "', ')', 'args', '.', 'append', '(', 'mysql_utils', '.', 'RDFDatetimeToTimestamp', '(', 'created_after', ')', ')', 'if', 'with_description_match', 'is', 'not', 'None', ':', 'components', '.', 'append', '(', '"description LIKE %s"', ')', 'args', '.', 'append', '(', '"%"', '+', 'with_description_match', '+', '"%"', ')', 'if', 'components', ':', 'query', '+=', '"WHERE "', '+', '" AND "', '.', 'join', '(', 'components', ')', 'query', '+=', '" ORDER BY create_timestamp DESC LIMIT %s OFFSET %s"', 'args', '.', 'append', '(', 'count', ')', 'args', '.', 'append', '(', 'offset', ')', 'cursor', '.', 'execute', '(', 'query', ',', 'args', ')', 'return', '[', 'self', '.', '_HuntObjectFromRow', '(', 'row', ')', 'for', 'row', 'in', 'cursor', '.', 'fetchall', '(', ')', ']'] | Reads multiple hunt objects from the database. | ['Reads', 'multiple', 'hunt', 'objects', 'from', 'the', 'database', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_hunts.py#L222-L254 |
6,068 | faucamp/python-gsmmodem | gsmmodem/modem.py | SentSms.status | def status(self):
""" Status of this SMS. Can be ENROUTE, DELIVERED or FAILED
The actual status report object may be accessed via the 'report' attribute
if status is 'DELIVERED' or 'FAILED'
"""
if self.report == None:
return SentSms.ENROUTE
else:
return SentSms.DELIVERED if self.report.deliveryStatus == StatusReport.DELIVERED else SentSms.FAILED | python | def status(self):
""" Status of this SMS. Can be ENROUTE, DELIVERED or FAILED
The actual status report object may be accessed via the 'report' attribute
if status is 'DELIVERED' or 'FAILED'
"""
if self.report == None:
return SentSms.ENROUTE
else:
return SentSms.DELIVERED if self.report.deliveryStatus == StatusReport.DELIVERED else SentSms.FAILED | ['def', 'status', '(', 'self', ')', ':', 'if', 'self', '.', 'report', '==', 'None', ':', 'return', 'SentSms', '.', 'ENROUTE', 'else', ':', 'return', 'SentSms', '.', 'DELIVERED', 'if', 'self', '.', 'report', '.', 'deliveryStatus', '==', 'StatusReport', '.', 'DELIVERED', 'else', 'SentSms', '.', 'FAILED'] | Status of this SMS. Can be ENROUTE, DELIVERED or FAILED
The actual status report object may be accessed via the 'report' attribute
if status is 'DELIVERED' or 'FAILED' | ['Status', 'of', 'this', 'SMS', '.', 'Can', 'be', 'ENROUTE', 'DELIVERED', 'or', 'FAILED', 'The', 'actual', 'status', 'report', 'object', 'may', 'be', 'accessed', 'via', 'the', 'report', 'attribute', 'if', 'status', 'is', 'DELIVERED', 'or', 'FAILED'] | train | https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/gsmmodem/modem.py#L76-L85 |
6,069 | iotile/coretools | iotilebuild/iotile/build/dev/resolverchain.py | DependencyResolverChain.update_dependency | def update_dependency(self, tile, depinfo, destdir=None):
"""Attempt to install or update a dependency to the latest version.
Args:
tile (IOTile): An IOTile object describing the tile that has the dependency
depinfo (dict): a dictionary from tile.dependencies specifying the dependency
destdir (string): An optional folder into which to unpack the dependency
Returns:
string: a string indicating the outcome. Possible values are:
"already installed"
"installed"
"updated"
"not found"
"""
if destdir is None:
destdir = os.path.join(tile.folder, 'build', 'deps', depinfo['unique_id'])
has_version = False
had_version = False
if os.path.exists(destdir):
has_version = True
had_version = True
for priority, rule in self.rules:
if not self._check_rule(rule, depinfo):
continue
resolver = self._find_resolver(rule)
if has_version:
deptile = IOTile(destdir)
# If the dependency is not up to date, don't do anything
depstatus = self._check_dep(depinfo, deptile, resolver)
if depstatus is False:
shutil.rmtree(destdir)
has_version = False
else:
continue
# Now try to resolve this dependency with the latest version
result = resolver.resolve(depinfo, destdir)
if not result['found'] and result.get('stop', False):
return 'not found'
if not result['found']:
continue
settings = {
'resolver': resolver.__class__.__name__,
'factory_args': rule[2]
}
if 'settings' in result:
settings['settings'] = result['settings']
self._save_depsettings(destdir, settings)
if had_version:
return "updated"
return "installed"
if has_version:
return "already installed"
return "not found" | python | def update_dependency(self, tile, depinfo, destdir=None):
"""Attempt to install or update a dependency to the latest version.
Args:
tile (IOTile): An IOTile object describing the tile that has the dependency
depinfo (dict): a dictionary from tile.dependencies specifying the dependency
destdir (string): An optional folder into which to unpack the dependency
Returns:
string: a string indicating the outcome. Possible values are:
"already installed"
"installed"
"updated"
"not found"
"""
if destdir is None:
destdir = os.path.join(tile.folder, 'build', 'deps', depinfo['unique_id'])
has_version = False
had_version = False
if os.path.exists(destdir):
has_version = True
had_version = True
for priority, rule in self.rules:
if not self._check_rule(rule, depinfo):
continue
resolver = self._find_resolver(rule)
if has_version:
deptile = IOTile(destdir)
# If the dependency is not up to date, don't do anything
depstatus = self._check_dep(depinfo, deptile, resolver)
if depstatus is False:
shutil.rmtree(destdir)
has_version = False
else:
continue
# Now try to resolve this dependency with the latest version
result = resolver.resolve(depinfo, destdir)
if not result['found'] and result.get('stop', False):
return 'not found'
if not result['found']:
continue
settings = {
'resolver': resolver.__class__.__name__,
'factory_args': rule[2]
}
if 'settings' in result:
settings['settings'] = result['settings']
self._save_depsettings(destdir, settings)
if had_version:
return "updated"
return "installed"
if has_version:
return "already installed"
return "not found" | ['def', 'update_dependency', '(', 'self', ',', 'tile', ',', 'depinfo', ',', 'destdir', '=', 'None', ')', ':', 'if', 'destdir', 'is', 'None', ':', 'destdir', '=', 'os', '.', 'path', '.', 'join', '(', 'tile', '.', 'folder', ',', "'build'", ',', "'deps'", ',', 'depinfo', '[', "'unique_id'", ']', ')', 'has_version', '=', 'False', 'had_version', '=', 'False', 'if', 'os', '.', 'path', '.', 'exists', '(', 'destdir', ')', ':', 'has_version', '=', 'True', 'had_version', '=', 'True', 'for', 'priority', ',', 'rule', 'in', 'self', '.', 'rules', ':', 'if', 'not', 'self', '.', '_check_rule', '(', 'rule', ',', 'depinfo', ')', ':', 'continue', 'resolver', '=', 'self', '.', '_find_resolver', '(', 'rule', ')', 'if', 'has_version', ':', 'deptile', '=', 'IOTile', '(', 'destdir', ')', "# If the dependency is not up to date, don't do anything", 'depstatus', '=', 'self', '.', '_check_dep', '(', 'depinfo', ',', 'deptile', ',', 'resolver', ')', 'if', 'depstatus', 'is', 'False', ':', 'shutil', '.', 'rmtree', '(', 'destdir', ')', 'has_version', '=', 'False', 'else', ':', 'continue', '# Now try to resolve this dependency with the latest version', 'result', '=', 'resolver', '.', 'resolve', '(', 'depinfo', ',', 'destdir', ')', 'if', 'not', 'result', '[', "'found'", ']', 'and', 'result', '.', 'get', '(', "'stop'", ',', 'False', ')', ':', 'return', "'not found'", 'if', 'not', 'result', '[', "'found'", ']', ':', 'continue', 'settings', '=', '{', "'resolver'", ':', 'resolver', '.', '__class__', '.', '__name__', ',', "'factory_args'", ':', 'rule', '[', '2', ']', '}', 'if', "'settings'", 'in', 'result', ':', 'settings', '[', "'settings'", ']', '=', 'result', '[', "'settings'", ']', 'self', '.', '_save_depsettings', '(', 'destdir', ',', 'settings', ')', 'if', 'had_version', ':', 'return', '"updated"', 'return', '"installed"', 'if', 'has_version', ':', 'return', '"already installed"', 'return', '"not found"'] | Attempt to install or update a dependency to the latest version.
Args:
tile (IOTile): An IOTile object describing the tile that has the dependency
depinfo (dict): a dictionary from tile.dependencies specifying the dependency
destdir (string): An optional folder into which to unpack the dependency
Returns:
string: a string indicating the outcome. Possible values are:
"already installed"
"installed"
"updated"
"not found" | ['Attempt', 'to', 'install', 'or', 'update', 'a', 'dependency', 'to', 'the', 'latest', 'version', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/dev/resolverchain.py#L108-L176 |
6,070 | mitsei/dlkit | dlkit/json_/osid/metadata.py | Metadata.supports_coordinate_type | def supports_coordinate_type(self, coordinate_type):
"""Tests if the given coordinate type is supported.
arg: coordinate_type (osid.type.Type): a coordinate Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``COORDINATE``
raise: NullArgument - ``coordinate_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.Metadata.supports_coordinate_type
if self._kwargs['syntax'] not in ['``COORDINATE``']:
raise errors.IllegalState()
return coordinate_type in self.get_coordinate_types | python | def supports_coordinate_type(self, coordinate_type):
"""Tests if the given coordinate type is supported.
arg: coordinate_type (osid.type.Type): a coordinate Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``COORDINATE``
raise: NullArgument - ``coordinate_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.Metadata.supports_coordinate_type
if self._kwargs['syntax'] not in ['``COORDINATE``']:
raise errors.IllegalState()
return coordinate_type in self.get_coordinate_types | ['def', 'supports_coordinate_type', '(', 'self', ',', 'coordinate_type', ')', ':', '# Implemented from template for osid.Metadata.supports_coordinate_type', 'if', 'self', '.', '_kwargs', '[', "'syntax'", ']', 'not', 'in', '[', "'``COORDINATE``'", ']', ':', 'raise', 'errors', '.', 'IllegalState', '(', ')', 'return', 'coordinate_type', 'in', 'self', '.', 'get_coordinate_types'] | Tests if the given coordinate type is supported.
arg: coordinate_type (osid.type.Type): a coordinate Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``COORDINATE``
raise: NullArgument - ``coordinate_type`` is ``null``
*compliance: mandatory -- This method must be implemented.* | ['Tests', 'if', 'the', 'given', 'coordinate', 'type', 'is', 'supported', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/osid/metadata.py#L304-L318 |
6,071 | wangsix/vmo | vmo/analysis/analysis.py | _query_init | def _query_init(k, oracle, query, method='all'):
"""A helper function for query-matching function initialization."""
if method == 'all':
a = np.subtract(query, [oracle.f_array[t] for t in oracle.latent[oracle.data[k]]])
dvec = (a * a).sum(axis=1) # Could skip the sqrt
_d = dvec.argmin()
return oracle.latent[oracle.data[k]][_d], dvec[_d]
else:
a = np.subtract(query, oracle.f_array[k])
dvec = (a * a).sum() # Could skip the sqrt
return k, dvec | python | def _query_init(k, oracle, query, method='all'):
"""A helper function for query-matching function initialization."""
if method == 'all':
a = np.subtract(query, [oracle.f_array[t] for t in oracle.latent[oracle.data[k]]])
dvec = (a * a).sum(axis=1) # Could skip the sqrt
_d = dvec.argmin()
return oracle.latent[oracle.data[k]][_d], dvec[_d]
else:
a = np.subtract(query, oracle.f_array[k])
dvec = (a * a).sum() # Could skip the sqrt
return k, dvec | ['def', '_query_init', '(', 'k', ',', 'oracle', ',', 'query', ',', 'method', '=', "'all'", ')', ':', 'if', 'method', '==', "'all'", ':', 'a', '=', 'np', '.', 'subtract', '(', 'query', ',', '[', 'oracle', '.', 'f_array', '[', 't', ']', 'for', 't', 'in', 'oracle', '.', 'latent', '[', 'oracle', '.', 'data', '[', 'k', ']', ']', ']', ')', 'dvec', '=', '(', 'a', '*', 'a', ')', '.', 'sum', '(', 'axis', '=', '1', ')', '# Could skip the sqrt', '_d', '=', 'dvec', '.', 'argmin', '(', ')', 'return', 'oracle', '.', 'latent', '[', 'oracle', '.', 'data', '[', 'k', ']', ']', '[', '_d', ']', ',', 'dvec', '[', '_d', ']', 'else', ':', 'a', '=', 'np', '.', 'subtract', '(', 'query', ',', 'oracle', '.', 'f_array', '[', 'k', ']', ')', 'dvec', '=', '(', 'a', '*', 'a', ')', '.', 'sum', '(', ')', '# Could skip the sqrt', 'return', 'k', ',', 'dvec'] | A helper function for query-matching function initialization. | ['A', 'helper', 'function', 'for', 'query', '-', 'matching', 'function', 'initialization', '.'] | train | https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/analysis/analysis.py#L449-L460 |
6,072 | chimera0/accel-brain-code | Reinforcement-Learning/pyqlearning/annealing_model.py | AnnealingModel.set_var_arr | def set_var_arr(self, value):
''' setter '''
if isinstance(value, np.ndarray):
self.__var_arr = value
else:
raise TypeError() | python | def set_var_arr(self, value):
''' setter '''
if isinstance(value, np.ndarray):
self.__var_arr = value
else:
raise TypeError() | ['def', 'set_var_arr', '(', 'self', ',', 'value', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'np', '.', 'ndarray', ')', ':', 'self', '.', '__var_arr', '=', 'value', 'else', ':', 'raise', 'TypeError', '(', ')'] | setter | ['setter'] | train | https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/pyqlearning/annealing_model.py#L92-L97 |
6,073 | DLR-RM/RAFCON | source/rafcon/gui/mygaphas/items/state.py | StateView._calculate_port_pos_on_line | def _calculate_port_pos_on_line(self, port_num, side_length, port_width=None):
"""Calculate the position of a port on a line
The position depends on the number of element. Elements are equally spaced. If the end of the line is
reached, ports are stacked.
:param int port_num: The number of the port of that type
:param float side_length: The length of the side the element is placed on
:param float port_width: The width of one port
:return: The position on the line for the given port
:rtype: float
"""
if port_width is None:
port_width = 2 * self.border_width
border_size = self.border_width
pos = 0.5 * border_size + port_num * port_width
outermost_pos = max(side_length / 2., side_length - 0.5 * border_size - port_width)
pos = min(pos, outermost_pos)
return pos | python | def _calculate_port_pos_on_line(self, port_num, side_length, port_width=None):
"""Calculate the position of a port on a line
The position depends on the number of element. Elements are equally spaced. If the end of the line is
reached, ports are stacked.
:param int port_num: The number of the port of that type
:param float side_length: The length of the side the element is placed on
:param float port_width: The width of one port
:return: The position on the line for the given port
:rtype: float
"""
if port_width is None:
port_width = 2 * self.border_width
border_size = self.border_width
pos = 0.5 * border_size + port_num * port_width
outermost_pos = max(side_length / 2., side_length - 0.5 * border_size - port_width)
pos = min(pos, outermost_pos)
return pos | ['def', '_calculate_port_pos_on_line', '(', 'self', ',', 'port_num', ',', 'side_length', ',', 'port_width', '=', 'None', ')', ':', 'if', 'port_width', 'is', 'None', ':', 'port_width', '=', '2', '*', 'self', '.', 'border_width', 'border_size', '=', 'self', '.', 'border_width', 'pos', '=', '0.5', '*', 'border_size', '+', 'port_num', '*', 'port_width', 'outermost_pos', '=', 'max', '(', 'side_length', '/', '2.', ',', 'side_length', '-', '0.5', '*', 'border_size', '-', 'port_width', ')', 'pos', '=', 'min', '(', 'pos', ',', 'outermost_pos', ')', 'return', 'pos'] | Calculate the position of a port on a line
The position depends on the number of element. Elements are equally spaced. If the end of the line is
reached, ports are stacked.
:param int port_num: The number of the port of that type
:param float side_length: The length of the side the element is placed on
:param float port_width: The width of one port
:return: The position on the line for the given port
:rtype: float | ['Calculate', 'the', 'position', 'of', 'a', 'port', 'on', 'a', 'line'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/items/state.py#L796-L813 |
6,074 | yougov/pmxbot | pmxbot/core.py | Handler._set_implied_name | def _set_implied_name(self):
"Allow the name of this handler to default to the function name."
if getattr(self, 'name', None) is None:
self.name = self.func.__name__
self.name = self.name.lower() | python | def _set_implied_name(self):
"Allow the name of this handler to default to the function name."
if getattr(self, 'name', None) is None:
self.name = self.func.__name__
self.name = self.name.lower() | ['def', '_set_implied_name', '(', 'self', ')', ':', 'if', 'getattr', '(', 'self', ',', "'name'", ',', 'None', ')', 'is', 'None', ':', 'self', '.', 'name', '=', 'self', '.', 'func', '.', '__name__', 'self', '.', 'name', '=', 'self', '.', 'name', '.', 'lower', '(', ')'] | Allow the name of this handler to default to the function name. | ['Allow', 'the', 'name', 'of', 'this', 'handler', 'to', 'default', 'to', 'the', 'function', 'name', '.'] | train | https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/core.py#L181-L185 |
6,075 | senaite/senaite.core | bika/lims/browser/analyses/view.py | AnalysesView.is_analysis_instrument_valid | def is_analysis_instrument_valid(self, analysis_brain):
"""Return if the analysis has a valid instrument.
If the analysis passed in is from ReferenceAnalysis type or does not
have an instrument assigned, returns True
:param analysis_brain: Brain that represents an analysis
:return: True if the instrument assigned is valid or is None"""
if analysis_brain.meta_type == 'ReferenceAnalysis':
# If this is a ReferenceAnalysis, there is no need to check the
# validity of the instrument, because this is a QC analysis and by
# definition, it has the ability to promote an instrument to a
# valid state if the result is correct.
return True
instrument = self.get_instrument(analysis_brain)
return not instrument or instrument.isValid() | python | def is_analysis_instrument_valid(self, analysis_brain):
"""Return if the analysis has a valid instrument.
If the analysis passed in is from ReferenceAnalysis type or does not
have an instrument assigned, returns True
:param analysis_brain: Brain that represents an analysis
:return: True if the instrument assigned is valid or is None"""
if analysis_brain.meta_type == 'ReferenceAnalysis':
# If this is a ReferenceAnalysis, there is no need to check the
# validity of the instrument, because this is a QC analysis and by
# definition, it has the ability to promote an instrument to a
# valid state if the result is correct.
return True
instrument = self.get_instrument(analysis_brain)
return not instrument or instrument.isValid() | ['def', 'is_analysis_instrument_valid', '(', 'self', ',', 'analysis_brain', ')', ':', 'if', 'analysis_brain', '.', 'meta_type', '==', "'ReferenceAnalysis'", ':', '# If this is a ReferenceAnalysis, there is no need to check the', '# validity of the instrument, because this is a QC analysis and by', '# definition, it has the ability to promote an instrument to a', '# valid state if the result is correct.', 'return', 'True', 'instrument', '=', 'self', '.', 'get_instrument', '(', 'analysis_brain', ')', 'return', 'not', 'instrument', 'or', 'instrument', '.', 'isValid', '(', ')'] | Return if the analysis has a valid instrument.
If the analysis passed in is from ReferenceAnalysis type or does not
have an instrument assigned, returns True
:param analysis_brain: Brain that represents an analysis
:return: True if the instrument assigned is valid or is None | ['Return', 'if', 'the', 'analysis', 'has', 'a', 'valid', 'instrument', '.'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analyses/view.py#L348-L363 |
6,076 | UCL-INGI/INGInious | inginious/common/tags.py | Tag.create_tags_from_dict | def create_tags_from_dict(cls, tag_dict):
"""
Build a tuple of list of Tag objects based on the tag_dict.
The tuple contains 3 lists.
- The first list contains skill tags
- The second list contains misconception tags
- The third list contains category tags
"""
tag_list_common = []
tag_list_misconception = []
tag_list_organisational = []
for tag in tag_dict:
try:
id = tag_dict[tag]["id"]
name = tag_dict[tag]["name"]
visible = tag_dict[tag]["visible"]
description = tag_dict[tag]["description"]
type = tag_dict[tag]["type"]
if type == 2:
tag_list_organisational.insert(int(tag), Tag(id, name, description, visible, type))
elif type == 1:
tag_list_misconception.insert(int(tag), Tag(id, name, description, visible, type))
else:
tag_list_common.insert(int(tag), Tag(id, name, description, visible, type))
except KeyError:
pass
return tag_list_common, tag_list_misconception, tag_list_organisational | python | def create_tags_from_dict(cls, tag_dict):
"""
Build a tuple of list of Tag objects based on the tag_dict.
The tuple contains 3 lists.
- The first list contains skill tags
- The second list contains misconception tags
- The third list contains category tags
"""
tag_list_common = []
tag_list_misconception = []
tag_list_organisational = []
for tag in tag_dict:
try:
id = tag_dict[tag]["id"]
name = tag_dict[tag]["name"]
visible = tag_dict[tag]["visible"]
description = tag_dict[tag]["description"]
type = tag_dict[tag]["type"]
if type == 2:
tag_list_organisational.insert(int(tag), Tag(id, name, description, visible, type))
elif type == 1:
tag_list_misconception.insert(int(tag), Tag(id, name, description, visible, type))
else:
tag_list_common.insert(int(tag), Tag(id, name, description, visible, type))
except KeyError:
pass
return tag_list_common, tag_list_misconception, tag_list_organisational | ['def', 'create_tags_from_dict', '(', 'cls', ',', 'tag_dict', ')', ':', 'tag_list_common', '=', '[', ']', 'tag_list_misconception', '=', '[', ']', 'tag_list_organisational', '=', '[', ']', 'for', 'tag', 'in', 'tag_dict', ':', 'try', ':', 'id', '=', 'tag_dict', '[', 'tag', ']', '[', '"id"', ']', 'name', '=', 'tag_dict', '[', 'tag', ']', '[', '"name"', ']', 'visible', '=', 'tag_dict', '[', 'tag', ']', '[', '"visible"', ']', 'description', '=', 'tag_dict', '[', 'tag', ']', '[', '"description"', ']', 'type', '=', 'tag_dict', '[', 'tag', ']', '[', '"type"', ']', 'if', 'type', '==', '2', ':', 'tag_list_organisational', '.', 'insert', '(', 'int', '(', 'tag', ')', ',', 'Tag', '(', 'id', ',', 'name', ',', 'description', ',', 'visible', ',', 'type', ')', ')', 'elif', 'type', '==', '1', ':', 'tag_list_misconception', '.', 'insert', '(', 'int', '(', 'tag', ')', ',', 'Tag', '(', 'id', ',', 'name', ',', 'description', ',', 'visible', ',', 'type', ')', ')', 'else', ':', 'tag_list_common', '.', 'insert', '(', 'int', '(', 'tag', ')', ',', 'Tag', '(', 'id', ',', 'name', ',', 'description', ',', 'visible', ',', 'type', ')', ')', 'except', 'KeyError', ':', 'pass', 'return', 'tag_list_common', ',', 'tag_list_misconception', ',', 'tag_list_organisational'] | Build a tuple of list of Tag objects based on the tag_dict.
The tuple contains 3 lists.
- The first list contains skill tags
- The second list contains misconception tags
- The third list contains category tags | ['Build', 'a', 'tuple', 'of', 'list', 'of', 'Tag', 'objects', 'based', 'on', 'the', 'tag_dict', '.', 'The', 'tuple', 'contains', '3', 'lists', '.', '-', 'The', 'first', 'list', 'contains', 'skill', 'tags', '-', 'The', 'second', 'list', 'contains', 'misconception', 'tags', '-', 'The', 'third', 'list', 'contains', 'category', 'tags'] | train | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/common/tags.py#L73-L100 |
6,077 | Alignak-monitoring/alignak | alignak/daterange.py | AbstractDaterange.get_min_from_t | def get_min_from_t(self, timestamp):
"""Get next time from t where a timerange is valid (withing range)
:param timestamp: base time to look for the next one
:return: time where a timerange is valid
:rtype: int
"""
if self.is_time_valid(timestamp):
return timestamp
t_day_epoch = get_day(timestamp)
tr_mins = self.get_min_sec_from_morning()
return t_day_epoch + tr_mins | python | def get_min_from_t(self, timestamp):
"""Get next time from t where a timerange is valid (withing range)
:param timestamp: base time to look for the next one
:return: time where a timerange is valid
:rtype: int
"""
if self.is_time_valid(timestamp):
return timestamp
t_day_epoch = get_day(timestamp)
tr_mins = self.get_min_sec_from_morning()
return t_day_epoch + tr_mins | ['def', 'get_min_from_t', '(', 'self', ',', 'timestamp', ')', ':', 'if', 'self', '.', 'is_time_valid', '(', 'timestamp', ')', ':', 'return', 'timestamp', 't_day_epoch', '=', 'get_day', '(', 'timestamp', ')', 'tr_mins', '=', 'self', '.', 'get_min_sec_from_morning', '(', ')', 'return', 't_day_epoch', '+', 'tr_mins'] | Get next time from t where a timerange is valid (withing range)
:param timestamp: base time to look for the next one
:return: time where a timerange is valid
:rtype: int | ['Get', 'next', 'time', 'from', 't', 'where', 'a', 'timerange', 'is', 'valid', '(', 'withing', 'range', ')'] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L415-L426 |
6,078 | sorgerlab/indra | indra/preassembler/hierarchy_manager.py | HierarchyManager.get_parents | def get_parents(self, uri, type='all'):
"""Return parents of a given entry.
Parameters
----------
uri : str
The URI of the entry whose parents are to be returned. See the
get_uri method to construct this URI from a name space and id.
type : str
'all': return all parents irrespective of level;
'immediate': return only the immediate parents;
'top': return only the highest level parents
"""
# First do a quick dict lookup to see if there are any parents
all_parents = set(self.isa_or_partof_closure.get(uri, []))
# If there are no parents or we are looking for all, we can return here
if not all_parents or type == 'all':
return all_parents
# If we need immediate parents, we search again, this time knowing that
# the uri is definitely in the graph since it has some parents
if type == 'immediate':
node = rdflib.term.URIRef(uri)
immediate_parents = list(set(self.isa_or_partof_objects(node)))
return [p.toPython() for p in immediate_parents]
elif type == 'top':
top_parents = [p for p in all_parents if
not self.isa_or_partof_closure.get(p)]
return top_parents | python | def get_parents(self, uri, type='all'):
"""Return parents of a given entry.
Parameters
----------
uri : str
The URI of the entry whose parents are to be returned. See the
get_uri method to construct this URI from a name space and id.
type : str
'all': return all parents irrespective of level;
'immediate': return only the immediate parents;
'top': return only the highest level parents
"""
# First do a quick dict lookup to see if there are any parents
all_parents = set(self.isa_or_partof_closure.get(uri, []))
# If there are no parents or we are looking for all, we can return here
if not all_parents or type == 'all':
return all_parents
# If we need immediate parents, we search again, this time knowing that
# the uri is definitely in the graph since it has some parents
if type == 'immediate':
node = rdflib.term.URIRef(uri)
immediate_parents = list(set(self.isa_or_partof_objects(node)))
return [p.toPython() for p in immediate_parents]
elif type == 'top':
top_parents = [p for p in all_parents if
not self.isa_or_partof_closure.get(p)]
return top_parents | ['def', 'get_parents', '(', 'self', ',', 'uri', ',', 'type', '=', "'all'", ')', ':', '# First do a quick dict lookup to see if there are any parents', 'all_parents', '=', 'set', '(', 'self', '.', 'isa_or_partof_closure', '.', 'get', '(', 'uri', ',', '[', ']', ')', ')', '# If there are no parents or we are looking for all, we can return here', 'if', 'not', 'all_parents', 'or', 'type', '==', "'all'", ':', 'return', 'all_parents', '# If we need immediate parents, we search again, this time knowing that', '# the uri is definitely in the graph since it has some parents', 'if', 'type', '==', "'immediate'", ':', 'node', '=', 'rdflib', '.', 'term', '.', 'URIRef', '(', 'uri', ')', 'immediate_parents', '=', 'list', '(', 'set', '(', 'self', '.', 'isa_or_partof_objects', '(', 'node', ')', ')', ')', 'return', '[', 'p', '.', 'toPython', '(', ')', 'for', 'p', 'in', 'immediate_parents', ']', 'elif', 'type', '==', "'top'", ':', 'top_parents', '=', '[', 'p', 'for', 'p', 'in', 'all_parents', 'if', 'not', 'self', '.', 'isa_or_partof_closure', '.', 'get', '(', 'p', ')', ']', 'return', 'top_parents'] | Return parents of a given entry.
Parameters
----------
uri : str
The URI of the entry whose parents are to be returned. See the
get_uri method to construct this URI from a name space and id.
type : str
'all': return all parents irrespective of level;
'immediate': return only the immediate parents;
'top': return only the highest level parents | ['Return', 'parents', 'of', 'a', 'given', 'entry', '.'] | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L411-L439 |
6,079 | cds-astro/mocpy | mocpy/tmoc/tmoc.py | TimeMOC.from_time_ranges | def from_time_ranges(cls, min_times, max_times, delta_t=DEFAULT_OBSERVATION_TIME):
"""
Create a TimeMOC from a range defined by two `astropy.time.Time`
Parameters
----------
min_times : `astropy.time.Time`
astropy times defining the left part of the intervals
max_times : `astropy.time.Time`
astropy times defining the right part of the intervals
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This data is used to compute the
more efficient TimeMOC order to represent the observations (Best order = the less precise order which
is able to discriminate two observations separated by ``delta_t``).
Returns
-------
time_moc : `~mocpy.tmoc.TimeMOC`
"""
min_times_arr = np.asarray(min_times.jd * TimeMOC.DAY_MICRO_SEC, dtype=int)
max_times_arr = np.asarray(max_times.jd * TimeMOC.DAY_MICRO_SEC, dtype=int)
intervals_arr = np.vstack((min_times_arr, max_times_arr + 1)).T
# degrade the TimeMoc to the order computer from ``delta_t``
order = TimeMOC.time_resolution_to_order(delta_t)
return TimeMOC(IntervalSet(intervals_arr)).degrade_to_order(order) | python | def from_time_ranges(cls, min_times, max_times, delta_t=DEFAULT_OBSERVATION_TIME):
"""
Create a TimeMOC from a range defined by two `astropy.time.Time`
Parameters
----------
min_times : `astropy.time.Time`
astropy times defining the left part of the intervals
max_times : `astropy.time.Time`
astropy times defining the right part of the intervals
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This data is used to compute the
more efficient TimeMOC order to represent the observations (Best order = the less precise order which
is able to discriminate two observations separated by ``delta_t``).
Returns
-------
time_moc : `~mocpy.tmoc.TimeMOC`
"""
min_times_arr = np.asarray(min_times.jd * TimeMOC.DAY_MICRO_SEC, dtype=int)
max_times_arr = np.asarray(max_times.jd * TimeMOC.DAY_MICRO_SEC, dtype=int)
intervals_arr = np.vstack((min_times_arr, max_times_arr + 1)).T
# degrade the TimeMoc to the order computer from ``delta_t``
order = TimeMOC.time_resolution_to_order(delta_t)
return TimeMOC(IntervalSet(intervals_arr)).degrade_to_order(order) | ['def', 'from_time_ranges', '(', 'cls', ',', 'min_times', ',', 'max_times', ',', 'delta_t', '=', 'DEFAULT_OBSERVATION_TIME', ')', ':', 'min_times_arr', '=', 'np', '.', 'asarray', '(', 'min_times', '.', 'jd', '*', 'TimeMOC', '.', 'DAY_MICRO_SEC', ',', 'dtype', '=', 'int', ')', 'max_times_arr', '=', 'np', '.', 'asarray', '(', 'max_times', '.', 'jd', '*', 'TimeMOC', '.', 'DAY_MICRO_SEC', ',', 'dtype', '=', 'int', ')', 'intervals_arr', '=', 'np', '.', 'vstack', '(', '(', 'min_times_arr', ',', 'max_times_arr', '+', '1', ')', ')', '.', 'T', '# degrade the TimeMoc to the order computer from ``delta_t``', 'order', '=', 'TimeMOC', '.', 'time_resolution_to_order', '(', 'delta_t', ')', 'return', 'TimeMOC', '(', 'IntervalSet', '(', 'intervals_arr', ')', ')', '.', 'degrade_to_order', '(', 'order', ')'] | Create a TimeMOC from a range defined by two `astropy.time.Time`
Parameters
----------
min_times : `astropy.time.Time`
astropy times defining the left part of the intervals
max_times : `astropy.time.Time`
astropy times defining the right part of the intervals
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This data is used to compute the
more efficient TimeMOC order to represent the observations (Best order = the less precise order which
is able to discriminate two observations separated by ``delta_t``).
Returns
-------
time_moc : `~mocpy.tmoc.TimeMOC` | ['Create', 'a', 'TimeMOC', 'from', 'a', 'range', 'defined', 'by', 'two', 'astropy', '.', 'time', '.', 'Time'] | train | https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/tmoc/tmoc.py#L56-L82 |
6,080 | Genida/django-appsettings | src/appsettings/settings.py | Setting.default_value | def default_value(self):
"""
Property to return the default value.
If the default value is callable and call_default is True, return
the result of default(). Else return default.
Returns:
object: the default value.
"""
if callable(self.default) and self.call_default:
return self.default()
return self.default | python | def default_value(self):
"""
Property to return the default value.
If the default value is callable and call_default is True, return
the result of default(). Else return default.
Returns:
object: the default value.
"""
if callable(self.default) and self.call_default:
return self.default()
return self.default | ['def', 'default_value', '(', 'self', ')', ':', 'if', 'callable', '(', 'self', '.', 'default', ')', 'and', 'self', '.', 'call_default', ':', 'return', 'self', '.', 'default', '(', ')', 'return', 'self', '.', 'default'] | Property to return the default value.
If the default value is callable and call_default is True, return
the result of default(). Else return default.
Returns:
object: the default value. | ['Property', 'to', 'return', 'the', 'default', 'value', '.'] | train | https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L446-L458 |
6,081 | diffeo/rejester | rejester/_task_master.py | TaskMaster.get_work_unit_status | def get_work_unit_status(self, work_spec_name, work_unit_key):
'''Get a high-level status for some work unit.
The return value is a dictionary. The only required key is
``status``, which could be any of:
``missing``
The work unit does not exist anywhere
``available``
The work unit is available for new workers; additional
keys include ``expiration`` (may be 0)
``pending``
The work unit is being worked on; additional keys include
``expiration`` and ``worker_id`` (usually)
``blocked``
The work unit is waiting for some other work units to finish;
additional keys include ``depends_on``
``finished``
The work unit has completed
``failed``
The work unit failed; additional keys include ``traceback``
:param str work_spec_name: name of the work spec
:param str work_unit_name: name of the work unit
:return: dictionary description of summary status
'''
with self.registry.lock(identifier=self.worker_id) as session:
# In the available list?
(unit,priority) = session.get(WORK_UNITS_ + work_spec_name,
work_unit_key, include_priority=True)
if unit:
result = {}
if priority < time.time():
result['status'] = 'available'
else:
result['status'] = 'pending'
result['expiration'] = priority
# ...is anyone working on it?
worker = session.get(WORK_UNITS_ + work_spec_name + "_locks",
work_unit_key)
if worker:
result['worker_id'] = worker
return result
# In the finished list?
unit = session.get(WORK_UNITS_ + work_spec_name + _FINISHED,
work_unit_key)
if unit:
return { 'status': 'finished' }
# In the failed list?
unit = session.get(WORK_UNITS_ + work_spec_name + _FAILED,
work_unit_key)
if unit:
result = { 'status': 'failed' }
if 'traceback' in unit:
result['traceback'] = unit['traceback']
return result
# In the blocked list?
unit = session.get(WORK_UNITS_ + work_spec_name + _BLOCKED,
work_unit_key)
if unit:
# This should always have *something*, right?
deps = session.get(WORK_UNITS_ + work_spec_name + _DEPENDS,
work_unit_key, default=[])
result = { 'status': 'blocked',
'depends_on': deps }
return result
return { 'status': 'missing' } | python | def get_work_unit_status(self, work_spec_name, work_unit_key):
'''Get a high-level status for some work unit.
The return value is a dictionary. The only required key is
``status``, which could be any of:
``missing``
The work unit does not exist anywhere
``available``
The work unit is available for new workers; additional
keys include ``expiration`` (may be 0)
``pending``
The work unit is being worked on; additional keys include
``expiration`` and ``worker_id`` (usually)
``blocked``
The work unit is waiting for some other work units to finish;
additional keys include ``depends_on``
``finished``
The work unit has completed
``failed``
The work unit failed; additional keys include ``traceback``
:param str work_spec_name: name of the work spec
:param str work_unit_name: name of the work unit
:return: dictionary description of summary status
'''
with self.registry.lock(identifier=self.worker_id) as session:
# In the available list?
(unit,priority) = session.get(WORK_UNITS_ + work_spec_name,
work_unit_key, include_priority=True)
if unit:
result = {}
if priority < time.time():
result['status'] = 'available'
else:
result['status'] = 'pending'
result['expiration'] = priority
# ...is anyone working on it?
worker = session.get(WORK_UNITS_ + work_spec_name + "_locks",
work_unit_key)
if worker:
result['worker_id'] = worker
return result
# In the finished list?
unit = session.get(WORK_UNITS_ + work_spec_name + _FINISHED,
work_unit_key)
if unit:
return { 'status': 'finished' }
# In the failed list?
unit = session.get(WORK_UNITS_ + work_spec_name + _FAILED,
work_unit_key)
if unit:
result = { 'status': 'failed' }
if 'traceback' in unit:
result['traceback'] = unit['traceback']
return result
# In the blocked list?
unit = session.get(WORK_UNITS_ + work_spec_name + _BLOCKED,
work_unit_key)
if unit:
# This should always have *something*, right?
deps = session.get(WORK_UNITS_ + work_spec_name + _DEPENDS,
work_unit_key, default=[])
result = { 'status': 'blocked',
'depends_on': deps }
return result
return { 'status': 'missing' } | ['def', 'get_work_unit_status', '(', 'self', ',', 'work_spec_name', ',', 'work_unit_key', ')', ':', 'with', 'self', '.', 'registry', '.', 'lock', '(', 'identifier', '=', 'self', '.', 'worker_id', ')', 'as', 'session', ':', '# In the available list?', '(', 'unit', ',', 'priority', ')', '=', 'session', '.', 'get', '(', 'WORK_UNITS_', '+', 'work_spec_name', ',', 'work_unit_key', ',', 'include_priority', '=', 'True', ')', 'if', 'unit', ':', 'result', '=', '{', '}', 'if', 'priority', '<', 'time', '.', 'time', '(', ')', ':', 'result', '[', "'status'", ']', '=', "'available'", 'else', ':', 'result', '[', "'status'", ']', '=', "'pending'", 'result', '[', "'expiration'", ']', '=', 'priority', '# ...is anyone working on it?', 'worker', '=', 'session', '.', 'get', '(', 'WORK_UNITS_', '+', 'work_spec_name', '+', '"_locks"', ',', 'work_unit_key', ')', 'if', 'worker', ':', 'result', '[', "'worker_id'", ']', '=', 'worker', 'return', 'result', '# In the finished list?', 'unit', '=', 'session', '.', 'get', '(', 'WORK_UNITS_', '+', 'work_spec_name', '+', '_FINISHED', ',', 'work_unit_key', ')', 'if', 'unit', ':', 'return', '{', "'status'", ':', "'finished'", '}', '# In the failed list?', 'unit', '=', 'session', '.', 'get', '(', 'WORK_UNITS_', '+', 'work_spec_name', '+', '_FAILED', ',', 'work_unit_key', ')', 'if', 'unit', ':', 'result', '=', '{', "'status'", ':', "'failed'", '}', 'if', "'traceback'", 'in', 'unit', ':', 'result', '[', "'traceback'", ']', '=', 'unit', '[', "'traceback'", ']', 'return', 'result', '# In the blocked list?', 'unit', '=', 'session', '.', 'get', '(', 'WORK_UNITS_', '+', 'work_spec_name', '+', '_BLOCKED', ',', 'work_unit_key', ')', 'if', 'unit', ':', '# This should always have *something*, right?', 'deps', '=', 'session', '.', 'get', '(', 'WORK_UNITS_', '+', 'work_spec_name', '+', '_DEPENDS', ',', 'work_unit_key', ',', 'default', '=', '[', ']', ')', 'result', '=', '{', "'status'", ':', "'blocked'", ',', "'depends_on'", ':', 'deps', '}', 'return', 'result', 'return', '{', "'status'", ':', "'missing'", '}'] | Get a high-level status for some work unit.
The return value is a dictionary. The only required key is
``status``, which could be any of:
``missing``
The work unit does not exist anywhere
``available``
The work unit is available for new workers; additional
keys include ``expiration`` (may be 0)
``pending``
The work unit is being worked on; additional keys include
``expiration`` and ``worker_id`` (usually)
``blocked``
The work unit is waiting for some other work units to finish;
additional keys include ``depends_on``
``finished``
The work unit has completed
``failed``
The work unit failed; additional keys include ``traceback``
:param str work_spec_name: name of the work spec
:param str work_unit_name: name of the work unit
:return: dictionary description of summary status | ['Get', 'a', 'high', '-', 'level', 'status', 'for', 'some', 'work', 'unit', '.'] | train | https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_task_master.py#L1249-L1319 |
6,082 | gregmuellegger/django-superform | django_superform/fields.py | ModelFormField.shall_save | def shall_save(self, form, name, composite_form):
"""
Return ``True`` if the given ``composite_form`` (the nested form of
this field) shall be saved. Return ``False`` if the form shall not be
saved together with the super-form.
By default it will return ``False`` if the form was not changed and the
``empty_permitted`` argument for the form was set to ``True``. That way
you can allow empty forms.
"""
if composite_form.empty_permitted and not composite_form.has_changed():
return False
return True | python | def shall_save(self, form, name, composite_form):
"""
Return ``True`` if the given ``composite_form`` (the nested form of
this field) shall be saved. Return ``False`` if the form shall not be
saved together with the super-form.
By default it will return ``False`` if the form was not changed and the
``empty_permitted`` argument for the form was set to ``True``. That way
you can allow empty forms.
"""
if composite_form.empty_permitted and not composite_form.has_changed():
return False
return True | ['def', 'shall_save', '(', 'self', ',', 'form', ',', 'name', ',', 'composite_form', ')', ':', 'if', 'composite_form', '.', 'empty_permitted', 'and', 'not', 'composite_form', '.', 'has_changed', '(', ')', ':', 'return', 'False', 'return', 'True'] | Return ``True`` if the given ``composite_form`` (the nested form of
this field) shall be saved. Return ``False`` if the form shall not be
saved together with the super-form.
By default it will return ``False`` if the form was not changed and the
``empty_permitted`` argument for the form was set to ``True``. That way
you can allow empty forms. | ['Return', 'True', 'if', 'the', 'given', 'composite_form', '(', 'the', 'nested', 'form', 'of', 'this', 'field', ')', 'shall', 'be', 'saved', '.', 'Return', 'False', 'if', 'the', 'form', 'shall', 'not', 'be', 'saved', 'together', 'with', 'the', 'super', '-', 'form', '.'] | train | https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L253-L265 |
6,083 | AshleySetter/optoanalysis | optoanalysis/optoanalysis/optoanalysis.py | animate_2Dscatter | def animate_2Dscatter(x, y, NumAnimatedPoints=50, NTrailPoints=20,
xlabel="", ylabel="",
xlims=None, ylims=None, filename="testAnim.mp4",
bitrate=1e5, dpi=5e2, fps=30, figsize = [6, 6]):
"""
Animates x and y - where x and y are 1d arrays of x and y
positions and it plots x[i:i+NTrailPoints] and y[i:i+NTrailPoints]
against each other and iterates through i.
"""
fig, ax = _plt.subplots(figsize = figsize)
alphas = _np.linspace(0.1, 1, NTrailPoints)
rgba_colors = _np.zeros((NTrailPoints,4))
# for red the first column needs to be one
rgba_colors[:,0] = 1.0
# the fourth column needs to be your alphas
rgba_colors[:, 3] = alphas
scatter = ax.scatter(x[0:NTrailPoints], y[0:NTrailPoints], color=rgba_colors)
if xlims == None:
xlims = (min(x), max(x))
if ylims == None:
ylims = (min(y), max(y))
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
def animate(i, scatter):
scatter.axes.clear() # clear old scatter object
scatter = ax.scatter(x[i:i+NTrailPoints], y[i:i+NTrailPoints], color=rgba_colors, animated=True)
# create new scatter with updated data
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return scatter,
ani = _animation.FuncAnimation(fig, animate, _np.arange(1, NumAnimatedPoints),
interval=25, blit=True, fargs=[scatter])
ani.save(filename, bitrate=bitrate, dpi=dpi, fps=fps)
return None | python | def animate_2Dscatter(x, y, NumAnimatedPoints=50, NTrailPoints=20,
xlabel="", ylabel="",
xlims=None, ylims=None, filename="testAnim.mp4",
bitrate=1e5, dpi=5e2, fps=30, figsize = [6, 6]):
"""
Animates x and y - where x and y are 1d arrays of x and y
positions and it plots x[i:i+NTrailPoints] and y[i:i+NTrailPoints]
against each other and iterates through i.
"""
fig, ax = _plt.subplots(figsize = figsize)
alphas = _np.linspace(0.1, 1, NTrailPoints)
rgba_colors = _np.zeros((NTrailPoints,4))
# for red the first column needs to be one
rgba_colors[:,0] = 1.0
# the fourth column needs to be your alphas
rgba_colors[:, 3] = alphas
scatter = ax.scatter(x[0:NTrailPoints], y[0:NTrailPoints], color=rgba_colors)
if xlims == None:
xlims = (min(x), max(x))
if ylims == None:
ylims = (min(y), max(y))
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
def animate(i, scatter):
scatter.axes.clear() # clear old scatter object
scatter = ax.scatter(x[i:i+NTrailPoints], y[i:i+NTrailPoints], color=rgba_colors, animated=True)
# create new scatter with updated data
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return scatter,
ani = _animation.FuncAnimation(fig, animate, _np.arange(1, NumAnimatedPoints),
interval=25, blit=True, fargs=[scatter])
ani.save(filename, bitrate=bitrate, dpi=dpi, fps=fps)
return None | ['def', 'animate_2Dscatter', '(', 'x', ',', 'y', ',', 'NumAnimatedPoints', '=', '50', ',', 'NTrailPoints', '=', '20', ',', 'xlabel', '=', '""', ',', 'ylabel', '=', '""', ',', 'xlims', '=', 'None', ',', 'ylims', '=', 'None', ',', 'filename', '=', '"testAnim.mp4"', ',', 'bitrate', '=', '1e5', ',', 'dpi', '=', '5e2', ',', 'fps', '=', '30', ',', 'figsize', '=', '[', '6', ',', '6', ']', ')', ':', 'fig', ',', 'ax', '=', '_plt', '.', 'subplots', '(', 'figsize', '=', 'figsize', ')', 'alphas', '=', '_np', '.', 'linspace', '(', '0.1', ',', '1', ',', 'NTrailPoints', ')', 'rgba_colors', '=', '_np', '.', 'zeros', '(', '(', 'NTrailPoints', ',', '4', ')', ')', '# for red the first column needs to be one', 'rgba_colors', '[', ':', ',', '0', ']', '=', '1.0', '# the fourth column needs to be your alphas', 'rgba_colors', '[', ':', ',', '3', ']', '=', 'alphas', 'scatter', '=', 'ax', '.', 'scatter', '(', 'x', '[', '0', ':', 'NTrailPoints', ']', ',', 'y', '[', '0', ':', 'NTrailPoints', ']', ',', 'color', '=', 'rgba_colors', ')', 'if', 'xlims', '==', 'None', ':', 'xlims', '=', '(', 'min', '(', 'x', ')', ',', 'max', '(', 'x', ')', ')', 'if', 'ylims', '==', 'None', ':', 'ylims', '=', '(', 'min', '(', 'y', ')', ',', 'max', '(', 'y', ')', ')', 'ax', '.', 'set_xlim', '(', 'xlims', ')', 'ax', '.', 'set_ylim', '(', 'ylims', ')', 'ax', '.', 'set_xlabel', '(', 'xlabel', ')', 'ax', '.', 'set_ylabel', '(', 'ylabel', ')', 'def', 'animate', '(', 'i', ',', 'scatter', ')', ':', 'scatter', '.', 'axes', '.', 'clear', '(', ')', '# clear old scatter object', 'scatter', '=', 'ax', '.', 'scatter', '(', 'x', '[', 'i', ':', 'i', '+', 'NTrailPoints', ']', ',', 'y', '[', 'i', ':', 'i', '+', 'NTrailPoints', ']', ',', 'color', '=', 'rgba_colors', ',', 'animated', '=', 'True', ')', '# create new scatter with updated data', 'ax', '.', 'set_xlim', '(', 'xlims', ')', 'ax', '.', 'set_ylim', '(', 'ylims', ')', 'ax', '.', 'set_xlabel', '(', 'xlabel', ')', 'ax', '.', 'set_ylabel', '(', 'ylabel', ')', 'return', 'scatter', ',', 'ani', '=', '_animation', '.', 'FuncAnimation', '(', 'fig', ',', 'animate', ',', '_np', '.', 'arange', '(', '1', ',', 'NumAnimatedPoints', ')', ',', 'interval', '=', '25', ',', 'blit', '=', 'True', ',', 'fargs', '=', '[', 'scatter', ']', ')', 'ani', '.', 'save', '(', 'filename', ',', 'bitrate', '=', 'bitrate', ',', 'dpi', '=', 'dpi', ',', 'fps', '=', 'fps', ')', 'return', 'None'] | Animates x and y - where x and y are 1d arrays of x and y
positions and it plots x[i:i+NTrailPoints] and y[i:i+NTrailPoints]
against each other and iterates through i. | ['Animates', 'x', 'and', 'y', '-', 'where', 'x', 'and', 'y', 'are', '1d', 'arrays', 'of', 'x', 'and', 'y', 'positions', 'and', 'it', 'plots', 'x', '[', 'i', ':', 'i', '+', 'NTrailPoints', ']', 'and', 'y', '[', 'i', ':', 'i', '+', 'NTrailPoints', ']', 'against', 'each', 'other', 'and', 'iterates', 'through', 'i', '.'] | train | https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L2605-L2651 |
6,084 | Apitax/Apitax | apitax/api/controllers/migrations/scriptax_controller.py | get_driver_script | def get_driver_script(name, name2=None): # noqa: E501
"""Retrieve the contents of a script
Retrieve the contents of a script # noqa: E501
:param name2: Get status of a driver with this name
:type name2: str
:param name: The script name.
:type name: str
:rtype: Response
"""
response = errorIfUnauthorized(role='user')
if response:
return response
else:
response = ApitaxResponse()
print(name)
print(name2)
driver: Driver = LoadedDrivers.getDriver(name2)
response.body.add({'content': driver.getDriverScript(name)})
return Response(status=200, body=response.getResponseBody()) | python | def get_driver_script(name, name2=None): # noqa: E501
"""Retrieve the contents of a script
Retrieve the contents of a script # noqa: E501
:param name2: Get status of a driver with this name
:type name2: str
:param name: The script name.
:type name: str
:rtype: Response
"""
response = errorIfUnauthorized(role='user')
if response:
return response
else:
response = ApitaxResponse()
print(name)
print(name2)
driver: Driver = LoadedDrivers.getDriver(name2)
response.body.add({'content': driver.getDriverScript(name)})
return Response(status=200, body=response.getResponseBody()) | ['def', 'get_driver_script', '(', 'name', ',', 'name2', '=', 'None', ')', ':', '# noqa: E501', 'response', '=', 'errorIfUnauthorized', '(', 'role', '=', "'user'", ')', 'if', 'response', ':', 'return', 'response', 'else', ':', 'response', '=', 'ApitaxResponse', '(', ')', 'print', '(', 'name', ')', 'print', '(', 'name2', ')', 'driver', ':', 'Driver', '=', 'LoadedDrivers', '.', 'getDriver', '(', 'name2', ')', 'response', '.', 'body', '.', 'add', '(', '{', "'content'", ':', 'driver', '.', 'getDriverScript', '(', 'name', ')', '}', ')', 'return', 'Response', '(', 'status', '=', '200', ',', 'body', '=', 'response', '.', 'getResponseBody', '(', ')', ')'] | Retrieve the contents of a script
Retrieve the contents of a script # noqa: E501
:param name2: Get status of a driver with this name
:type name2: str
:param name: The script name.
:type name: str
:rtype: Response | ['Retrieve', 'the', 'contents', 'of', 'a', 'script'] | train | https://github.com/Apitax/Apitax/blob/3883e45f17e01eba4edac9d1bba42f0e7a748682/apitax/api/controllers/migrations/scriptax_controller.py#L76-L102 |
6,085 | materialsproject/pymatgen | pymatgen/analysis/reaction_calculator.py | BalancedReaction.normalize_to | def normalize_to(self, comp, factor=1):
"""
Normalizes the reaction to one of the compositions.
By default, normalizes such that the composition given has a
coefficient of 1. Another factor can be specified.
Args:
comp (Composition): Composition to normalize to
factor (float): Factor to normalize to. Defaults to 1.
"""
scale_factor = abs(1 / self._coeffs[self._all_comp.index(comp)]
* factor)
self._coeffs = [c * scale_factor for c in self._coeffs] | python | def normalize_to(self, comp, factor=1):
"""
Normalizes the reaction to one of the compositions.
By default, normalizes such that the composition given has a
coefficient of 1. Another factor can be specified.
Args:
comp (Composition): Composition to normalize to
factor (float): Factor to normalize to. Defaults to 1.
"""
scale_factor = abs(1 / self._coeffs[self._all_comp.index(comp)]
* factor)
self._coeffs = [c * scale_factor for c in self._coeffs] | ['def', 'normalize_to', '(', 'self', ',', 'comp', ',', 'factor', '=', '1', ')', ':', 'scale_factor', '=', 'abs', '(', '1', '/', 'self', '.', '_coeffs', '[', 'self', '.', '_all_comp', '.', 'index', '(', 'comp', ')', ']', '*', 'factor', ')', 'self', '.', '_coeffs', '=', '[', 'c', '*', 'scale_factor', 'for', 'c', 'in', 'self', '.', '_coeffs', ']'] | Normalizes the reaction to one of the compositions.
By default, normalizes such that the composition given has a
coefficient of 1. Another factor can be specified.
Args:
comp (Composition): Composition to normalize to
factor (float): Factor to normalize to. Defaults to 1. | ['Normalizes', 'the', 'reaction', 'to', 'one', 'of', 'the', 'compositions', '.', 'By', 'default', 'normalizes', 'such', 'that', 'the', 'composition', 'given', 'has', 'a', 'coefficient', 'of', '1', '.', 'Another', 'factor', 'can', 'be', 'specified', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/reaction_calculator.py#L94-L106 |
6,086 | twisted/axiom | axiom/store.py | Store.createSQL | def createSQL(self, sql, args=()):
"""
For use with auto-committing statements such as CREATE TABLE or CREATE
INDEX.
"""
before = time.time()
self._execSQL(sql, args)
after = time.time()
if after - before > 2.0:
log.msg('Extremely long CREATE: %s' % (after - before,))
log.msg(sql) | python | def createSQL(self, sql, args=()):
"""
For use with auto-committing statements such as CREATE TABLE or CREATE
INDEX.
"""
before = time.time()
self._execSQL(sql, args)
after = time.time()
if after - before > 2.0:
log.msg('Extremely long CREATE: %s' % (after - before,))
log.msg(sql) | ['def', 'createSQL', '(', 'self', ',', 'sql', ',', 'args', '=', '(', ')', ')', ':', 'before', '=', 'time', '.', 'time', '(', ')', 'self', '.', '_execSQL', '(', 'sql', ',', 'args', ')', 'after', '=', 'time', '.', 'time', '(', ')', 'if', 'after', '-', 'before', '>', '2.0', ':', 'log', '.', 'msg', '(', "'Extremely long CREATE: %s'", '%', '(', 'after', '-', 'before', ',', ')', ')', 'log', '.', 'msg', '(', 'sql', ')'] | For use with auto-committing statements such as CREATE TABLE or CREATE
INDEX. | ['For', 'use', 'with', 'auto', '-', 'committing', 'statements', 'such', 'as', 'CREATE', 'TABLE', 'or', 'CREATE', 'INDEX', '.'] | train | https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/store.py#L2360-L2370 |
6,087 | molmod/molmod | molmod/graphs.py | GraphSearch._iter_candidate_groups | def _iter_candidate_groups(self, init_match, edges0, edges1):
"""Divide the edges into groups"""
# collect all end vertices0 and end vertices1 that belong to the same
# group.
sources = {}
for start_vertex0, end_vertex0 in edges0:
l = sources.setdefault(start_vertex0, [])
l.append(end_vertex0)
dests = {}
for start_vertex1, end_vertex1 in edges1:
start_vertex0 = init_match.reverse[start_vertex1]
l = dests.setdefault(start_vertex0, [])
l.append(end_vertex1)
for start_vertex0, end_vertices0 in sources.items():
end_vertices1 = dests.get(start_vertex0, [])
yield end_vertices0, end_vertices1 | python | def _iter_candidate_groups(self, init_match, edges0, edges1):
"""Divide the edges into groups"""
# collect all end vertices0 and end vertices1 that belong to the same
# group.
sources = {}
for start_vertex0, end_vertex0 in edges0:
l = sources.setdefault(start_vertex0, [])
l.append(end_vertex0)
dests = {}
for start_vertex1, end_vertex1 in edges1:
start_vertex0 = init_match.reverse[start_vertex1]
l = dests.setdefault(start_vertex0, [])
l.append(end_vertex1)
for start_vertex0, end_vertices0 in sources.items():
end_vertices1 = dests.get(start_vertex0, [])
yield end_vertices0, end_vertices1 | ['def', '_iter_candidate_groups', '(', 'self', ',', 'init_match', ',', 'edges0', ',', 'edges1', ')', ':', '# collect all end vertices0 and end vertices1 that belong to the same', '# group.', 'sources', '=', '{', '}', 'for', 'start_vertex0', ',', 'end_vertex0', 'in', 'edges0', ':', 'l', '=', 'sources', '.', 'setdefault', '(', 'start_vertex0', ',', '[', ']', ')', 'l', '.', 'append', '(', 'end_vertex0', ')', 'dests', '=', '{', '}', 'for', 'start_vertex1', ',', 'end_vertex1', 'in', 'edges1', ':', 'start_vertex0', '=', 'init_match', '.', 'reverse', '[', 'start_vertex1', ']', 'l', '=', 'dests', '.', 'setdefault', '(', 'start_vertex0', ',', '[', ']', ')', 'l', '.', 'append', '(', 'end_vertex1', ')', 'for', 'start_vertex0', ',', 'end_vertices0', 'in', 'sources', '.', 'items', '(', ')', ':', 'end_vertices1', '=', 'dests', '.', 'get', '(', 'start_vertex0', ',', '[', ']', ')', 'yield', 'end_vertices0', ',', 'end_vertices1'] | Divide the edges into groups | ['Divide', 'the', 'edges', 'into', 'groups'] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1597-L1612 |
6,088 | philgyford/django-spectator | spectator/core/utils.py | chartify | def chartify(qs, score_field, cutoff=0, ensure_chartiness=True):
"""
Given a QuerySet it will go through and add a `chart_position` property to
each object returning a list of the objects.
If adjacent objects have the same 'score' (based on `score_field`) then
they will have the same `chart_position`. This can then be used in
templates for the `value` of <li> elements in an <ol>.
By default any objects with a score of 0 or less will be removed.
By default, if all the items in the chart have the same position, no items
will be returned (it's not much of a chart).
Keyword arguments:
qs -- The QuerySet
score_field -- The name of the numeric field that each object in the
QuerySet has, that will be used to compare their positions.
cutoff -- Any objects with a score of this value or below will be removed
from the list. Set to None to disable this.
ensure_chartiness -- If True, then if all items in the list have the same
score, an empty list will be returned.
"""
chart = []
position = 0
prev_obj = None
for counter, obj in enumerate(qs):
score = getattr(obj, score_field)
if score != getattr(prev_obj, score_field, None):
position = counter + 1
if cutoff is None or score > cutoff:
obj.chart_position = position
chart.append(obj)
prev_obj = obj
if ensure_chartiness and len(chart) > 0:
if getattr(chart[0], score_field) == getattr(chart[-1], score_field):
chart = []
return chart | python | def chartify(qs, score_field, cutoff=0, ensure_chartiness=True):
"""
Given a QuerySet it will go through and add a `chart_position` property to
each object returning a list of the objects.
If adjacent objects have the same 'score' (based on `score_field`) then
they will have the same `chart_position`. This can then be used in
templates for the `value` of <li> elements in an <ol>.
By default any objects with a score of 0 or less will be removed.
By default, if all the items in the chart have the same position, no items
will be returned (it's not much of a chart).
Keyword arguments:
qs -- The QuerySet
score_field -- The name of the numeric field that each object in the
QuerySet has, that will be used to compare their positions.
cutoff -- Any objects with a score of this value or below will be removed
from the list. Set to None to disable this.
ensure_chartiness -- If True, then if all items in the list have the same
score, an empty list will be returned.
"""
chart = []
position = 0
prev_obj = None
for counter, obj in enumerate(qs):
score = getattr(obj, score_field)
if score != getattr(prev_obj, score_field, None):
position = counter + 1
if cutoff is None or score > cutoff:
obj.chart_position = position
chart.append(obj)
prev_obj = obj
if ensure_chartiness and len(chart) > 0:
if getattr(chart[0], score_field) == getattr(chart[-1], score_field):
chart = []
return chart | ['def', 'chartify', '(', 'qs', ',', 'score_field', ',', 'cutoff', '=', '0', ',', 'ensure_chartiness', '=', 'True', ')', ':', 'chart', '=', '[', ']', 'position', '=', '0', 'prev_obj', '=', 'None', 'for', 'counter', ',', 'obj', 'in', 'enumerate', '(', 'qs', ')', ':', 'score', '=', 'getattr', '(', 'obj', ',', 'score_field', ')', 'if', 'score', '!=', 'getattr', '(', 'prev_obj', ',', 'score_field', ',', 'None', ')', ':', 'position', '=', 'counter', '+', '1', 'if', 'cutoff', 'is', 'None', 'or', 'score', '>', 'cutoff', ':', 'obj', '.', 'chart_position', '=', 'position', 'chart', '.', 'append', '(', 'obj', ')', 'prev_obj', '=', 'obj', 'if', 'ensure_chartiness', 'and', 'len', '(', 'chart', ')', '>', '0', ':', 'if', 'getattr', '(', 'chart', '[', '0', ']', ',', 'score_field', ')', '==', 'getattr', '(', 'chart', '[', '-', '1', ']', ',', 'score_field', ')', ':', 'chart', '=', '[', ']', 'return', 'chart'] | Given a QuerySet it will go through and add a `chart_position` property to
each object returning a list of the objects.
If adjacent objects have the same 'score' (based on `score_field`) then
they will have the same `chart_position`. This can then be used in
templates for the `value` of <li> elements in an <ol>.
By default any objects with a score of 0 or less will be removed.
By default, if all the items in the chart have the same position, no items
will be returned (it's not much of a chart).
Keyword arguments:
qs -- The QuerySet
score_field -- The name of the numeric field that each object in the
QuerySet has, that will be used to compare their positions.
cutoff -- Any objects with a score of this value or below will be removed
from the list. Set to None to disable this.
ensure_chartiness -- If True, then if all items in the list have the same
score, an empty list will be returned. | ['Given', 'a', 'QuerySet', 'it', 'will', 'go', 'through', 'and', 'add', 'a', 'chart_position', 'property', 'to', 'each', 'object', 'returning', 'a', 'list', 'of', 'the', 'objects', '.'] | train | https://github.com/philgyford/django-spectator/blob/f3c72004f9caa1fde0f5a3b2f0d2bf285fc01ada/spectator/core/utils.py#L28-L71 |
6,089 | openid/python-openid | openid/consumer/discover.py | findOPLocalIdentifier | def findOPLocalIdentifier(service_element, type_uris):
"""Find the OP-Local Identifier for this xrd:Service element.
This considers openid:Delegate to be a synonym for xrd:LocalID if
both OpenID 1.X and OpenID 2.0 types are present. If only OpenID
1.X is present, it returns the value of openid:Delegate. If only
OpenID 2.0 is present, it returns the value of xrd:LocalID. If
there is more than one LocalID tag and the values are different,
it raises a DiscoveryFailure. This is also triggered when the
xrd:LocalID and openid:Delegate tags are different.
@param service_element: The xrd:Service element
@type service_element: ElementTree.Node
@param type_uris: The xrd:Type values present in this service
element. This function could extract them, but higher level
code needs to do that anyway.
@type type_uris: [str]
@raises DiscoveryFailure: when discovery fails.
@returns: The OP-Local Identifier for this service element, if one
is present, or None otherwise.
@rtype: str or unicode or NoneType
"""
# XXX: Test this function on its own!
# Build the list of tags that could contain the OP-Local Identifier
local_id_tags = []
if (OPENID_1_1_TYPE in type_uris or
OPENID_1_0_TYPE in type_uris):
local_id_tags.append(nsTag(OPENID_1_0_NS, 'Delegate'))
if OPENID_2_0_TYPE in type_uris:
local_id_tags.append(nsTag(XRD_NS_2_0, 'LocalID'))
# Walk through all the matching tags and make sure that they all
# have the same value
local_id = None
for local_id_tag in local_id_tags:
for local_id_element in service_element.findall(local_id_tag):
if local_id is None:
local_id = local_id_element.text
elif local_id != local_id_element.text:
format = 'More than one %r tag found in one service element'
message = format % (local_id_tag,)
raise DiscoveryFailure(message, None)
return local_id | python | def findOPLocalIdentifier(service_element, type_uris):
"""Find the OP-Local Identifier for this xrd:Service element.
This considers openid:Delegate to be a synonym for xrd:LocalID if
both OpenID 1.X and OpenID 2.0 types are present. If only OpenID
1.X is present, it returns the value of openid:Delegate. If only
OpenID 2.0 is present, it returns the value of xrd:LocalID. If
there is more than one LocalID tag and the values are different,
it raises a DiscoveryFailure. This is also triggered when the
xrd:LocalID and openid:Delegate tags are different.
@param service_element: The xrd:Service element
@type service_element: ElementTree.Node
@param type_uris: The xrd:Type values present in this service
element. This function could extract them, but higher level
code needs to do that anyway.
@type type_uris: [str]
@raises DiscoveryFailure: when discovery fails.
@returns: The OP-Local Identifier for this service element, if one
is present, or None otherwise.
@rtype: str or unicode or NoneType
"""
# XXX: Test this function on its own!
# Build the list of tags that could contain the OP-Local Identifier
local_id_tags = []
if (OPENID_1_1_TYPE in type_uris or
OPENID_1_0_TYPE in type_uris):
local_id_tags.append(nsTag(OPENID_1_0_NS, 'Delegate'))
if OPENID_2_0_TYPE in type_uris:
local_id_tags.append(nsTag(XRD_NS_2_0, 'LocalID'))
# Walk through all the matching tags and make sure that they all
# have the same value
local_id = None
for local_id_tag in local_id_tags:
for local_id_element in service_element.findall(local_id_tag):
if local_id is None:
local_id = local_id_element.text
elif local_id != local_id_element.text:
format = 'More than one %r tag found in one service element'
message = format % (local_id_tag,)
raise DiscoveryFailure(message, None)
return local_id | ['def', 'findOPLocalIdentifier', '(', 'service_element', ',', 'type_uris', ')', ':', '# XXX: Test this function on its own!', '# Build the list of tags that could contain the OP-Local Identifier', 'local_id_tags', '=', '[', ']', 'if', '(', 'OPENID_1_1_TYPE', 'in', 'type_uris', 'or', 'OPENID_1_0_TYPE', 'in', 'type_uris', ')', ':', 'local_id_tags', '.', 'append', '(', 'nsTag', '(', 'OPENID_1_0_NS', ',', "'Delegate'", ')', ')', 'if', 'OPENID_2_0_TYPE', 'in', 'type_uris', ':', 'local_id_tags', '.', 'append', '(', 'nsTag', '(', 'XRD_NS_2_0', ',', "'LocalID'", ')', ')', '# Walk through all the matching tags and make sure that they all', '# have the same value', 'local_id', '=', 'None', 'for', 'local_id_tag', 'in', 'local_id_tags', ':', 'for', 'local_id_element', 'in', 'service_element', '.', 'findall', '(', 'local_id_tag', ')', ':', 'if', 'local_id', 'is', 'None', ':', 'local_id', '=', 'local_id_element', '.', 'text', 'elif', 'local_id', '!=', 'local_id_element', '.', 'text', ':', 'format', '=', "'More than one %r tag found in one service element'", 'message', '=', 'format', '%', '(', 'local_id_tag', ',', ')', 'raise', 'DiscoveryFailure', '(', 'message', ',', 'None', ')', 'return', 'local_id'] | Find the OP-Local Identifier for this xrd:Service element.
This considers openid:Delegate to be a synonym for xrd:LocalID if
both OpenID 1.X and OpenID 2.0 types are present. If only OpenID
1.X is present, it returns the value of openid:Delegate. If only
OpenID 2.0 is present, it returns the value of xrd:LocalID. If
there is more than one LocalID tag and the values are different,
it raises a DiscoveryFailure. This is also triggered when the
xrd:LocalID and openid:Delegate tags are different.
@param service_element: The xrd:Service element
@type service_element: ElementTree.Node
@param type_uris: The xrd:Type values present in this service
element. This function could extract them, but higher level
code needs to do that anyway.
@type type_uris: [str]
@raises DiscoveryFailure: when discovery fails.
@returns: The OP-Local Identifier for this service element, if one
is present, or None otherwise.
@rtype: str or unicode or NoneType | ['Find', 'the', 'OP', '-', 'Local', 'Identifier', 'for', 'this', 'xrd', ':', 'Service', 'element', '.'] | train | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/consumer/discover.py#L253-L301 |
6,090 | DataBiosphere/toil | src/toil/__init__.py | lookupEnvVar | def lookupEnvVar(name, envName, defaultValue):
"""
Use this for looking up environment variables that control Toil and are important enough to
log the result of that lookup.
:param str name: the human readable name of the variable
:param str envName: the name of the environment variable to lookup
:param str defaultValue: the fall-back value
:return: the value of the environment variable or the default value the variable is not set
:rtype: str
"""
try:
value = os.environ[envName]
except KeyError:
log.info('Using default %s of %s as %s is not set.', name, defaultValue, envName)
return defaultValue
else:
log.info('Overriding %s of %s with %s from %s.', name, defaultValue, value, envName)
return value | python | def lookupEnvVar(name, envName, defaultValue):
"""
Use this for looking up environment variables that control Toil and are important enough to
log the result of that lookup.
:param str name: the human readable name of the variable
:param str envName: the name of the environment variable to lookup
:param str defaultValue: the fall-back value
:return: the value of the environment variable or the default value the variable is not set
:rtype: str
"""
try:
value = os.environ[envName]
except KeyError:
log.info('Using default %s of %s as %s is not set.', name, defaultValue, envName)
return defaultValue
else:
log.info('Overriding %s of %s with %s from %s.', name, defaultValue, value, envName)
return value | ['def', 'lookupEnvVar', '(', 'name', ',', 'envName', ',', 'defaultValue', ')', ':', 'try', ':', 'value', '=', 'os', '.', 'environ', '[', 'envName', ']', 'except', 'KeyError', ':', 'log', '.', 'info', '(', "'Using default %s of %s as %s is not set.'", ',', 'name', ',', 'defaultValue', ',', 'envName', ')', 'return', 'defaultValue', 'else', ':', 'log', '.', 'info', '(', "'Overriding %s of %s with %s from %s.'", ',', 'name', ',', 'defaultValue', ',', 'value', ',', 'envName', ')', 'return', 'value'] | Use this for looking up environment variables that control Toil and are important enough to
log the result of that lookup.
:param str name: the human readable name of the variable
:param str envName: the name of the environment variable to lookup
:param str defaultValue: the fall-back value
:return: the value of the environment variable or the default value the variable is not set
:rtype: str | ['Use', 'this', 'for', 'looking', 'up', 'environment', 'variables', 'that', 'control', 'Toil', 'and', 'are', 'important', 'enough', 'to', 'log', 'the', 'result', 'of', 'that', 'lookup', '.'] | train | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/__init__.py#L236-L254 |
6,091 | merll/docker-map | dockermap/client/docker_util.py | DockerUtilityMixin.build_from_file | def build_from_file(self, dockerfile, tag, **kwargs):
"""
Builds a docker image from the given :class:`~dockermap.build.dockerfile.DockerFile`. Use this as a shortcut to
:meth:`build_from_context`, if no extra data is added to the context.
:param dockerfile: An instance of :class:`~dockermap.build.dockerfile.DockerFile`.
:type dockerfile: dockermap.build.dockerfile.DockerFile
:param tag: New image tag.
:type tag: unicode | str
:param kwargs: See :meth:`docker.client.Client.build`.
:return: New, generated image id or ``None``.
:rtype: unicode | str
"""
with DockerContext(dockerfile, finalize=True) as ctx:
return self.build_from_context(ctx, tag, **kwargs) | python | def build_from_file(self, dockerfile, tag, **kwargs):
"""
Builds a docker image from the given :class:`~dockermap.build.dockerfile.DockerFile`. Use this as a shortcut to
:meth:`build_from_context`, if no extra data is added to the context.
:param dockerfile: An instance of :class:`~dockermap.build.dockerfile.DockerFile`.
:type dockerfile: dockermap.build.dockerfile.DockerFile
:param tag: New image tag.
:type tag: unicode | str
:param kwargs: See :meth:`docker.client.Client.build`.
:return: New, generated image id or ``None``.
:rtype: unicode | str
"""
with DockerContext(dockerfile, finalize=True) as ctx:
return self.build_from_context(ctx, tag, **kwargs) | ['def', 'build_from_file', '(', 'self', ',', 'dockerfile', ',', 'tag', ',', '*', '*', 'kwargs', ')', ':', 'with', 'DockerContext', '(', 'dockerfile', ',', 'finalize', '=', 'True', ')', 'as', 'ctx', ':', 'return', 'self', '.', 'build_from_context', '(', 'ctx', ',', 'tag', ',', '*', '*', 'kwargs', ')'] | Builds a docker image from the given :class:`~dockermap.build.dockerfile.DockerFile`. Use this as a shortcut to
:meth:`build_from_context`, if no extra data is added to the context.
:param dockerfile: An instance of :class:`~dockermap.build.dockerfile.DockerFile`.
:type dockerfile: dockermap.build.dockerfile.DockerFile
:param tag: New image tag.
:type tag: unicode | str
:param kwargs: See :meth:`docker.client.Client.build`.
:return: New, generated image id or ``None``.
:rtype: unicode | str | ['Builds', 'a', 'docker', 'image', 'from', 'the', 'given', ':', 'class', ':', '~dockermap', '.', 'build', '.', 'dockerfile', '.', 'DockerFile', '.', 'Use', 'this', 'as', 'a', 'shortcut', 'to', ':', 'meth', ':', 'build_from_context', 'if', 'no', 'extra', 'data', 'is', 'added', 'to', 'the', 'context', '.'] | train | https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/client/docker_util.py#L142-L156 |
6,092 | quantmind/pulsar | pulsar/utils/config.py | Config.update | def update(self, data, default=False):
"""Update this :attr:`Config` with ``data``.
:param data: must be a ``Mapping`` like object exposing the ``item``
method for iterating through key-value pairs.
:param default: if ``True`` the updated :attr:`settings` will also
set their :attr:`~Setting.default` attribute with the
updating value (provided it is a valid one).
"""
for name, value in data.items():
if value is not None:
self.set(name, value, default) | python | def update(self, data, default=False):
"""Update this :attr:`Config` with ``data``.
:param data: must be a ``Mapping`` like object exposing the ``item``
method for iterating through key-value pairs.
:param default: if ``True`` the updated :attr:`settings` will also
set their :attr:`~Setting.default` attribute with the
updating value (provided it is a valid one).
"""
for name, value in data.items():
if value is not None:
self.set(name, value, default) | ['def', 'update', '(', 'self', ',', 'data', ',', 'default', '=', 'False', ')', ':', 'for', 'name', ',', 'value', 'in', 'data', '.', 'items', '(', ')', ':', 'if', 'value', 'is', 'not', 'None', ':', 'self', '.', 'set', '(', 'name', ',', 'value', ',', 'default', ')'] | Update this :attr:`Config` with ``data``.
:param data: must be a ``Mapping`` like object exposing the ``item``
method for iterating through key-value pairs.
:param default: if ``True`` the updated :attr:`settings` will also
set their :attr:`~Setting.default` attribute with the
updating value (provided it is a valid one). | ['Update', 'this', ':', 'attr', ':', 'Config', 'with', 'data', '.'] | train | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/config.py#L184-L195 |
6,093 | miLibris/flask-rest-jsonapi | flask_rest_jsonapi/querystring.py | QueryStringManager.filters | def filters(self):
"""Return filters from query string.
:return list: filter information
"""
results = []
filters = self.qs.get('filter')
if filters is not None:
try:
results.extend(json.loads(filters))
except (ValueError, TypeError):
raise InvalidFilters("Parse error")
if self._get_key_values('filter['):
results.extend(self._simple_filters(self._get_key_values('filter[')))
return results | python | def filters(self):
"""Return filters from query string.
:return list: filter information
"""
results = []
filters = self.qs.get('filter')
if filters is not None:
try:
results.extend(json.loads(filters))
except (ValueError, TypeError):
raise InvalidFilters("Parse error")
if self._get_key_values('filter['):
results.extend(self._simple_filters(self._get_key_values('filter[')))
return results | ['def', 'filters', '(', 'self', ')', ':', 'results', '=', '[', ']', 'filters', '=', 'self', '.', 'qs', '.', 'get', '(', "'filter'", ')', 'if', 'filters', 'is', 'not', 'None', ':', 'try', ':', 'results', '.', 'extend', '(', 'json', '.', 'loads', '(', 'filters', ')', ')', 'except', '(', 'ValueError', ',', 'TypeError', ')', ':', 'raise', 'InvalidFilters', '(', '"Parse error"', ')', 'if', 'self', '.', '_get_key_values', '(', "'filter['", ')', ':', 'results', '.', 'extend', '(', 'self', '.', '_simple_filters', '(', 'self', '.', '_get_key_values', '(', "'filter['", ')', ')', ')', 'return', 'results'] | Return filters from query string.
:return list: filter information | ['Return', 'filters', 'from', 'query', 'string', '.'] | train | https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/querystring.py#L77-L91 |
6,094 | dmwm/DBS | Server/Python/src/dbs/web/DBSReaderModel.py | DBSReaderModel.listDatasets | def listDatasets(self, dataset="", parent_dataset="", is_dataset_valid=1,
release_version="", pset_hash="", app_name="", output_module_label="", global_tag="",
processing_version=0, acquisition_era_name="", run_num=-1,
physics_group_name="", logical_file_name="", primary_ds_name="", primary_ds_type="",
processed_ds_name='', data_tier_name="", dataset_access_type="VALID", prep_id='', create_by="", last_modified_by="",
min_cdate='0', max_cdate='0', min_ldate='0', max_ldate='0', cdate='0',
ldate='0', detail=False, dataset_id=-1):
"""
API to list dataset(s) in DBS
* You can use ANY combination of these parameters in this API
* In absence of parameters, all valid datasets known to the DBS instance will be returned
:param dataset: Full dataset (path) of the dataset.
:type dataset: str
:param parent_dataset: Full dataset (path) of the dataset
:type parent_dataset: str
:param release_version: cmssw version
:type release_version: str
:param pset_hash: pset hash
:type pset_hash: str
:param app_name: Application name (generally it is cmsRun)
:type app_name: str
:param output_module_label: output_module_label
:type output_module_label: str
:param global_tag: global_tag
:type global_tag: str
:param processing_version: Processing Version
:type processing_version: str
:param acquisition_era_name: Acquisition Era
:type acquisition_era_name: str
:param run_num: Specify a specific run number or range. Possible format are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is not allowed.
:type run_num: int,list,str
:param physics_group_name: List only dataset having physics_group_name attribute
:type physics_group_name: str
:param logical_file_name: List dataset containing the logical_file_name
:type logical_file_name: str
:param primary_ds_name: Primary Dataset Name
:type primary_ds_name: str
:param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA)
:type primary_ds_type: str
:param processed_ds_name: List datasets having this processed dataset name
:type processed_ds_name: str
:param data_tier_name: Data Tier
:type data_tier_name: str
:param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.)
:type dataset_access_type: str
:param prep_id: prep_id
:type prep_id: str
:param create_by: Creator of the dataset
:type create_by: str
:param last_modified_by: Last modifier of the dataset
:type last_modified_by: str
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: List all details of a dataset
:type detail: bool
:param dataset_id: dataset table primary key used by CMS Computing Analytics.
:type dataset_id: int, long, str
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
"""
dataset = dataset.replace("*", "%")
parent_dataset = parent_dataset.replace("*", "%")
release_version = release_version.replace("*", "%")
pset_hash = pset_hash.replace("*", "%")
app_name = app_name.replace("*", "%")
output_module_label = output_module_label.replace("*", "%")
global_tag = global_tag.replace("*", "%")
logical_file_name = logical_file_name.replace("*", "%")
physics_group_name = physics_group_name.replace("*", "%")
primary_ds_name = primary_ds_name.replace("*", "%")
primary_ds_type = primary_ds_type.replace("*", "%")
data_tier_name = data_tier_name.replace("*", "%")
dataset_access_type = dataset_access_type.replace("*", "%")
processed_ds_name = processed_ds_name.replace("*", "%")
acquisition_era_name = acquisition_era_name.replace("*", "%")
#processing_version = processing_version.replace("*", "%")
#create_by and last_modified_by have be full spelled, no wildcard will allowed.
#We got them from request head so they can be either HN account name or DN.
#This is depended on how an user's account is set up.
#
# In the next release we will require dataset has no wildcard in it.
# DBS will reject wildcard search with dataset name with listDatasets call.
# One should seperate the dataset into primary , process and datatier if any wildcard.
# YG Oct 26, 2016
# Some of users were overwhiled by the API change. So we split the wildcarded dataset in the server instead of by the client.
# YG Dec. 9 2016
#
# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
# We will disbale all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.
# YG Jan. 15 2019
#
if (run_num != -1 and logical_file_name ==''):
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.",
self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
if( dataset and ( dataset == "/%/%/%" or dataset== "/%" or dataset == "/%/%" ) ):
dataset=''
elif( dataset and ( dataset.find('%') != -1 ) ) :
junk, primary_ds_name, processed_ds_name, data_tier_name = dataset.split('/')
dataset = ''
if ( primary_ds_name == '%' ):
primary_ds_name = ''
if( processed_ds_name == '%' ):
processed_ds_name = ''
if ( data_tier_name == '%' ):
data_tier_name = ''
try:
dataset_id = int(dataset_id)
except:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for dataset_id that has to be an int.",
self.logger.exception, 'dataset_id has to be an int.')
if create_by.find('*')!=-1 or create_by.find('%')!=-1 or last_modified_by.find('*')!=-1\
or last_modified_by.find('%')!=-1:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for create_by or last_modified_by.\
No wildcard allowed.", self.logger.exception, 'No wildcards allowed for create_by or last_modified_by')
try:
if isinstance(min_cdate, basestring) and ('*' in min_cdate or '%' in min_cdate):
min_cdate = 0
else:
try:
min_cdate = int(min_cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_cdate")
if isinstance(max_cdate, basestring) and ('*' in max_cdate or '%' in max_cdate):
max_cdate = 0
else:
try:
max_cdate = int(max_cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_cdate")
if isinstance(min_ldate, basestring) and ('*' in min_ldate or '%' in min_ldate):
min_ldate = 0
else:
try:
min_ldate = int(min_ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_ldate")
if isinstance(max_ldate, basestring) and ('*' in max_ldate or '%' in max_ldate):
max_ldate = 0
else:
try:
max_ldate = int(max_ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_ldate")
if isinstance(cdate, basestring) and ('*' in cdate or '%' in cdate):
cdate = 0
else:
try:
cdate = int(cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for cdate")
if isinstance(ldate, basestring) and ('*' in ldate or '%' in ldate):
ldate = 0
else:
try:
ldate = int(ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for ldate")
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDatasets. %s \n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
detail = detail in (True, 1, "True", "1", 'true')
try:
return self.dbsDataset.listDatasets(dataset, parent_dataset, is_dataset_valid, release_version, pset_hash,
app_name, output_module_label, global_tag, processing_version, acquisition_era_name,
run_num, physics_group_name, logical_file_name, primary_ds_name, primary_ds_type, processed_ds_name,
data_tier_name, dataset_access_type, prep_id, create_by, last_modified_by,
min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate, detail, dataset_id)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listdatasets. %s.\n Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | python | def listDatasets(self, dataset="", parent_dataset="", is_dataset_valid=1,
release_version="", pset_hash="", app_name="", output_module_label="", global_tag="",
processing_version=0, acquisition_era_name="", run_num=-1,
physics_group_name="", logical_file_name="", primary_ds_name="", primary_ds_type="",
processed_ds_name='', data_tier_name="", dataset_access_type="VALID", prep_id='', create_by="", last_modified_by="",
min_cdate='0', max_cdate='0', min_ldate='0', max_ldate='0', cdate='0',
ldate='0', detail=False, dataset_id=-1):
"""
API to list dataset(s) in DBS
* You can use ANY combination of these parameters in this API
* In absence of parameters, all valid datasets known to the DBS instance will be returned
:param dataset: Full dataset (path) of the dataset.
:type dataset: str
:param parent_dataset: Full dataset (path) of the dataset
:type parent_dataset: str
:param release_version: cmssw version
:type release_version: str
:param pset_hash: pset hash
:type pset_hash: str
:param app_name: Application name (generally it is cmsRun)
:type app_name: str
:param output_module_label: output_module_label
:type output_module_label: str
:param global_tag: global_tag
:type global_tag: str
:param processing_version: Processing Version
:type processing_version: str
:param acquisition_era_name: Acquisition Era
:type acquisition_era_name: str
:param run_num: Specify a specific run number or range. Possible format are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is not allowed.
:type run_num: int,list,str
:param physics_group_name: List only dataset having physics_group_name attribute
:type physics_group_name: str
:param logical_file_name: List dataset containing the logical_file_name
:type logical_file_name: str
:param primary_ds_name: Primary Dataset Name
:type primary_ds_name: str
:param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA)
:type primary_ds_type: str
:param processed_ds_name: List datasets having this processed dataset name
:type processed_ds_name: str
:param data_tier_name: Data Tier
:type data_tier_name: str
:param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.)
:type dataset_access_type: str
:param prep_id: prep_id
:type prep_id: str
:param create_by: Creator of the dataset
:type create_by: str
:param last_modified_by: Last modifier of the dataset
:type last_modified_by: str
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: List all details of a dataset
:type detail: bool
:param dataset_id: dataset table primary key used by CMS Computing Analytics.
:type dataset_id: int, long, str
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
"""
dataset = dataset.replace("*", "%")
parent_dataset = parent_dataset.replace("*", "%")
release_version = release_version.replace("*", "%")
pset_hash = pset_hash.replace("*", "%")
app_name = app_name.replace("*", "%")
output_module_label = output_module_label.replace("*", "%")
global_tag = global_tag.replace("*", "%")
logical_file_name = logical_file_name.replace("*", "%")
physics_group_name = physics_group_name.replace("*", "%")
primary_ds_name = primary_ds_name.replace("*", "%")
primary_ds_type = primary_ds_type.replace("*", "%")
data_tier_name = data_tier_name.replace("*", "%")
dataset_access_type = dataset_access_type.replace("*", "%")
processed_ds_name = processed_ds_name.replace("*", "%")
acquisition_era_name = acquisition_era_name.replace("*", "%")
#processing_version = processing_version.replace("*", "%")
#create_by and last_modified_by have be full spelled, no wildcard will allowed.
#We got them from request head so they can be either HN account name or DN.
#This is depended on how an user's account is set up.
#
# In the next release we will require dataset has no wildcard in it.
# DBS will reject wildcard search with dataset name with listDatasets call.
# One should seperate the dataset into primary , process and datatier if any wildcard.
# YG Oct 26, 2016
# Some of users were overwhiled by the API change. So we split the wildcarded dataset in the server instead of by the client.
# YG Dec. 9 2016
#
# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
# We will disbale all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.
# YG Jan. 15 2019
#
if (run_num != -1 and logical_file_name ==''):
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.",
self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
if( dataset and ( dataset == "/%/%/%" or dataset== "/%" or dataset == "/%/%" ) ):
dataset=''
elif( dataset and ( dataset.find('%') != -1 ) ) :
junk, primary_ds_name, processed_ds_name, data_tier_name = dataset.split('/')
dataset = ''
if ( primary_ds_name == '%' ):
primary_ds_name = ''
if( processed_ds_name == '%' ):
processed_ds_name = ''
if ( data_tier_name == '%' ):
data_tier_name = ''
try:
dataset_id = int(dataset_id)
except:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for dataset_id that has to be an int.",
self.logger.exception, 'dataset_id has to be an int.')
if create_by.find('*')!=-1 or create_by.find('%')!=-1 or last_modified_by.find('*')!=-1\
or last_modified_by.find('%')!=-1:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for create_by or last_modified_by.\
No wildcard allowed.", self.logger.exception, 'No wildcards allowed for create_by or last_modified_by')
try:
if isinstance(min_cdate, basestring) and ('*' in min_cdate or '%' in min_cdate):
min_cdate = 0
else:
try:
min_cdate = int(min_cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_cdate")
if isinstance(max_cdate, basestring) and ('*' in max_cdate or '%' in max_cdate):
max_cdate = 0
else:
try:
max_cdate = int(max_cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_cdate")
if isinstance(min_ldate, basestring) and ('*' in min_ldate or '%' in min_ldate):
min_ldate = 0
else:
try:
min_ldate = int(min_ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_ldate")
if isinstance(max_ldate, basestring) and ('*' in max_ldate or '%' in max_ldate):
max_ldate = 0
else:
try:
max_ldate = int(max_ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_ldate")
if isinstance(cdate, basestring) and ('*' in cdate or '%' in cdate):
cdate = 0
else:
try:
cdate = int(cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for cdate")
if isinstance(ldate, basestring) and ('*' in ldate or '%' in ldate):
ldate = 0
else:
try:
ldate = int(ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for ldate")
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDatasets. %s \n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
detail = detail in (True, 1, "True", "1", 'true')
try:
return self.dbsDataset.listDatasets(dataset, parent_dataset, is_dataset_valid, release_version, pset_hash,
app_name, output_module_label, global_tag, processing_version, acquisition_era_name,
run_num, physics_group_name, logical_file_name, primary_ds_name, primary_ds_type, processed_ds_name,
data_tier_name, dataset_access_type, prep_id, create_by, last_modified_by,
min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate, detail, dataset_id)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listdatasets. %s.\n Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | ['def', 'listDatasets', '(', 'self', ',', 'dataset', '=', '""', ',', 'parent_dataset', '=', '""', ',', 'is_dataset_valid', '=', '1', ',', 'release_version', '=', '""', ',', 'pset_hash', '=', '""', ',', 'app_name', '=', '""', ',', 'output_module_label', '=', '""', ',', 'global_tag', '=', '""', ',', 'processing_version', '=', '0', ',', 'acquisition_era_name', '=', '""', ',', 'run_num', '=', '-', '1', ',', 'physics_group_name', '=', '""', ',', 'logical_file_name', '=', '""', ',', 'primary_ds_name', '=', '""', ',', 'primary_ds_type', '=', '""', ',', 'processed_ds_name', '=', "''", ',', 'data_tier_name', '=', '""', ',', 'dataset_access_type', '=', '"VALID"', ',', 'prep_id', '=', "''", ',', 'create_by', '=', '""', ',', 'last_modified_by', '=', '""', ',', 'min_cdate', '=', "'0'", ',', 'max_cdate', '=', "'0'", ',', 'min_ldate', '=', "'0'", ',', 'max_ldate', '=', "'0'", ',', 'cdate', '=', "'0'", ',', 'ldate', '=', "'0'", ',', 'detail', '=', 'False', ',', 'dataset_id', '=', '-', '1', ')', ':', 'dataset', '=', 'dataset', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'parent_dataset', '=', 'parent_dataset', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'release_version', '=', 'release_version', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'pset_hash', '=', 'pset_hash', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'app_name', '=', 'app_name', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'output_module_label', '=', 'output_module_label', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'global_tag', '=', 'global_tag', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'logical_file_name', '=', 'logical_file_name', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'physics_group_name', '=', 'physics_group_name', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'primary_ds_name', '=', 'primary_ds_name', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'primary_ds_type', '=', 'primary_ds_type', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'data_tier_name', '=', 'data_tier_name', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'dataset_access_type', '=', 'dataset_access_type', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'processed_ds_name', '=', 'processed_ds_name', '.', 'replace', '(', '"*"', ',', '"%"', ')', 'acquisition_era_name', '=', 'acquisition_era_name', '.', 'replace', '(', '"*"', ',', '"%"', ')', '#processing_version = processing_version.replace("*", "%")', '#create_by and last_modified_by have be full spelled, no wildcard will allowed.', '#We got them from request head so they can be either HN account name or DN.', "#This is depended on how an user's account is set up.", '#', '# In the next release we will require dataset has no wildcard in it. ', '# DBS will reject wildcard search with dataset name with listDatasets call. ', '# One should seperate the dataset into primary , process and datatier if any wildcard.', '# YG Oct 26, 2016', '# Some of users were overwhiled by the API change. So we split the wildcarded dataset in the server instead of by the client.', '# YG Dec. 9 2016', '#', '# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours', '# We will disbale all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.', '# YG Jan. 15 2019', '#', 'if', '(', 'run_num', '!=', '-', '1', 'and', 'logical_file_name', '==', "''", ')', ':', 'for', 'r', 'in', 'parseRunRange', '(', 'run_num', ')', ':', 'if', 'isinstance', '(', 'r', ',', 'basestring', ')', 'or', 'isinstance', '(', 'r', ',', 'int', ')', 'or', 'isinstance', '(', 'r', ',', 'long', ')', ':', 'if', 'r', '==', '1', 'or', 'r', '==', "'1'", ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input"', ',', '"Run_num=1 is not a valid input."', ',', 'self', '.', 'logger', '.', 'exception', ')', 'elif', 'isinstance', '(', 'r', ',', 'run_tuple', ')', ':', 'if', 'r', '[', '0', ']', '==', 'r', '[', '1', ']', ':', 'dbsExceptionHandler', '(', "'dbsException-invalid-input'", ',', '"DBS run range must be apart at least by 1."', ',', 'self', '.', 'logger', '.', 'exception', ')', 'elif', 'r', '[', '0', ']', '<=', '1', '<=', 'r', '[', '1', ']', ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input"', ',', '"Run_num=1 is not a valid input."', ',', 'self', '.', 'logger', '.', 'exception', ')', 'if', '(', 'dataset', 'and', '(', 'dataset', '==', '"/%/%/%"', 'or', 'dataset', '==', '"/%"', 'or', 'dataset', '==', '"/%/%"', ')', ')', ':', 'dataset', '=', "''", 'elif', '(', 'dataset', 'and', '(', 'dataset', '.', 'find', '(', "'%'", ')', '!=', '-', '1', ')', ')', ':', 'junk', ',', 'primary_ds_name', ',', 'processed_ds_name', ',', 'data_tier_name', '=', 'dataset', '.', 'split', '(', "'/'", ')', 'dataset', '=', "''", 'if', '(', 'primary_ds_name', '==', "'%'", ')', ':', 'primary_ds_name', '=', "''", 'if', '(', 'processed_ds_name', '==', "'%'", ')', ':', 'processed_ds_name', '=', "''", 'if', '(', 'data_tier_name', '==', "'%'", ')', ':', 'data_tier_name', '=', "''", 'try', ':', 'dataset_id', '=', 'int', '(', 'dataset_id', ')', 'except', ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input2"', ',', '"Invalid Input for dataset_id that has to be an int."', ',', 'self', '.', 'logger', '.', 'exception', ',', "'dataset_id has to be an int.'", ')', 'if', 'create_by', '.', 'find', '(', "'*'", ')', '!=', '-', '1', 'or', 'create_by', '.', 'find', '(', "'%'", ')', '!=', '-', '1', 'or', 'last_modified_by', '.', 'find', '(', "'*'", ')', '!=', '-', '1', 'or', 'last_modified_by', '.', 'find', '(', "'%'", ')', '!=', '-', '1', ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input2"', ',', '"Invalid Input for create_by or last_modified_by.\\\n No wildcard allowed."', ',', 'self', '.', 'logger', '.', 'exception', ',', "'No wildcards allowed for create_by or last_modified_by'", ')', 'try', ':', 'if', 'isinstance', '(', 'min_cdate', ',', 'basestring', ')', 'and', '(', "'*'", 'in', 'min_cdate', 'or', "'%'", 'in', 'min_cdate', ')', ':', 'min_cdate', '=', '0', 'else', ':', 'try', ':', 'min_cdate', '=', 'int', '(', 'min_cdate', ')', 'except', ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input"', ',', '"invalid input for min_cdate"', ')', 'if', 'isinstance', '(', 'max_cdate', ',', 'basestring', ')', 'and', '(', "'*'", 'in', 'max_cdate', 'or', "'%'", 'in', 'max_cdate', ')', ':', 'max_cdate', '=', '0', 'else', ':', 'try', ':', 'max_cdate', '=', 'int', '(', 'max_cdate', ')', 'except', ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input"', ',', '"invalid input for max_cdate"', ')', 'if', 'isinstance', '(', 'min_ldate', ',', 'basestring', ')', 'and', '(', "'*'", 'in', 'min_ldate', 'or', "'%'", 'in', 'min_ldate', ')', ':', 'min_ldate', '=', '0', 'else', ':', 'try', ':', 'min_ldate', '=', 'int', '(', 'min_ldate', ')', 'except', ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input"', ',', '"invalid input for min_ldate"', ')', 'if', 'isinstance', '(', 'max_ldate', ',', 'basestring', ')', 'and', '(', "'*'", 'in', 'max_ldate', 'or', "'%'", 'in', 'max_ldate', ')', ':', 'max_ldate', '=', '0', 'else', ':', 'try', ':', 'max_ldate', '=', 'int', '(', 'max_ldate', ')', 'except', ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input"', ',', '"invalid input for max_ldate"', ')', 'if', 'isinstance', '(', 'cdate', ',', 'basestring', ')', 'and', '(', "'*'", 'in', 'cdate', 'or', "'%'", 'in', 'cdate', ')', ':', 'cdate', '=', '0', 'else', ':', 'try', ':', 'cdate', '=', 'int', '(', 'cdate', ')', 'except', ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input"', ',', '"invalid input for cdate"', ')', 'if', 'isinstance', '(', 'ldate', ',', 'basestring', ')', 'and', '(', "'*'", 'in', 'ldate', 'or', "'%'", 'in', 'ldate', ')', ':', 'ldate', '=', '0', 'else', ':', 'try', ':', 'ldate', '=', 'int', '(', 'ldate', ')', 'except', ':', 'dbsExceptionHandler', '(', '"dbsException-invalid-input"', ',', '"invalid input for ldate"', ')', 'except', 'dbsException', 'as', 'de', ':', 'dbsExceptionHandler', '(', 'de', '.', 'eCode', ',', 'de', '.', 'message', ',', 'self', '.', 'logger', '.', 'exception', ',', 'de', '.', 'serverError', ')', 'except', 'Exception', 'as', 'ex', ':', 'sError', '=', '"DBSReaderModel/listDatasets. %s \\n. Exception trace: \\n %s"', '%', '(', 'ex', ',', 'traceback', '.', 'format_exc', '(', ')', ')', 'dbsExceptionHandler', '(', "'dbsException-server-error'", ',', 'dbsExceptionCode', '[', "'dbsException-server-error'", ']', ',', 'self', '.', 'logger', '.', 'exception', ',', 'sError', ')', 'detail', '=', 'detail', 'in', '(', 'True', ',', '1', ',', '"True"', ',', '"1"', ',', "'true'", ')', 'try', ':', 'return', 'self', '.', 'dbsDataset', '.', 'listDatasets', '(', 'dataset', ',', 'parent_dataset', ',', 'is_dataset_valid', ',', 'release_version', ',', 'pset_hash', ',', 'app_name', ',', 'output_module_label', ',', 'global_tag', ',', 'processing_version', ',', 'acquisition_era_name', ',', 'run_num', ',', 'physics_group_name', ',', 'logical_file_name', ',', 'primary_ds_name', ',', 'primary_ds_type', ',', 'processed_ds_name', ',', 'data_tier_name', ',', 'dataset_access_type', ',', 'prep_id', ',', 'create_by', ',', 'last_modified_by', ',', 'min_cdate', ',', 'max_cdate', ',', 'min_ldate', ',', 'max_ldate', ',', 'cdate', ',', 'ldate', ',', 'detail', ',', 'dataset_id', ')', 'except', 'dbsException', 'as', 'de', ':', 'dbsExceptionHandler', '(', 'de', '.', 'eCode', ',', 'de', '.', 'message', ',', 'self', '.', 'logger', '.', 'exception', ',', 'de', '.', 'serverError', ')', 'except', 'Exception', 'as', 'ex', ':', 'sError', '=', '"DBSReaderModel/listdatasets. %s.\\n Exception trace: \\n %s"', '%', '(', 'ex', ',', 'traceback', '.', 'format_exc', '(', ')', ')', 'dbsExceptionHandler', '(', "'dbsException-server-error'", ',', 'dbsExceptionCode', '[', "'dbsException-server-error'", ']', ',', 'self', '.', 'logger', '.', 'exception', ',', 'sError', ')'] | API to list dataset(s) in DBS
* You can use ANY combination of these parameters in this API
* In absence of parameters, all valid datasets known to the DBS instance will be returned
:param dataset: Full dataset (path) of the dataset.
:type dataset: str
:param parent_dataset: Full dataset (path) of the dataset
:type parent_dataset: str
:param release_version: cmssw version
:type release_version: str
:param pset_hash: pset hash
:type pset_hash: str
:param app_name: Application name (generally it is cmsRun)
:type app_name: str
:param output_module_label: output_module_label
:type output_module_label: str
:param global_tag: global_tag
:type global_tag: str
:param processing_version: Processing Version
:type processing_version: str
:param acquisition_era_name: Acquisition Era
:type acquisition_era_name: str
:param run_num: Specify a specific run number or range. Possible format are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is not allowed.
:type run_num: int,list,str
:param physics_group_name: List only dataset having physics_group_name attribute
:type physics_group_name: str
:param logical_file_name: List dataset containing the logical_file_name
:type logical_file_name: str
:param primary_ds_name: Primary Dataset Name
:type primary_ds_name: str
:param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA)
:type primary_ds_type: str
:param processed_ds_name: List datasets having this processed dataset name
:type processed_ds_name: str
:param data_tier_name: Data Tier
:type data_tier_name: str
:param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.)
:type dataset_access_type: str
:param prep_id: prep_id
:type prep_id: str
:param create_by: Creator of the dataset
:type create_by: str
:param last_modified_by: Last modifier of the dataset
:type last_modified_by: str
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: List all details of a dataset
:type detail: bool
:param dataset_id: dataset table primary key used by CMS Computing Analytics.
:type dataset_id: int, long, str
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts | ['API', 'to', 'list', 'dataset', '(', 's', ')', 'in', 'DBS', '*', 'You', 'can', 'use', 'ANY', 'combination', 'of', 'these', 'parameters', 'in', 'this', 'API', '*', 'In', 'absence', 'of', 'parameters', 'all', 'valid', 'datasets', 'known', 'to', 'the', 'DBS', 'instance', 'will', 'be', 'returned'] | train | https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSReaderModel.py#L284-L488 |
6,095 | crackinglandia/pype32 | pype32/directories.py | NETDirectory.parse | def parse(readDataInstance):
"""
Returns a new L{NETDirectory} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NETDirectory} object.
@rtype: L{NETDirectory}
@return: A new L{NETDirectory} object.
"""
nd = NETDirectory()
nd.directory = NetDirectory.parse(readDataInstance)
nd.netMetaDataHeader = NetMetaDataHeader.parse(readDataInstance)
nd.netMetaDataStreams = NetMetaDataStreams.parse(readDataInstance)
return nd | python | def parse(readDataInstance):
"""
Returns a new L{NETDirectory} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NETDirectory} object.
@rtype: L{NETDirectory}
@return: A new L{NETDirectory} object.
"""
nd = NETDirectory()
nd.directory = NetDirectory.parse(readDataInstance)
nd.netMetaDataHeader = NetMetaDataHeader.parse(readDataInstance)
nd.netMetaDataStreams = NetMetaDataStreams.parse(readDataInstance)
return nd | ['def', 'parse', '(', 'readDataInstance', ')', ':', 'nd', '=', 'NETDirectory', '(', ')', 'nd', '.', 'directory', '=', 'NetDirectory', '.', 'parse', '(', 'readDataInstance', ')', 'nd', '.', 'netMetaDataHeader', '=', 'NetMetaDataHeader', '.', 'parse', '(', 'readDataInstance', ')', 'nd', '.', 'netMetaDataStreams', '=', 'NetMetaDataStreams', '.', 'parse', '(', 'readDataInstance', ')', 'return', 'nd'] | Returns a new L{NETDirectory} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NETDirectory} object.
@rtype: L{NETDirectory}
@return: A new L{NETDirectory} object. | ['Returns', 'a', 'new', 'L', '{', 'NETDirectory', '}', 'object', '.'] | train | https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L945-L960 |
6,096 | infothrill/python-dyndnsc | dyndnsc/detector/base.py | IPDetector.set_current_value | def set_current_value(self, value):
"""Set the detected IP in the current run (if any)."""
self._oldvalue = self.get_current_value()
self._currentvalue = value
if self._oldvalue != value:
# self.notify_observers("new_ip_detected", {"ip": value})
LOG.debug("%s.set_current_value(%s)", self.__class__.__name__, value)
return value | python | def set_current_value(self, value):
"""Set the detected IP in the current run (if any)."""
self._oldvalue = self.get_current_value()
self._currentvalue = value
if self._oldvalue != value:
# self.notify_observers("new_ip_detected", {"ip": value})
LOG.debug("%s.set_current_value(%s)", self.__class__.__name__, value)
return value | ['def', 'set_current_value', '(', 'self', ',', 'value', ')', ':', 'self', '.', '_oldvalue', '=', 'self', '.', 'get_current_value', '(', ')', 'self', '.', '_currentvalue', '=', 'value', 'if', 'self', '.', '_oldvalue', '!=', 'value', ':', '# self.notify_observers("new_ip_detected", {"ip": value})', 'LOG', '.', 'debug', '(', '"%s.set_current_value(%s)"', ',', 'self', '.', '__class__', '.', '__name__', ',', 'value', ')', 'return', 'value'] | Set the detected IP in the current run (if any). | ['Set', 'the', 'detected', 'IP', 'in', 'the', 'current', 'run', '(', 'if', 'any', ')', '.'] | train | https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/detector/base.py#L75-L82 |
6,097 | KieranWynn/pyquaternion | pyquaternion/quaternion.py | Quaternion.exp | def exp(cls, q):
"""Quaternion Exponential.
Find the exponential of a quaternion amount.
Params:
q: the input quaternion/argument as a Quaternion object.
Returns:
A quaternion amount representing the exp(q). See [Source](https://math.stackexchange.com/questions/1030737/exponential-function-of-quaternion-derivation for more information and mathematical background).
Note:
The method can compute the exponential of any quaternion.
"""
tolerance = 1e-17
v_norm = np.linalg.norm(q.vector)
vec = q.vector
if v_norm > tolerance:
vec = vec / v_norm
magnitude = exp(q.scalar)
return Quaternion(scalar = magnitude * cos(v_norm), vector = magnitude * sin(v_norm) * vec) | python | def exp(cls, q):
"""Quaternion Exponential.
Find the exponential of a quaternion amount.
Params:
q: the input quaternion/argument as a Quaternion object.
Returns:
A quaternion amount representing the exp(q). See [Source](https://math.stackexchange.com/questions/1030737/exponential-function-of-quaternion-derivation for more information and mathematical background).
Note:
The method can compute the exponential of any quaternion.
"""
tolerance = 1e-17
v_norm = np.linalg.norm(q.vector)
vec = q.vector
if v_norm > tolerance:
vec = vec / v_norm
magnitude = exp(q.scalar)
return Quaternion(scalar = magnitude * cos(v_norm), vector = magnitude * sin(v_norm) * vec) | ['def', 'exp', '(', 'cls', ',', 'q', ')', ':', 'tolerance', '=', '1e-17', 'v_norm', '=', 'np', '.', 'linalg', '.', 'norm', '(', 'q', '.', 'vector', ')', 'vec', '=', 'q', '.', 'vector', 'if', 'v_norm', '>', 'tolerance', ':', 'vec', '=', 'vec', '/', 'v_norm', 'magnitude', '=', 'exp', '(', 'q', '.', 'scalar', ')', 'return', 'Quaternion', '(', 'scalar', '=', 'magnitude', '*', 'cos', '(', 'v_norm', ')', ',', 'vector', '=', 'magnitude', '*', 'sin', '(', 'v_norm', ')', '*', 'vec', ')'] | Quaternion Exponential.
Find the exponential of a quaternion amount.
Params:
q: the input quaternion/argument as a Quaternion object.
Returns:
A quaternion amount representing the exp(q). See [Source](https://math.stackexchange.com/questions/1030737/exponential-function-of-quaternion-derivation for more information and mathematical background).
Note:
The method can compute the exponential of any quaternion. | ['Quaternion', 'Exponential', '.'] | train | https://github.com/KieranWynn/pyquaternion/blob/d2aad7f3fb0d4b9cc23aa72b390e9b2e1273eae9/pyquaternion/quaternion.py#L625-L645 |
6,098 | google/grr | grr/client/grr_response_client/windows/installers.py | CopyToSystemDir.StopPreviousService | def StopPreviousService(self):
"""Stops the Windows service hosting the GRR process."""
StopService(
service_name=config.CONFIG["Nanny.service_name"],
service_binary_name=config.CONFIG["Nanny.service_binary_name"])
if not config.CONFIG["Client.fleetspeak_enabled"]:
return
StopService(service_name=config.CONFIG["Client.fleetspeak_service_name"])
# Delete GRR's Fleetspeak config from the registry so Fleetspeak
# doesn't try to restart GRR unless/until installation completes
# successfully.
key_path = config.CONFIG["Client.fleetspeak_unsigned_services_regkey"]
regkey = OpenRegkey(key_path)
try:
winreg.DeleteValue(regkey, config.CONFIG["Client.name"])
logging.info("Deleted value '%s' of key '%s'.",
config.CONFIG["Client.name"], key_path)
except OSError as e:
# Windows will raise a no-such-file-or-directory error if
# GRR's config hasn't been written to the registry yet.
if e.errno != errno.ENOENT:
raise | python | def StopPreviousService(self):
"""Stops the Windows service hosting the GRR process."""
StopService(
service_name=config.CONFIG["Nanny.service_name"],
service_binary_name=config.CONFIG["Nanny.service_binary_name"])
if not config.CONFIG["Client.fleetspeak_enabled"]:
return
StopService(service_name=config.CONFIG["Client.fleetspeak_service_name"])
# Delete GRR's Fleetspeak config from the registry so Fleetspeak
# doesn't try to restart GRR unless/until installation completes
# successfully.
key_path = config.CONFIG["Client.fleetspeak_unsigned_services_regkey"]
regkey = OpenRegkey(key_path)
try:
winreg.DeleteValue(regkey, config.CONFIG["Client.name"])
logging.info("Deleted value '%s' of key '%s'.",
config.CONFIG["Client.name"], key_path)
except OSError as e:
# Windows will raise a no-such-file-or-directory error if
# GRR's config hasn't been written to the registry yet.
if e.errno != errno.ENOENT:
raise | ['def', 'StopPreviousService', '(', 'self', ')', ':', 'StopService', '(', 'service_name', '=', 'config', '.', 'CONFIG', '[', '"Nanny.service_name"', ']', ',', 'service_binary_name', '=', 'config', '.', 'CONFIG', '[', '"Nanny.service_binary_name"', ']', ')', 'if', 'not', 'config', '.', 'CONFIG', '[', '"Client.fleetspeak_enabled"', ']', ':', 'return', 'StopService', '(', 'service_name', '=', 'config', '.', 'CONFIG', '[', '"Client.fleetspeak_service_name"', ']', ')', "# Delete GRR's Fleetspeak config from the registry so Fleetspeak", "# doesn't try to restart GRR unless/until installation completes", '# successfully.', 'key_path', '=', 'config', '.', 'CONFIG', '[', '"Client.fleetspeak_unsigned_services_regkey"', ']', 'regkey', '=', 'OpenRegkey', '(', 'key_path', ')', 'try', ':', 'winreg', '.', 'DeleteValue', '(', 'regkey', ',', 'config', '.', 'CONFIG', '[', '"Client.name"', ']', ')', 'logging', '.', 'info', '(', '"Deleted value \'%s\' of key \'%s\'."', ',', 'config', '.', 'CONFIG', '[', '"Client.name"', ']', ',', 'key_path', ')', 'except', 'OSError', 'as', 'e', ':', '# Windows will raise a no-such-file-or-directory error if', "# GRR's config hasn't been written to the registry yet.", 'if', 'e', '.', 'errno', '!=', 'errno', '.', 'ENOENT', ':', 'raise'] | Stops the Windows service hosting the GRR process. | ['Stops', 'the', 'Windows', 'service', 'hosting', 'the', 'GRR', 'process', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/windows/installers.py#L141-L165 |
6,099 | nilp0inter/cpe | cpe/comp/cpecomp2_3_uri.py | CPEComponent2_3_URI._decode | def _decode(self):
"""
Convert the characters of character in value of component to standard
value (WFN value).
This function scans the value of component and returns a copy
with all percent-encoded characters decoded.
:exception: ValueError - invalid character in value of component
"""
result = []
idx = 0
s = self._encoded_value
embedded = False
errmsg = []
errmsg.append("Invalid value: ")
while (idx < len(s)):
errmsg.append(s)
errmsg_str = "".join(errmsg)
# Get the idx'th character of s
c = s[idx]
# Deal with dot, hyphen and tilde: decode with quoting
if ((c == '.') or (c == '-') or (c == '~')):
result.append("\\")
result.append(c)
idx += 1
embedded = True # a non-%01 encountered
continue
if (c != '%'):
result.append(c)
idx += 1
embedded = True # a non-%01 encountered
continue
# we get here if we have a substring starting w/ '%'
form = s[idx: idx + 3] # get the three-char sequence
if form == CPEComponent2_3_URI.WILDCARD_ONE:
# If %01 legal at beginning or end
# embedded is false, so must be preceded by %01
# embedded is true, so must be followed by %01
if (((idx == 0) or (idx == (len(s)-3))) or
((not embedded) and (s[idx - 3:idx] == CPEComponent2_3_URI.WILDCARD_ONE)) or
(embedded and (len(s) >= idx + 6) and (s[idx + 3:idx + 6] == CPEComponent2_3_URI.WILDCARD_ONE))):
# A percent-encoded question mark is found
# at the beginning or the end of the string,
# or embedded in sequence as required.
# Decode to unquoted form.
result.append(CPEComponent2_3_WFN.WILDCARD_ONE)
idx += 3
continue
else:
raise ValueError(errmsg_str)
elif form == CPEComponent2_3_URI.WILDCARD_MULTI:
if ((idx == 0) or (idx == (len(s) - 3))):
# Percent-encoded asterisk is at the beginning
# or the end of the string, as required.
# Decode to unquoted form.
result.append(CPEComponent2_3_WFN.WILDCARD_MULTI)
else:
raise ValueError(errmsg_str)
elif form in CPEComponent2_3_URI.pce_char_to_decode.keys():
value = CPEComponent2_3_URI.pce_char_to_decode[form]
result.append(value)
else:
errmsg.append("Invalid percent-encoded character: ")
errmsg.append(s)
raise ValueError("".join(errmsg))
idx += 3
embedded = True # a non-%01 encountered.
self._standard_value = "".join(result) | python | def _decode(self):
"""
Convert the characters of character in value of component to standard
value (WFN value).
This function scans the value of component and returns a copy
with all percent-encoded characters decoded.
:exception: ValueError - invalid character in value of component
"""
result = []
idx = 0
s = self._encoded_value
embedded = False
errmsg = []
errmsg.append("Invalid value: ")
while (idx < len(s)):
errmsg.append(s)
errmsg_str = "".join(errmsg)
# Get the idx'th character of s
c = s[idx]
# Deal with dot, hyphen and tilde: decode with quoting
if ((c == '.') or (c == '-') or (c == '~')):
result.append("\\")
result.append(c)
idx += 1
embedded = True # a non-%01 encountered
continue
if (c != '%'):
result.append(c)
idx += 1
embedded = True # a non-%01 encountered
continue
# we get here if we have a substring starting w/ '%'
form = s[idx: idx + 3] # get the three-char sequence
if form == CPEComponent2_3_URI.WILDCARD_ONE:
# If %01 legal at beginning or end
# embedded is false, so must be preceded by %01
# embedded is true, so must be followed by %01
if (((idx == 0) or (idx == (len(s)-3))) or
((not embedded) and (s[idx - 3:idx] == CPEComponent2_3_URI.WILDCARD_ONE)) or
(embedded and (len(s) >= idx + 6) and (s[idx + 3:idx + 6] == CPEComponent2_3_URI.WILDCARD_ONE))):
# A percent-encoded question mark is found
# at the beginning or the end of the string,
# or embedded in sequence as required.
# Decode to unquoted form.
result.append(CPEComponent2_3_WFN.WILDCARD_ONE)
idx += 3
continue
else:
raise ValueError(errmsg_str)
elif form == CPEComponent2_3_URI.WILDCARD_MULTI:
if ((idx == 0) or (idx == (len(s) - 3))):
# Percent-encoded asterisk is at the beginning
# or the end of the string, as required.
# Decode to unquoted form.
result.append(CPEComponent2_3_WFN.WILDCARD_MULTI)
else:
raise ValueError(errmsg_str)
elif form in CPEComponent2_3_URI.pce_char_to_decode.keys():
value = CPEComponent2_3_URI.pce_char_to_decode[form]
result.append(value)
else:
errmsg.append("Invalid percent-encoded character: ")
errmsg.append(s)
raise ValueError("".join(errmsg))
idx += 3
embedded = True # a non-%01 encountered.
self._standard_value = "".join(result) | ['def', '_decode', '(', 'self', ')', ':', 'result', '=', '[', ']', 'idx', '=', '0', 's', '=', 'self', '.', '_encoded_value', 'embedded', '=', 'False', 'errmsg', '=', '[', ']', 'errmsg', '.', 'append', '(', '"Invalid value: "', ')', 'while', '(', 'idx', '<', 'len', '(', 's', ')', ')', ':', 'errmsg', '.', 'append', '(', 's', ')', 'errmsg_str', '=', '""', '.', 'join', '(', 'errmsg', ')', "# Get the idx'th character of s", 'c', '=', 's', '[', 'idx', ']', '# Deal with dot, hyphen and tilde: decode with quoting', 'if', '(', '(', 'c', '==', "'.'", ')', 'or', '(', 'c', '==', "'-'", ')', 'or', '(', 'c', '==', "'~'", ')', ')', ':', 'result', '.', 'append', '(', '"\\\\"', ')', 'result', '.', 'append', '(', 'c', ')', 'idx', '+=', '1', 'embedded', '=', 'True', '# a non-%01 encountered', 'continue', 'if', '(', 'c', '!=', "'%'", ')', ':', 'result', '.', 'append', '(', 'c', ')', 'idx', '+=', '1', 'embedded', '=', 'True', '# a non-%01 encountered', 'continue', "# we get here if we have a substring starting w/ '%'", 'form', '=', 's', '[', 'idx', ':', 'idx', '+', '3', ']', '# get the three-char sequence', 'if', 'form', '==', 'CPEComponent2_3_URI', '.', 'WILDCARD_ONE', ':', '# If %01 legal at beginning or end', '# embedded is false, so must be preceded by %01', '# embedded is true, so must be followed by %01', 'if', '(', '(', '(', 'idx', '==', '0', ')', 'or', '(', 'idx', '==', '(', 'len', '(', 's', ')', '-', '3', ')', ')', ')', 'or', '(', '(', 'not', 'embedded', ')', 'and', '(', 's', '[', 'idx', '-', '3', ':', 'idx', ']', '==', 'CPEComponent2_3_URI', '.', 'WILDCARD_ONE', ')', ')', 'or', '(', 'embedded', 'and', '(', 'len', '(', 's', ')', '>=', 'idx', '+', '6', ')', 'and', '(', 's', '[', 'idx', '+', '3', ':', 'idx', '+', '6', ']', '==', 'CPEComponent2_3_URI', '.', 'WILDCARD_ONE', ')', ')', ')', ':', '# A percent-encoded question mark is found', '# at the beginning or the end of the string,', '# or embedded in sequence as required.', '# Decode to unquoted form.', 'result', '.', 'append', '(', 'CPEComponent2_3_WFN', '.', 'WILDCARD_ONE', ')', 'idx', '+=', '3', 'continue', 'else', ':', 'raise', 'ValueError', '(', 'errmsg_str', ')', 'elif', 'form', '==', 'CPEComponent2_3_URI', '.', 'WILDCARD_MULTI', ':', 'if', '(', '(', 'idx', '==', '0', ')', 'or', '(', 'idx', '==', '(', 'len', '(', 's', ')', '-', '3', ')', ')', ')', ':', '# Percent-encoded asterisk is at the beginning', '# or the end of the string, as required.', '# Decode to unquoted form.', 'result', '.', 'append', '(', 'CPEComponent2_3_WFN', '.', 'WILDCARD_MULTI', ')', 'else', ':', 'raise', 'ValueError', '(', 'errmsg_str', ')', 'elif', 'form', 'in', 'CPEComponent2_3_URI', '.', 'pce_char_to_decode', '.', 'keys', '(', ')', ':', 'value', '=', 'CPEComponent2_3_URI', '.', 'pce_char_to_decode', '[', 'form', ']', 'result', '.', 'append', '(', 'value', ')', 'else', ':', 'errmsg', '.', 'append', '(', '"Invalid percent-encoded character: "', ')', 'errmsg', '.', 'append', '(', 's', ')', 'raise', 'ValueError', '(', '""', '.', 'join', '(', 'errmsg', ')', ')', 'idx', '+=', '3', 'embedded', '=', 'True', '# a non-%01 encountered.', 'self', '.', '_standard_value', '=', '""', '.', 'join', '(', 'result', ')'] | Convert the characters of character in value of component to standard
value (WFN value).
This function scans the value of component and returns a copy
with all percent-encoded characters decoded.
:exception: ValueError - invalid character in value of component | ['Convert', 'the', 'characters', 'of', 'character', 'in', 'value', 'of', 'component', 'to', 'standard', 'value', '(', 'WFN', 'value', ')', '.', 'This', 'function', 'scans', 'the', 'value', 'of', 'component', 'and', 'returns', 'a', 'copy', 'with', 'all', 'percent', '-', 'encoded', 'characters', 'decoded', '.'] | train | https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp2_3_uri.py#L163-L244 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.