Unnamed: 0 (int64, 0 to 10k) | repository_name (string, lengths 7 to 54) | func_path_in_repository (string, lengths 5 to 223) | func_name (string, lengths 1 to 134) | whole_func_string (string, lengths 100 to 30.3k) | language (1 distinct value) | func_code_string (string, lengths 100 to 30.3k) | func_code_tokens (string, lengths 138 to 33.2k) | func_documentation_string (string, lengths 1 to 15k) | func_documentation_tokens (string, lengths 5 to 5.14k) | split_name (1 distinct value) | func_code_url (string, lengths 91 to 315)
---|---|---|---|---|---|---|---|---|---|---|---|
7,400 | samluescher/django-media-tree | media_tree/utils/staticfiles.py | MimetypeStaticIconFileFinder.find | def find(file_node, dirs=ICON_DIRS, default_name=None, file_ext='.png'):
"""
Iterating all icon dirs, try to find a file called like the node's
extension / mime subtype / mime type (in that order).
For instance, for an MP3 file ("audio/mpeg"), this would look for:
"mp3.png" / "audio/mpeg.png" / "audio.png"
"""
names = []
for attr_name in ('extension', 'mimetype', 'mime_supertype'):
attr = getattr(file_node, attr_name)
if attr:
names.append(attr)
if default_name:
names.append(default_name)
icon_path = StaticPathFinder.find(names, dirs, file_ext)
if icon_path:
return StaticIconFile(file_node, icon_path) | python | def find(file_node, dirs=ICON_DIRS, default_name=None, file_ext='.png'):
"""
Iterating all icon dirs, try to find a file called like the node's
extension / mime subtype / mime type (in that order).
For instance, for an MP3 file ("audio/mpeg"), this would look for:
"mp3.png" / "audio/mpeg.png" / "audio.png"
"""
names = []
for attr_name in ('extension', 'mimetype', 'mime_supertype'):
attr = getattr(file_node, attr_name)
if attr:
names.append(attr)
if default_name:
names.append(default_name)
icon_path = StaticPathFinder.find(names, dirs, file_ext)
if icon_path:
return StaticIconFile(file_node, icon_path) | ['def', 'find', '(', 'file_node', ',', 'dirs', '=', 'ICON_DIRS', ',', 'default_name', '=', 'None', ',', 'file_ext', '=', "'.png'", ')', ':', 'names', '=', '[', ']', 'for', 'attr_name', 'in', '(', "'extension'", ',', "'mimetype'", ',', "'mime_supertype'", ')', ':', 'attr', '=', 'getattr', '(', 'file_node', ',', 'attr_name', ')', 'if', 'attr', ':', 'names', '.', 'append', '(', 'attr', ')', 'if', 'default_name', ':', 'names', '.', 'append', '(', 'default_name', ')', 'icon_path', '=', 'StaticPathFinder', '.', 'find', '(', 'names', ',', 'dirs', ',', 'file_ext', ')', 'if', 'icon_path', ':', 'return', 'StaticIconFile', '(', 'file_node', ',', 'icon_path', ')'] | Iterating all icon dirs, try to find a file called like the node's
extension / mime subtype / mime type (in that order).
For instance, for an MP3 file ("audio/mpeg"), this would look for:
"mp3.png" / "audio/mpeg.png" / "audio.png" | ['Iterating', 'all', 'icon', 'dirs', 'try', 'to', 'find', 'a', 'file', 'called', 'like', 'the', 'node', 's', 'extension', '/', 'mime', 'subtype', '/', 'mime', 'type', '(', 'in', 'that', 'order', ')', '.', 'For', 'instance', 'for', 'an', 'MP3', 'file', '(', 'audio', '/', 'mpeg', ')', 'this', 'would', 'look', 'for', ':', 'mp3', '.', 'png', '/', 'audio', '/', 'mpeg', '.', 'png', '/', 'audio', '.', 'png'] | train | https://github.com/samluescher/django-media-tree/blob/3eb6345faaf57e2fbe35ca431d4d133f950f2b5f/media_tree/utils/staticfiles.py#L108-L124 |
7,401 | ultrabug/py3status | py3status/py3.py | Py3.storage_keys | def storage_keys(self):
"""
Return a list of the keys for values stored for the module.
Keys will contain the following metadata entries:
- '_ctime': storage creation timestamp
- '_mtime': storage last modification timestamp
"""
if not self._module:
return []
self._storage_init()
module_name = self._module.module_full_name
return self._storage.storage_keys(module_name) | python | def storage_keys(self):
"""
Return a list of the keys for values stored for the module.
Keys will contain the following metadata entries:
- '_ctime': storage creation timestamp
- '_mtime': storage last modification timestamp
"""
if not self._module:
return []
self._storage_init()
module_name = self._module.module_full_name
return self._storage.storage_keys(module_name) | ['def', 'storage_keys', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_module', ':', 'return', '[', ']', 'self', '.', '_storage_init', '(', ')', 'module_name', '=', 'self', '.', '_module', '.', 'module_full_name', 'return', 'self', '.', '_storage', '.', 'storage_keys', '(', 'module_name', ')'] | Return a list of the keys for values stored for the module.
Keys will contain the following metadata entries:
- '_ctime': storage creation timestamp
- '_mtime': storage last modification timestamp | ['Return', 'a', 'list', 'of', 'the', 'keys', 'for', 'values', 'stored', 'for', 'the', 'module', '.'] | train | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/py3.py#L1082-L1094 |
7,402 | spyder-ide/spyder | spyder/plugins/editor/utils/editor.py | TextHelper.line_pos_from_number | def line_pos_from_number(self, line_number):
"""
Computes line position on Y-Axis (at the center of the line) from line
number.
:param line_number: The line number for which we want to know the
position in pixels.
:return: The center position of the line.
"""
editor = self._editor
block = editor.document().findBlockByNumber(line_number)
if block.isValid():
return int(editor.blockBoundingGeometry(block).translated(
editor.contentOffset()).top())
if line_number <= 0:
return 0
else:
return int(editor.blockBoundingGeometry(
block.previous()).translated(editor.contentOffset()).bottom()) | python | def line_pos_from_number(self, line_number):
"""
Computes line position on Y-Axis (at the center of the line) from line
number.
:param line_number: The line number for which we want to know the
position in pixels.
:return: The center position of the line.
"""
editor = self._editor
block = editor.document().findBlockByNumber(line_number)
if block.isValid():
return int(editor.blockBoundingGeometry(block).translated(
editor.contentOffset()).top())
if line_number <= 0:
return 0
else:
return int(editor.blockBoundingGeometry(
block.previous()).translated(editor.contentOffset()).bottom()) | ['def', 'line_pos_from_number', '(', 'self', ',', 'line_number', ')', ':', 'editor', '=', 'self', '.', '_editor', 'block', '=', 'editor', '.', 'document', '(', ')', '.', 'findBlockByNumber', '(', 'line_number', ')', 'if', 'block', '.', 'isValid', '(', ')', ':', 'return', 'int', '(', 'editor', '.', 'blockBoundingGeometry', '(', 'block', ')', '.', 'translated', '(', 'editor', '.', 'contentOffset', '(', ')', ')', '.', 'top', '(', ')', ')', 'if', 'line_number', '<=', '0', ':', 'return', '0', 'else', ':', 'return', 'int', '(', 'editor', '.', 'blockBoundingGeometry', '(', 'block', '.', 'previous', '(', ')', ')', '.', 'translated', '(', 'editor', '.', 'contentOffset', '(', ')', ')', '.', 'bottom', '(', ')', ')'] | Computes line position on Y-Axis (at the center of the line) from line
number.
:param line_number: The line number for which we want to know the
position in pixels.
:return: The center position of the line. | ['Computes', 'line', 'position', 'on', 'Y', '-', 'Axis', '(', 'at', 'the', 'center', 'of', 'the', 'line', ')', 'from', 'line', 'number', '.'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/editor.py#L510-L528 |
7,403 | tgalal/python-axolotl | axolotl/state/sessionstate.py | SessionState.setPendingKeyExchange | def setPendingKeyExchange(self, sequence, ourBaseKey, ourRatchetKey, ourIdentityKey):
"""
:type sequence: int
:type ourBaseKey: ECKeyPair
:type ourRatchetKey: ECKeyPair
:type ourIdentityKey: IdentityKeyPair
"""
structure = self.sessionStructure.PendingKeyExchange()
structure.sequence = sequence
structure.localBaseKey = ourBaseKey.getPublicKey().serialize()
structure.localBaseKeyPrivate = ourBaseKey.getPrivateKey().serialize()
structure.localRatchetKey = ourRatchetKey.getPublicKey().serialize()
structure.localRatchetKeyPrivate = ourRatchetKey.getPrivateKey().serialize()
structure.localIdentityKey = ourIdentityKey.getPublicKey().serialize()
structure.localIdentityKeyPrivate = ourIdentityKey.getPrivateKey().serialize()
self.sessionStructure.pendingKeyExchange.MergeFrom(structure) | python | def setPendingKeyExchange(self, sequence, ourBaseKey, ourRatchetKey, ourIdentityKey):
"""
:type sequence: int
:type ourBaseKey: ECKeyPair
:type ourRatchetKey: ECKeyPair
:type ourIdentityKey: IdentityKeyPair
"""
structure = self.sessionStructure.PendingKeyExchange()
structure.sequence = sequence
structure.localBaseKey = ourBaseKey.getPublicKey().serialize()
structure.localBaseKeyPrivate = ourBaseKey.getPrivateKey().serialize()
structure.localRatchetKey = ourRatchetKey.getPublicKey().serialize()
structure.localRatchetKeyPrivate = ourRatchetKey.getPrivateKey().serialize()
structure.localIdentityKey = ourIdentityKey.getPublicKey().serialize()
structure.localIdentityKeyPrivate = ourIdentityKey.getPrivateKey().serialize()
self.sessionStructure.pendingKeyExchange.MergeFrom(structure) | ['def', 'setPendingKeyExchange', '(', 'self', ',', 'sequence', ',', 'ourBaseKey', ',', 'ourRatchetKey', ',', 'ourIdentityKey', ')', ':', 'structure', '=', 'self', '.', 'sessionStructure', '.', 'PendingKeyExchange', '(', ')', 'structure', '.', 'sequence', '=', 'sequence', 'structure', '.', 'localBaseKey', '=', 'ourBaseKey', '.', 'getPublicKey', '(', ')', '.', 'serialize', '(', ')', 'structure', '.', 'localBaseKeyPrivate', '=', 'ourBaseKey', '.', 'getPrivateKey', '(', ')', '.', 'serialize', '(', ')', 'structure', '.', 'localRatchetKey', '=', 'ourRatchetKey', '.', 'getPublicKey', '(', ')', '.', 'serialize', '(', ')', 'structure', '.', 'localRatchetKeyPrivate', '=', 'ourRatchetKey', '.', 'getPrivateKey', '(', ')', '.', 'serialize', '(', ')', 'structure', '.', 'localIdentityKey', '=', 'ourIdentityKey', '.', 'getPublicKey', '(', ')', '.', 'serialize', '(', ')', 'structure', '.', 'localIdentityKeyPrivate', '=', 'ourIdentityKey', '.', 'getPrivateKey', '(', ')', '.', 'serialize', '(', ')', 'self', '.', 'sessionStructure', '.', 'pendingKeyExchange', '.', 'MergeFrom', '(', 'structure', ')'] | :type sequence: int
:type ourBaseKey: ECKeyPair
:type ourRatchetKey: ECKeyPair
:type ourIdentityKey: IdentityKeyPair | [':', 'type', 'sequence', ':', 'int', ':', 'type', 'ourBaseKey', ':', 'ECKeyPair', ':', 'type', 'ourRatchetKey', ':', 'ECKeyPair', ':', 'type', 'ourIdentityKey', ':', 'IdentityKeyPair'] | train | https://github.com/tgalal/python-axolotl/blob/0c681af4b756f556e23a9bf961abfbc6f82800cc/axolotl/state/sessionstate.py#L194-L210 |
7,404 | foremast/foremast | src/foremast/elb/create_elb.py | SpinnakerELB.create_elb | def create_elb(self):
"""Create or Update the ELB after rendering JSON data from configs.
Asserts that the ELB task was successful.
"""
json_data = self.make_elb_json()
LOG.debug('Block ELB JSON Data:\n%s', pformat(json_data))
wait_for_task(json_data)
self.add_listener_policy(json_data)
self.add_backend_policy(json_data)
self.configure_attributes(json_data) | python | def create_elb(self):
"""Create or Update the ELB after rendering JSON data from configs.
Asserts that the ELB task was successful.
"""
json_data = self.make_elb_json()
LOG.debug('Block ELB JSON Data:\n%s', pformat(json_data))
wait_for_task(json_data)
self.add_listener_policy(json_data)
self.add_backend_policy(json_data)
self.configure_attributes(json_data) | ['def', 'create_elb', '(', 'self', ')', ':', 'json_data', '=', 'self', '.', 'make_elb_json', '(', ')', 'LOG', '.', 'debug', '(', "'Block ELB JSON Data:\\n%s'", ',', 'pformat', '(', 'json_data', ')', ')', 'wait_for_task', '(', 'json_data', ')', 'self', '.', 'add_listener_policy', '(', 'json_data', ')', 'self', '.', 'add_backend_policy', '(', 'json_data', ')', 'self', '.', 'configure_attributes', '(', 'json_data', ')'] | Create or Update the ELB after rendering JSON data from configs.
Asserts that the ELB task was successful. | ['Create', 'or', 'Update', 'the', 'ELB', 'after', 'rendering', 'JSON', 'data', 'from', 'configs', '.', 'Asserts', 'that', 'the', 'ELB', 'task', 'was', 'successful', '.'] | train | https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/elb/create_elb.py#L114-L127 |
7,405 | anomaly/prestans | prestans/rest/response.py | Response._set_serializer_by_mime_type | def _set_serializer_by_mime_type(self, mime_type):
"""
:param mime_type:
:return:
used by content_type_set to set get a reference to the appropriate serializer
"""
# ignore if binary response
if isinstance(self._app_iter, BinaryResponse):
self.logger.info("ignoring setting serializer for binary response")
return
for available_serializer in self._serializers:
if available_serializer.content_type() == mime_type:
self._selected_serializer = available_serializer
self.logger.info("set serializer for mime type: %s" % mime_type)
return
self.logger.info("could not find serializer for mime type: %s" % mime_type)
raise exception.UnsupportedVocabularyError(mime_type, self.supported_mime_types_str) | python | def _set_serializer_by_mime_type(self, mime_type):
"""
:param mime_type:
:return:
used by content_type_set to set get a reference to the appropriate serializer
"""
# ignore if binary response
if isinstance(self._app_iter, BinaryResponse):
self.logger.info("ignoring setting serializer for binary response")
return
for available_serializer in self._serializers:
if available_serializer.content_type() == mime_type:
self._selected_serializer = available_serializer
self.logger.info("set serializer for mime type: %s" % mime_type)
return
self.logger.info("could not find serializer for mime type: %s" % mime_type)
raise exception.UnsupportedVocabularyError(mime_type, self.supported_mime_types_str) | ['def', '_set_serializer_by_mime_type', '(', 'self', ',', 'mime_type', ')', ':', '# ignore if binary response', 'if', 'isinstance', '(', 'self', '.', '_app_iter', ',', 'BinaryResponse', ')', ':', 'self', '.', 'logger', '.', 'info', '(', '"ignoring setting serializer for binary response"', ')', 'return', 'for', 'available_serializer', 'in', 'self', '.', '_serializers', ':', 'if', 'available_serializer', '.', 'content_type', '(', ')', '==', 'mime_type', ':', 'self', '.', '_selected_serializer', '=', 'available_serializer', 'self', '.', 'logger', '.', 'info', '(', '"set serializer for mime type: %s"', '%', 'mime_type', ')', 'return', 'self', '.', 'logger', '.', 'info', '(', '"could not find serializer for mime type: %s"', '%', 'mime_type', ')', 'raise', 'exception', '.', 'UnsupportedVocabularyError', '(', 'mime_type', ',', 'self', '.', 'supported_mime_types_str', ')'] | :param mime_type:
:return:
used by content_type_set to set get a reference to the appropriate serializer | [':', 'param', 'mime_type', ':', ':', 'return', ':'] | train | https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/rest/response.py#L76-L96 |
7,406 | ucbvislab/radiotool | radiotool/composer/composition.py | Composition.add_segments | def add_segments(self, segments):
"""Add a list of segments to the composition
:param segments: Segments to add to composition
:type segments: list of :py:class:`radiotool.composer.Segment`
"""
self.tracks.update([seg.track for seg in segments])
self.segments.extend(segments) | python | def add_segments(self, segments):
"""Add a list of segments to the composition
:param segments: Segments to add to composition
:type segments: list of :py:class:`radiotool.composer.Segment`
"""
self.tracks.update([seg.track for seg in segments])
self.segments.extend(segments) | ['def', 'add_segments', '(', 'self', ',', 'segments', ')', ':', 'self', '.', 'tracks', '.', 'update', '(', '[', 'seg', '.', 'track', 'for', 'seg', 'in', 'segments', ']', ')', 'self', '.', 'segments', '.', 'extend', '(', 'segments', ')'] | Add a list of segments to the composition
:param segments: Segments to add to composition
:type segments: list of :py:class:`radiotool.composer.Segment` | ['Add', 'a', 'list', 'of', 'segments', 'to', 'the', 'composition'] | train | https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L98-L105 |
7,407 | spencerahill/aospy | aospy/data_loader.py | _prep_time_data | def _prep_time_data(ds):
"""Prepare time coordinate information in Dataset for use in aospy.
1. If the Dataset contains a time bounds coordinate, add attributes
representing the true beginning and end dates of the time interval used
to construct the Dataset
2. If the Dataset contains a time bounds coordinate, overwrite the time
coordinate values with the averages of the time bounds at each timestep
3. Decode the times into np.datetime64 objects for time indexing
Parameters
----------
ds : Dataset
Pre-processed Dataset with time coordinate renamed to
internal_names.TIME_STR
Returns
-------
Dataset
The processed Dataset
"""
ds = times.ensure_time_as_index(ds)
if TIME_BOUNDS_STR in ds:
ds = times.ensure_time_avg_has_cf_metadata(ds)
ds[TIME_STR] = times.average_time_bounds(ds)
else:
logging.warning("dt array not found. Assuming equally spaced "
"values in time, even though this may not be "
"the case")
ds = times.add_uniform_time_weights(ds)
# Suppress enable_cftimeindex is a no-op warning; we'll keep setting it for
# now to maintain backwards compatibility for older xarray versions.
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
with xr.set_options(enable_cftimeindex=True):
ds = xr.decode_cf(ds, decode_times=True, decode_coords=False,
mask_and_scale=True)
return ds | python | def _prep_time_data(ds):
"""Prepare time coordinate information in Dataset for use in aospy.
1. If the Dataset contains a time bounds coordinate, add attributes
representing the true beginning and end dates of the time interval used
to construct the Dataset
2. If the Dataset contains a time bounds coordinate, overwrite the time
coordinate values with the averages of the time bounds at each timestep
3. Decode the times into np.datetime64 objects for time indexing
Parameters
----------
ds : Dataset
Pre-processed Dataset with time coordinate renamed to
internal_names.TIME_STR
Returns
-------
Dataset
The processed Dataset
"""
ds = times.ensure_time_as_index(ds)
if TIME_BOUNDS_STR in ds:
ds = times.ensure_time_avg_has_cf_metadata(ds)
ds[TIME_STR] = times.average_time_bounds(ds)
else:
logging.warning("dt array not found. Assuming equally spaced "
"values in time, even though this may not be "
"the case")
ds = times.add_uniform_time_weights(ds)
# Suppress enable_cftimeindex is a no-op warning; we'll keep setting it for
# now to maintain backwards compatibility for older xarray versions.
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
with xr.set_options(enable_cftimeindex=True):
ds = xr.decode_cf(ds, decode_times=True, decode_coords=False,
mask_and_scale=True)
return ds | ['def', '_prep_time_data', '(', 'ds', ')', ':', 'ds', '=', 'times', '.', 'ensure_time_as_index', '(', 'ds', ')', 'if', 'TIME_BOUNDS_STR', 'in', 'ds', ':', 'ds', '=', 'times', '.', 'ensure_time_avg_has_cf_metadata', '(', 'ds', ')', 'ds', '[', 'TIME_STR', ']', '=', 'times', '.', 'average_time_bounds', '(', 'ds', ')', 'else', ':', 'logging', '.', 'warning', '(', '"dt array not found. Assuming equally spaced "', '"values in time, even though this may not be "', '"the case"', ')', 'ds', '=', 'times', '.', 'add_uniform_time_weights', '(', 'ds', ')', "# Suppress enable_cftimeindex is a no-op warning; we'll keep setting it for", '# now to maintain backwards compatibility for older xarray versions.', 'with', 'warnings', '.', 'catch_warnings', '(', ')', ':', 'warnings', '.', 'filterwarnings', '(', "'ignore'", ')', 'with', 'xr', '.', 'set_options', '(', 'enable_cftimeindex', '=', 'True', ')', ':', 'ds', '=', 'xr', '.', 'decode_cf', '(', 'ds', ',', 'decode_times', '=', 'True', ',', 'decode_coords', '=', 'False', ',', 'mask_and_scale', '=', 'True', ')', 'return', 'ds'] | Prepare time coordinate information in Dataset for use in aospy.
1. If the Dataset contains a time bounds coordinate, add attributes
representing the true beginning and end dates of the time interval used
to construct the Dataset
2. If the Dataset contains a time bounds coordinate, overwrite the time
coordinate values with the averages of the time bounds at each timestep
3. Decode the times into np.datetime64 objects for time indexing
Parameters
----------
ds : Dataset
Pre-processed Dataset with time coordinate renamed to
internal_names.TIME_STR
Returns
-------
Dataset
The processed Dataset | ['Prepare', 'time', 'coordinate', 'information', 'in', 'Dataset', 'for', 'use', 'in', 'aospy', '.'] | train | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/data_loader.py#L179-L216 |
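The function above overwrites the time coordinate with the midpoints of CF-style time bounds. A minimal, self-contained xarray sketch of that step (the `nv` bounds dimension name is an assumption, not necessarily what aospy uses):

```python
import numpy as np
import xarray as xr

# Toy dataset: two time steps whose bounds are [0, 1] and [1, 2].
ds = xr.Dataset(
    {"time_bounds": (("time", "nv"), np.array([[0.0, 1.0], [1.0, 2.0]]))},
    coords={"time": [0.25, 1.75]},
)
# Step 2 from the docstring: replace each timestamp with its bounds midpoint.
ds["time"] = ds["time_bounds"].mean(dim="nv")
ds["time"].values  # array([0.5, 1.5])
```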
7,408 | mattsolo1/hmmerclust | hmmerclust/hmmerclust.py | OrganismDB.find_loci | def find_loci(self, cluster_size, maxgap, locusview=False, colordict=None):
'''
Finds the loci of a given cluster size & maximum gap between cluster members.
Args
cluster_size (int): minimum number of genes in the cluster.
maxgap (int): max basepair gap between genes in the cluster.
Kwargs
locusview (bool): whether or not a map is generated for the locus_parent_organism
colordict (list): pass a pre-made color scheme for identified proteins
'''
if colordict != None:
self.search.protein_arrow_color_dict = colordict
for organism in self.organisms:
print 'finding loci for', organism.name
#reset loci if there is something in there already
organism.loci = []
orghits = []
for protein in organism.proteins:
if len(protein.hmm_hit_list) > 0:
orghits.append((organism.accession, protein.accession,
protein.start_bp, protein.end_bp, protein))
bp_start_pooled = [hit[2] for hit in orghits]
try:
clustered_data = self.cluster_number(bp_start_pooled, maxgap)
significant_cluster_list = []
for cluster in clustered_data:
if len(cluster) > cluster_size:
significant_cluster_list.append(cluster)
#print significant_cluster_list
for cluster in significant_cluster_list:
proteins_in_locus = []
cluster.sort()
for bp_start in cluster:
for hit in orghits:
if bp_start == hit[2]:
proteins_in_locus.append(hit[4])
organism.loci.append(Locus(proteins_in_locus,
organism,
self.search.query_names,
locusview))
except IndexError,e:
print 'Index error', str(e), organism.name
print 'total of', str(len(organism.loci)), 'found for', organism.name | python | def find_loci(self, cluster_size, maxgap, locusview=False, colordict=None):
'''
Finds the loci of a given cluster size & maximum gap between cluster members.
Args
cluster_size (int): minimum number of genes in the cluster.
maxgap (int): max basepair gap between genes in the cluster.
Kwargs
locusview (bool): whether or not a map is generated for the locus_parent_organism
colordict (list): pass a pre-made color scheme for identified proteins
'''
if colordict != None:
self.search.protein_arrow_color_dict = colordict
for organism in self.organisms:
print 'finding loci for', organism.name
#reset loci if there is something in there already
organism.loci = []
orghits = []
for protein in organism.proteins:
if len(protein.hmm_hit_list) > 0:
orghits.append((organism.accession, protein.accession,
protein.start_bp, protein.end_bp, protein))
bp_start_pooled = [hit[2] for hit in orghits]
try:
clustered_data = self.cluster_number(bp_start_pooled, maxgap)
significant_cluster_list = []
for cluster in clustered_data:
if len(cluster) > cluster_size:
significant_cluster_list.append(cluster)
#print significant_cluster_list
for cluster in significant_cluster_list:
proteins_in_locus = []
cluster.sort()
for bp_start in cluster:
for hit in orghits:
if bp_start == hit[2]:
proteins_in_locus.append(hit[4])
organism.loci.append(Locus(proteins_in_locus,
organism,
self.search.query_names,
locusview))
except IndexError,e:
print 'Index error', str(e), organism.name
print 'total of', str(len(organism.loci)), 'found for', organism.name | ['def', 'find_loci', '(', 'self', ',', 'cluster_size', ',', 'maxgap', ',', 'locusview', '=', 'False', ',', 'colordict', '=', 'None', ')', ':', 'if', 'colordict', '!=', 'None', ':', 'self', '.', 'search', '.', 'protein_arrow_color_dict', '=', 'colordict', 'for', 'organism', 'in', 'self', '.', 'organisms', ':', 'print', "'finding loci for'", ',', 'organism', '.', 'name', '#reset loci if there is something in there already', 'organism', '.', 'loci', '=', '[', ']', 'orghits', '=', '[', ']', 'for', 'protein', 'in', 'organism', '.', 'proteins', ':', 'if', 'len', '(', 'protein', '.', 'hmm_hit_list', ')', '>', '0', ':', 'orghits', '.', 'append', '(', '(', 'organism', '.', 'accession', ',', 'protein', '.', 'accession', ',', 'protein', '.', 'start_bp', ',', 'protein', '.', 'end_bp', ',', 'protein', ')', ')', 'bp_start_pooled', '=', '[', 'hit', '[', '2', ']', 'for', 'hit', 'in', 'orghits', ']', 'try', ':', 'clustered_data', '=', 'self', '.', 'cluster_number', '(', 'bp_start_pooled', ',', 'maxgap', ')', 'significant_cluster_list', '=', '[', ']', 'for', 'cluster', 'in', 'clustered_data', ':', 'if', 'len', '(', 'cluster', ')', '>', 'cluster_size', ':', 'significant_cluster_list', '.', 'append', '(', 'cluster', ')', '#print significant_cluster_list', 'for', 'cluster', 'in', 'significant_cluster_list', ':', 'proteins_in_locus', '=', '[', ']', 'cluster', '.', 'sort', '(', ')', 'for', 'bp_start', 'in', 'cluster', ':', 'for', 'hit', 'in', 'orghits', ':', 'if', 'bp_start', '==', 'hit', '[', '2', ']', ':', 'proteins_in_locus', '.', 'append', '(', 'hit', '[', '4', ']', ')', 'organism', '.', 'loci', '.', 'append', '(', 'Locus', '(', 'proteins_in_locus', ',', 'organism', ',', 'self', '.', 'search', '.', 'query_names', ',', 'locusview', ')', ')', 'except', 'IndexError', ',', 'e', ':', 'print', "'Index error'", ',', 'str', '(', 'e', ')', ',', 'organism', '.', 'name', 'print', "'total of'", ',', 'str', '(', 'len', '(', 'organism', '.', 'loci', ')', ')', ',', "'found for'", ',', 'organism', '.', 'name'] | Finds the loci of a given cluster size & maximum gap between cluster members.
Args
cluster_size (int): minimum number of genes in the cluster.
maxgap (int): max basepair gap between genes in the cluster.
Kwargs
locusview (bool): whether or not a map is generated for the locus_parent_organism
colordict (list): pass a pre-made color scheme for identified proteins | ['Finds', 'the', 'loci', 'of', 'a', 'given', 'cluster', 'size', '&', 'maximum', 'gap', 'between', 'cluster', 'members', '.'] | train | https://github.com/mattsolo1/hmmerclust/blob/471596043a660097ed8b11430d42118a8fd25798/hmmerclust/hmmerclust.py#L296-L355 |
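`find_loci` above relies on `self.cluster_number(bp_start_pooled, maxgap)`, which is not shown in this row. A sketch of the gap-based 1-D clustering it appears to perform (an assumption about its behaviour, not the library's verbatim code):

```python
def cluster_by_maxgap(values, maxgap):
    """Group sorted numbers into runs whose neighbouring gaps are <= maxgap."""
    values = sorted(values)
    groups = [[values[0]]]
    for x in values[1:]:
        if x - groups[-1][-1] <= maxgap:
            groups[-1].append(x)   # close enough: extend the current cluster
        else:
            groups.append([x])     # gap too large: start a new cluster
    return groups

cluster_by_maxgap([100, 250, 9000, 9100, 9150], maxgap=500)
# -> [[100, 250], [9000, 9100, 9150]]
```

With `cluster_size` and `maxgap` as in the method above, only groups longer than `cluster_size` would then be kept as candidate loci.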
7,409 | mojaie/chorus | chorus/util/debug.py | mute | def mute(func):
""" Decorator
Make stdout silent
"""
def _f(*args, **kwargs):
sys.stdout = open(os.devnull, 'w')
res = func(*args, **kwargs)
sys.stdout.close()
sys.stdout = sys.__stdout__
return res
return _f | python | def mute(func):
""" Decorator
Make stdout silent
"""
def _f(*args, **kwargs):
sys.stdout = open(os.devnull, 'w')
res = func(*args, **kwargs)
sys.stdout.close()
sys.stdout = sys.__stdout__
return res
return _f | ['def', 'mute', '(', 'func', ')', ':', 'def', '_f', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'sys', '.', 'stdout', '=', 'open', '(', 'os', '.', 'devnull', ',', "'w'", ')', 'res', '=', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'sys', '.', 'stdout', '.', 'close', '(', ')', 'sys', '.', 'stdout', '=', 'sys', '.', '__stdout__', 'return', 'res', 'return', '_f'] | Decorator
Make stdout silent | ['Decorator', 'Make', 'stdout', 'silent'] | train | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/util/debug.py#L97-L107 |
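A small usage sketch for the `mute` decorator above; note that, as written, stdout is only restored if the wrapped function returns normally:

```python
@mute
def chatty():
    print("noisy library output")   # silently discarded
    return 42

result = chatty()        # prints nothing, result == 42
print("stdout is back")  # normal printing resumes after the call
```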
7,410 | daler/gffutils | gffutils/create.py | _DBCreator.create | def create(self):
"""
Calls various methods sequentially in order to fully build the
database.
"""
# Calls each of these methods in order. _populate_from_lines and
# _update_relations must be implemented in subclasses.
self._init_tables()
self._populate_from_lines(self.iterator)
self._update_relations()
self._finalize() | python | def create(self):
"""
Calls various methods sequentially in order to fully build the
database.
"""
# Calls each of these methods in order. _populate_from_lines and
# _update_relations must be implemented in subclasses.
self._init_tables()
self._populate_from_lines(self.iterator)
self._update_relations()
self._finalize() | ['def', 'create', '(', 'self', ')', ':', '# Calls each of these methods in order. _populate_from_lines and', '# _update_relations must be implemented in subclasses.', 'self', '.', '_init_tables', '(', ')', 'self', '.', '_populate_from_lines', '(', 'self', '.', 'iterator', ')', 'self', '.', '_update_relations', '(', ')', 'self', '.', '_finalize', '(', ')'] | Calls various methods sequentially in order to fully build the
database. | ['Calls', 'various', 'methods', 'sequentially', 'in', 'order', 'to', 'fully', 'build', 'the', 'database', '.'] | train | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/create.py#L507-L517 |
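`_DBCreator.create` above is the internal driver behind the public `gffutils.create_db()` entry point. A typical call might look like this (the GFF3 path is a placeholder):

```python
import gffutils

# ":memory:" keeps the sqlite database in RAM instead of writing a file.
db = gffutils.create_db("annotation.gff3", dbfn=":memory:")
genes = list(db.features_of_type("gene"))
```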
7,411 | data61/clkhash | clkhash/cli.py | create_project | def create_project(type, schema, server, name, output, verbose):
"""Create a new project on an entity matching server.
See entity matching service documentation for details on mapping type and schema
Returns authentication details for the created project.
"""
if verbose:
log("Entity Matching Server: {}".format(server))
if schema is not None:
schema_json = json.load(schema)
# Validate the schema
clkhash.schema.validate_schema_dict(schema_json)
else:
raise ValueError("Schema must be provided when creating new linkage project")
name = name if name is not None else ''
# Creating new project
try:
project_creation_reply = project_create(server, schema_json, type, name)
except ServiceError as e:
log("Unexpected response - {}".format(e.status_code))
log(e.text)
raise SystemExit
else:
log("Project created")
json.dump(project_creation_reply, output) | python | def create_project(type, schema, server, name, output, verbose):
"""Create a new project on an entity matching server.
See entity matching service documentation for details on mapping type and schema
Returns authentication details for the created project.
"""
if verbose:
log("Entity Matching Server: {}".format(server))
if schema is not None:
schema_json = json.load(schema)
# Validate the schema
clkhash.schema.validate_schema_dict(schema_json)
else:
raise ValueError("Schema must be provided when creating new linkage project")
name = name if name is not None else ''
# Creating new project
try:
project_creation_reply = project_create(server, schema_json, type, name)
except ServiceError as e:
log("Unexpected response - {}".format(e.status_code))
log(e.text)
raise SystemExit
else:
log("Project created")
json.dump(project_creation_reply, output) | ['def', 'create_project', '(', 'type', ',', 'schema', ',', 'server', ',', 'name', ',', 'output', ',', 'verbose', ')', ':', 'if', 'verbose', ':', 'log', '(', '"Entity Matching Server: {}"', '.', 'format', '(', 'server', ')', ')', 'if', 'schema', 'is', 'not', 'None', ':', 'schema_json', '=', 'json', '.', 'load', '(', 'schema', ')', '# Validate the schema', 'clkhash', '.', 'schema', '.', 'validate_schema_dict', '(', 'schema_json', ')', 'else', ':', 'raise', 'ValueError', '(', '"Schema must be provided when creating new linkage project"', ')', 'name', '=', 'name', 'if', 'name', 'is', 'not', 'None', 'else', "''", '# Creating new project', 'try', ':', 'project_creation_reply', '=', 'project_create', '(', 'server', ',', 'schema_json', ',', 'type', ',', 'name', ')', 'except', 'ServiceError', 'as', 'e', ':', 'log', '(', '"Unexpected response - {}"', '.', 'format', '(', 'e', '.', 'status_code', ')', ')', 'log', '(', 'e', '.', 'text', ')', 'raise', 'SystemExit', 'else', ':', 'log', '(', '"Project created"', ')', 'json', '.', 'dump', '(', 'project_creation_reply', ',', 'output', ')'] | Create a new project on an entity matching server.
See entity matching service documentation for details on mapping type and schema
Returns authentication details for the created project. | ['Create', 'a', 'new', 'project', 'on', 'an', 'entity', 'matching', 'server', '.'] | train | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/cli.py#L143-L171 |
7,412 | pandas-dev/pandas | pandas/core/indexes/datetimelike.py | DatetimeIndexOpsMixin.argmin | def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin() | python | def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin() | ['def', 'argmin', '(', 'self', ',', 'axis', '=', 'None', ',', 'skipna', '=', 'True', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'nv', '.', 'validate_argmin', '(', 'args', ',', 'kwargs', ')', 'nv', '.', 'validate_minmax_axis', '(', 'axis', ')', 'i8', '=', 'self', '.', 'asi8', 'if', 'self', '.', 'hasnans', ':', 'mask', '=', 'self', '.', '_isnan', 'if', 'mask', '.', 'all', '(', ')', 'or', 'not', 'skipna', ':', 'return', '-', '1', 'i8', '=', 'i8', '.', 'copy', '(', ')', 'i8', '[', 'mask', ']', '=', 'np', '.', 'iinfo', '(', "'int64'", ')', '.', 'max', 'return', 'i8', '.', 'argmin', '(', ')'] | Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin | ['Returns', 'the', 'indices', 'of', 'the', 'minimum', 'values', 'along', 'an', 'axis', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L349-L370 |
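A quick usage sketch of the behaviour implemented above: positions (not labels) are returned, and `skipna` controls how NaT is treated in the version shown:

```python
import pandas as pd

idx = pd.DatetimeIndex(["2019-01-03", pd.NaT, "2019-01-01"])
idx.argmin()              # 2: position of the earliest timestamp, NaT skipped
idx.argmin(skipna=False)  # -1 in this implementation, because the index holds a NaT
```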
7,413 | Julius2342/pyvlx | pyvlx/frames/frame_activate_scene.py | FrameActivateSceneConfirmation.get_payload | def get_payload(self):
"""Return Payload."""
ret = bytes([self.status.value])
ret += bytes([self.session_id >> 8 & 255, self.session_id & 255])
return ret | python | def get_payload(self):
"""Return Payload."""
ret = bytes([self.status.value])
ret += bytes([self.session_id >> 8 & 255, self.session_id & 255])
return ret | ['def', 'get_payload', '(', 'self', ')', ':', 'ret', '=', 'bytes', '(', '[', 'self', '.', 'status', '.', 'value', ']', ')', 'ret', '+=', 'bytes', '(', '[', 'self', '.', 'session_id', '>>', '8', '&', '255', ',', 'self', '.', 'session_id', '&', '255', ']', ')', 'return', 'ret'] | Return Payload. | ['Return', 'Payload', '.'] | train | https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_activate_scene.py#L65-L69 |
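The payload above is one status byte followed by the 16-bit session id in big-endian order; the bit arithmetic, worked through for a concrete value:

```python
session_id = 0x0102            # 258
high = session_id >> 8 & 255   # 1 (upper byte)
low = session_id & 255         # 2 (lower byte)
bytes([high, low])             # b'\x01\x02'
```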
7,414 | DataDog/integrations-core | datadog_checks_base/datadog_checks/base/utils/timeout.py | timeout | def timeout(timeout):
"""
A decorator to timeout a function. Decorated method calls are executed in a separate new thread
with a specified timeout.
Also check if a thread for the same function already exists before creating a new one.
Note: Compatible with Windows (thread based).
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
key = "{0}:{1}:{2}:{3}".format(id(func), func.__name__, args, kwargs)
if key in _thread_by_func:
# A thread for the same function already exists.
worker = _thread_by_func[key]
else:
worker = ThreadMethod(func, args, kwargs)
_thread_by_func[key] = worker
worker.join(timeout)
if worker.is_alive():
raise TimeoutException()
del _thread_by_func[key]
if worker.exception:
raise worker.exception
else:
return worker.result
return wrapper
return decorator | python | def timeout(timeout):
"""
A decorator to timeout a function. Decorated method calls are executed in a separate new thread
with a specified timeout.
Also check if a thread for the same function already exists before creating a new one.
Note: Compatible with Windows (thread based).
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
key = "{0}:{1}:{2}:{3}".format(id(func), func.__name__, args, kwargs)
if key in _thread_by_func:
# A thread for the same function already exists.
worker = _thread_by_func[key]
else:
worker = ThreadMethod(func, args, kwargs)
_thread_by_func[key] = worker
worker.join(timeout)
if worker.is_alive():
raise TimeoutException()
del _thread_by_func[key]
if worker.exception:
raise worker.exception
else:
return worker.result
return wrapper
return decorator | ['def', 'timeout', '(', 'timeout', ')', ':', 'def', 'decorator', '(', 'func', ')', ':', '@', 'functools', '.', 'wraps', '(', 'func', ')', 'def', 'wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'key', '=', '"{0}:{1}:{2}:{3}"', '.', 'format', '(', 'id', '(', 'func', ')', ',', 'func', '.', '__name__', ',', 'args', ',', 'kwargs', ')', 'if', 'key', 'in', '_thread_by_func', ':', '# A thread for the same function already exists.', 'worker', '=', '_thread_by_func', '[', 'key', ']', 'else', ':', 'worker', '=', 'ThreadMethod', '(', 'func', ',', 'args', ',', 'kwargs', ')', '_thread_by_func', '[', 'key', ']', '=', 'worker', 'worker', '.', 'join', '(', 'timeout', ')', 'if', 'worker', '.', 'is_alive', '(', ')', ':', 'raise', 'TimeoutException', '(', ')', 'del', '_thread_by_func', '[', 'key', ']', 'if', 'worker', '.', 'exception', ':', 'raise', 'worker', '.', 'exception', 'else', ':', 'return', 'worker', '.', 'result', 'return', 'wrapper', 'return', 'decorator'] | A decorator to timeout a function. Decorated method calls are executed in a separate new thread
with a specified timeout.
Also check if a thread for the same function already exists before creating a new one.
Note: Compatible with Windows (thread based). | ['A', 'decorator', 'to', 'timeout', 'a', 'function', '.', 'Decorated', 'method', 'calls', 'are', 'executed', 'in', 'a', 'separate', 'new', 'thread', 'with', 'a', 'specified', 'timeout', '.', 'Also', 'check', 'if', 'a', 'thread', 'for', 'the', 'same', 'function', 'already', 'exists', 'before', 'creating', 'a', 'new', 'one', '.', 'Note', ':', 'Compatible', 'with', 'Windows', '(', 'thread', 'based', ')', '.'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/utils/timeout.py#L41-L74 |
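Usage sketch for the decorator above. The timed-out worker thread is not killed, it simply stops being joined, so the wrapped call may keep running in the background:

```python
import time

@timeout(2)
def slow_query():
    time.sleep(10)
    return "done"

try:
    slow_query()
except TimeoutException:
    pass  # raised after roughly 2 seconds
```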
7,415 | cebel/pyctd | src/pyctd/manager/database.py | DbManager.download_urls | def download_urls(cls, urls, force_download=False):
"""Downloads all CTD URLs that don't exist
:param iter[str] urls: iterable of URL of CTD
:param bool force_download: force method to download
"""
for url in urls:
file_path = cls.get_path_to_file_from_url(url)
if os.path.exists(file_path) and not force_download:
log.info('already downloaded %s to %s', url, file_path)
else:
log.info('downloading %s to %s', url, file_path)
download_timer = time.time()
urlretrieve(url, file_path)
log.info('downloaded in %.2f seconds', time.time() - download_timer) | python | def download_urls(cls, urls, force_download=False):
"""Downloads all CTD URLs that don't exist
:param iter[str] urls: iterable of URL of CTD
:param bool force_download: force method to download
"""
for url in urls:
file_path = cls.get_path_to_file_from_url(url)
if os.path.exists(file_path) and not force_download:
log.info('already downloaded %s to %s', url, file_path)
else:
log.info('downloading %s to %s', url, file_path)
download_timer = time.time()
urlretrieve(url, file_path)
log.info('downloaded in %.2f seconds', time.time() - download_timer) | ['def', 'download_urls', '(', 'cls', ',', 'urls', ',', 'force_download', '=', 'False', ')', ':', 'for', 'url', 'in', 'urls', ':', 'file_path', '=', 'cls', '.', 'get_path_to_file_from_url', '(', 'url', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'file_path', ')', 'and', 'not', 'force_download', ':', 'log', '.', 'info', '(', "'already downloaded %s to %s'", ',', 'url', ',', 'file_path', ')', 'else', ':', 'log', '.', 'info', '(', "'downloading %s to %s'", ',', 'url', ',', 'file_path', ')', 'download_timer', '=', 'time', '.', 'time', '(', ')', 'urlretrieve', '(', 'url', ',', 'file_path', ')', 'log', '.', 'info', '(', "'downloaded in %.2f seconds'", ',', 'time', '.', 'time', '(', ')', '-', 'download_timer', ')'] | Downloads all CTD URLs that don't exist
:param iter[str] urls: iterable of URL of CTD
:param bool force_download: force method to download | ['Downloads', 'all', 'CTD', 'URLs', 'that', 'don', 't', 'exist', ':', 'param', 'iter', '[', 'str', ']', 'urls', ':', 'iterable', 'of', 'URL', 'of', 'CTD', ':', 'param', 'bool', 'force_download', ':', 'force', 'method', 'to', 'download'] | train | https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/database.py#L414-L429 |
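Usage sketch for the class method above (the URL is a placeholder, not a verified CTD download path):

```python
urls = ["http://ctdbase.org/reports/CTD_chemicals.tsv.gz"]  # placeholder URL

DbManager.download_urls(urls)                        # downloads and logs the elapsed time
DbManager.download_urls(urls)                        # no-op: the file already exists locally
DbManager.download_urls(urls, force_download=True)   # re-downloads unconditionally
```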
7,416 | cloudtools/stacker | stacker/util.py | yaml_to_ordered_dict | def yaml_to_ordered_dict(stream, loader=yaml.SafeLoader):
"""Provides yaml.load alternative with preserved dictionary order.
Args:
stream (string): YAML string to load.
loader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe
load.
Returns:
OrderedDict: Parsed YAML.
"""
class OrderedUniqueLoader(loader):
"""
Subclasses the given pyYAML `loader` class.
Validates all sibling keys to insure no duplicates.
Returns an OrderedDict instead of a Dict.
"""
# keys which require no duplicate siblings.
NO_DUPE_SIBLINGS = ["stacks", "class_path"]
# keys which require no duplicate children keys.
NO_DUPE_CHILDREN = ["stacks"]
def _error_mapping_on_dupe(self, node, node_name):
"""check mapping node for dupe children keys."""
if isinstance(node, MappingNode):
mapping = {}
for n in node.value:
a = n[0]
b = mapping.get(a.value, None)
if b:
msg = "{} mapping cannot have duplicate keys {} {}"
raise ConstructorError(
msg.format(node_name, b.start_mark, a.start_mark)
)
mapping[a.value] = a
def _validate_mapping(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(
None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise ConstructorError(
"while constructing a mapping", node.start_mark,
"found unhashable key (%s)" % exc, key_node.start_mark
)
# prevent duplicate sibling keys for certain "keywords".
if key in mapping and key in self.NO_DUPE_SIBLINGS:
msg = "{} key cannot have duplicate siblings {} {}"
raise ConstructorError(
msg.format(key, node.start_mark, key_node.start_mark)
)
if key in self.NO_DUPE_CHILDREN:
# prevent duplicate children keys for this mapping.
self._error_mapping_on_dupe(value_node, key_node.value)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def construct_mapping(self, node, deep=False):
"""Override parent method to use OrderedDict."""
if isinstance(node, MappingNode):
self.flatten_mapping(node)
return self._validate_mapping(node, deep=deep)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
OrderedUniqueLoader.add_constructor(
u'tag:yaml.org,2002:map', OrderedUniqueLoader.construct_yaml_map,
)
return yaml.load(stream, OrderedUniqueLoader) | python | def yaml_to_ordered_dict(stream, loader=yaml.SafeLoader):
"""Provides yaml.load alternative with preserved dictionary order.
Args:
stream (string): YAML string to load.
loader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe
load.
Returns:
OrderedDict: Parsed YAML.
"""
class OrderedUniqueLoader(loader):
"""
Subclasses the given pyYAML `loader` class.
Validates all sibling keys to insure no duplicates.
Returns an OrderedDict instead of a Dict.
"""
# keys which require no duplicate siblings.
NO_DUPE_SIBLINGS = ["stacks", "class_path"]
# keys which require no duplicate children keys.
NO_DUPE_CHILDREN = ["stacks"]
def _error_mapping_on_dupe(self, node, node_name):
"""check mapping node for dupe children keys."""
if isinstance(node, MappingNode):
mapping = {}
for n in node.value:
a = n[0]
b = mapping.get(a.value, None)
if b:
msg = "{} mapping cannot have duplicate keys {} {}"
raise ConstructorError(
msg.format(node_name, b.start_mark, a.start_mark)
)
mapping[a.value] = a
def _validate_mapping(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(
None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise ConstructorError(
"while constructing a mapping", node.start_mark,
"found unhashable key (%s)" % exc, key_node.start_mark
)
# prevent duplicate sibling keys for certain "keywords".
if key in mapping and key in self.NO_DUPE_SIBLINGS:
msg = "{} key cannot have duplicate siblings {} {}"
raise ConstructorError(
msg.format(key, node.start_mark, key_node.start_mark)
)
if key in self.NO_DUPE_CHILDREN:
# prevent duplicate children keys for this mapping.
self._error_mapping_on_dupe(value_node, key_node.value)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def construct_mapping(self, node, deep=False):
"""Override parent method to use OrderedDict."""
if isinstance(node, MappingNode):
self.flatten_mapping(node)
return self._validate_mapping(node, deep=deep)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
OrderedUniqueLoader.add_constructor(
u'tag:yaml.org,2002:map', OrderedUniqueLoader.construct_yaml_map,
)
return yaml.load(stream, OrderedUniqueLoader) | ['def', 'yaml_to_ordered_dict', '(', 'stream', ',', 'loader', '=', 'yaml', '.', 'SafeLoader', ')', ':', 'class', 'OrderedUniqueLoader', '(', 'loader', ')', ':', '"""\n Subclasses the given pyYAML `loader` class.\n\n Validates all sibling keys to insure no duplicates.\n\n Returns an OrderedDict instead of a Dict.\n """', '# keys which require no duplicate siblings.', 'NO_DUPE_SIBLINGS', '=', '[', '"stacks"', ',', '"class_path"', ']', '# keys which require no duplicate children keys.', 'NO_DUPE_CHILDREN', '=', '[', '"stacks"', ']', 'def', '_error_mapping_on_dupe', '(', 'self', ',', 'node', ',', 'node_name', ')', ':', '"""check mapping node for dupe children keys."""', 'if', 'isinstance', '(', 'node', ',', 'MappingNode', ')', ':', 'mapping', '=', '{', '}', 'for', 'n', 'in', 'node', '.', 'value', ':', 'a', '=', 'n', '[', '0', ']', 'b', '=', 'mapping', '.', 'get', '(', 'a', '.', 'value', ',', 'None', ')', 'if', 'b', ':', 'msg', '=', '"{} mapping cannot have duplicate keys {} {}"', 'raise', 'ConstructorError', '(', 'msg', '.', 'format', '(', 'node_name', ',', 'b', '.', 'start_mark', ',', 'a', '.', 'start_mark', ')', ')', 'mapping', '[', 'a', '.', 'value', ']', '=', 'a', 'def', '_validate_mapping', '(', 'self', ',', 'node', ',', 'deep', '=', 'False', ')', ':', 'if', 'not', 'isinstance', '(', 'node', ',', 'MappingNode', ')', ':', 'raise', 'ConstructorError', '(', 'None', ',', 'None', ',', '"expected a mapping node, but found %s"', '%', 'node', '.', 'id', ',', 'node', '.', 'start_mark', ')', 'mapping', '=', 'OrderedDict', '(', ')', 'for', 'key_node', ',', 'value_node', 'in', 'node', '.', 'value', ':', 'key', '=', 'self', '.', 'construct_object', '(', 'key_node', ',', 'deep', '=', 'deep', ')', 'try', ':', 'hash', '(', 'key', ')', 'except', 'TypeError', 'as', 'exc', ':', 'raise', 'ConstructorError', '(', '"while constructing a mapping"', ',', 'node', '.', 'start_mark', ',', '"found unhashable key (%s)"', '%', 'exc', ',', 'key_node', '.', 'start_mark', ')', '# prevent duplicate sibling keys for certain "keywords".', 'if', 'key', 'in', 'mapping', 'and', 'key', 'in', 'self', '.', 'NO_DUPE_SIBLINGS', ':', 'msg', '=', '"{} key cannot have duplicate siblings {} {}"', 'raise', 'ConstructorError', '(', 'msg', '.', 'format', '(', 'key', ',', 'node', '.', 'start_mark', ',', 'key_node', '.', 'start_mark', ')', ')', 'if', 'key', 'in', 'self', '.', 'NO_DUPE_CHILDREN', ':', '# prevent duplicate children keys for this mapping.', 'self', '.', '_error_mapping_on_dupe', '(', 'value_node', ',', 'key_node', '.', 'value', ')', 'value', '=', 'self', '.', 'construct_object', '(', 'value_node', ',', 'deep', '=', 'deep', ')', 'mapping', '[', 'key', ']', '=', 'value', 'return', 'mapping', 'def', 'construct_mapping', '(', 'self', ',', 'node', ',', 'deep', '=', 'False', ')', ':', '"""Override parent method to use OrderedDict."""', 'if', 'isinstance', '(', 'node', ',', 'MappingNode', ')', ':', 'self', '.', 'flatten_mapping', '(', 'node', ')', 'return', 'self', '.', '_validate_mapping', '(', 'node', ',', 'deep', '=', 'deep', ')', 'def', 'construct_yaml_map', '(', 'self', ',', 'node', ')', ':', 'data', '=', 'OrderedDict', '(', ')', 'yield', 'data', 'value', '=', 'self', '.', 'construct_mapping', '(', 'node', ')', 'data', '.', 'update', '(', 'value', ')', 'OrderedUniqueLoader', '.', 'add_constructor', '(', "u'tag:yaml.org,2002:map'", ',', 'OrderedUniqueLoader', '.', 'construct_yaml_map', ',', ')', 'return', 'yaml', '.', 'load', '(', 'stream', ',', 'OrderedUniqueLoader', ')'] | 
Provides yaml.load alternative with preserved dictionary order.
Args:
stream (string): YAML string to load.
loader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe
load.
Returns:
OrderedDict: Parsed YAML. | ['Provides', 'yaml', '.', 'load', 'alternative', 'with', 'preserved', 'dictionary', 'order', '.'] | train | https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/util.py#L237-L320 |
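A usage sketch of the loader above: key order from the file is preserved, and a duplicated sibling key such as `stacks` is rejected rather than silently overwritten:

```python
cfg = yaml_to_ordered_dict("namespace: demo\nstacks:\n  - name: vpc\n  - name: app\n")
list(cfg)       # ['namespace', 'stacks'] (insertion order kept via OrderedDict)
cfg["stacks"]   # [OrderedDict([('name', 'vpc')]), OrderedDict([('name', 'app')])]

# A document with two top-level `stacks` mappings raises
# yaml.constructor.ConstructorError instead of keeping only the last one.
```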
7,417 | ga4gh/ga4gh-server | ga4gh/server/backend.py | Backend.phenotypesGenerator | def phenotypesGenerator(self, request):
"""
Returns a generator over the (phenotypes, nextPageToken) pairs
defined by the (JSON string) request
"""
# TODO make paging work using SPARQL?
compoundId = datamodel.PhenotypeAssociationSetCompoundId.parse(
request.phenotype_association_set_id)
dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
phenotypeAssociationSet = dataset.getPhenotypeAssociationSet(
compoundId.phenotypeAssociationSetId)
associations = phenotypeAssociationSet.getAssociations(request)
phenotypes = [association.phenotype for association in associations]
return self._protocolListGenerator(
request, phenotypes) | python | def phenotypesGenerator(self, request):
"""
Returns a generator over the (phenotypes, nextPageToken) pairs
defined by the (JSON string) request
"""
# TODO make paging work using SPARQL?
compoundId = datamodel.PhenotypeAssociationSetCompoundId.parse(
request.phenotype_association_set_id)
dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
phenotypeAssociationSet = dataset.getPhenotypeAssociationSet(
compoundId.phenotypeAssociationSetId)
associations = phenotypeAssociationSet.getAssociations(request)
phenotypes = [association.phenotype for association in associations]
return self._protocolListGenerator(
request, phenotypes) | ['def', 'phenotypesGenerator', '(', 'self', ',', 'request', ')', ':', '# TODO make paging work using SPARQL?', 'compoundId', '=', 'datamodel', '.', 'PhenotypeAssociationSetCompoundId', '.', 'parse', '(', 'request', '.', 'phenotype_association_set_id', ')', 'dataset', '=', 'self', '.', 'getDataRepository', '(', ')', '.', 'getDataset', '(', 'compoundId', '.', 'dataset_id', ')', 'phenotypeAssociationSet', '=', 'dataset', '.', 'getPhenotypeAssociationSet', '(', 'compoundId', '.', 'phenotypeAssociationSetId', ')', 'associations', '=', 'phenotypeAssociationSet', '.', 'getAssociations', '(', 'request', ')', 'phenotypes', '=', '[', 'association', '.', 'phenotype', 'for', 'association', 'in', 'associations', ']', 'return', 'self', '.', '_protocolListGenerator', '(', 'request', ',', 'phenotypes', ')'] | Returns a generator over the (phenotypes, nextPageToken) pairs
defined by the (JSON string) request | ['Returns', 'a', 'generator', 'over', 'the', '(', 'phenotypes', 'nextPageToken', ')', 'pairs', 'defined', 'by', 'the', '(', 'JSON', 'string', ')', 'request'] | train | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/backend.py#L423-L437 |
7,418 | hannorein/rebound | rebound/simulation.py | Simulation.refreshWidgets | def refreshWidgets(self):
"""
This function manually refreshed all widgets attached to this simulation.
You want to call this function if any particle data has been manually changed.
"""
if hasattr(self, '_widgets'):
for w in self._widgets:
w.refresh(isauto=0)
else:
raise RuntimeError("No widgets found") | python | def refreshWidgets(self):
"""
This function manually refreshed all widgets attached to this simulation.
You want to call this function if any particle data has been manually changed.
"""
if hasattr(self, '_widgets'):
for w in self._widgets:
w.refresh(isauto=0)
else:
raise RuntimeError("No widgets found") | ['def', 'refreshWidgets', '(', 'self', ')', ':', 'if', 'hasattr', '(', 'self', ',', "'_widgets'", ')', ':', 'for', 'w', 'in', 'self', '.', '_widgets', ':', 'w', '.', 'refresh', '(', 'isauto', '=', '0', ')', 'else', ':', 'raise', 'RuntimeError', '(', '"No widgets found"', ')'] | This function manually refreshed all widgets attached to this simulation.
You want to call this function if any particle data has been manually changed. | ['This', 'function', 'manually', 'refreshed', 'all', 'widgets', 'attached', 'to', 'this', 'simulation', '.', 'You', 'want', 'to', 'call', 'this', 'function', 'if', 'any', 'particle', 'data', 'has', 'been', 'manually', 'changed', '.'] | train | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L435-L445 |
7,419 | pandas-dev/pandas | pandas/core/indexes/base.py | Index._reindex_non_unique | def _reindex_non_unique(self, target):
"""
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target.
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray or None
Indices of output values in original index.
"""
target = ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = ensure_int64(length[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = -1
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer | python | def _reindex_non_unique(self, target):
"""
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target.
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray or None
Indices of output values in original index.
"""
target = ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = ensure_int64(length[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = -1
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer | ['def', '_reindex_non_unique', '(', 'self', ',', 'target', ')', ':', 'target', '=', 'ensure_index', '(', 'target', ')', 'indexer', ',', 'missing', '=', 'self', '.', 'get_indexer_non_unique', '(', 'target', ')', 'check', '=', 'indexer', '!=', '-', '1', 'new_labels', '=', 'self', '.', 'take', '(', 'indexer', '[', 'check', ']', ')', 'new_indexer', '=', 'None', 'if', 'len', '(', 'missing', ')', ':', 'length', '=', 'np', '.', 'arange', '(', 'len', '(', 'indexer', ')', ')', 'missing', '=', 'ensure_platform_int', '(', 'missing', ')', 'missing_labels', '=', 'target', '.', 'take', '(', 'missing', ')', 'missing_indexer', '=', 'ensure_int64', '(', 'length', '[', '~', 'check', ']', ')', 'cur_labels', '=', 'self', '.', 'take', '(', 'indexer', '[', 'check', ']', ')', '.', 'values', 'cur_indexer', '=', 'ensure_int64', '(', 'length', '[', 'check', ']', ')', 'new_labels', '=', 'np', '.', 'empty', '(', 'tuple', '(', '[', 'len', '(', 'indexer', ')', ']', ')', ',', 'dtype', '=', 'object', ')', 'new_labels', '[', 'cur_indexer', ']', '=', 'cur_labels', 'new_labels', '[', 'missing_indexer', ']', '=', 'missing_labels', '# a unique indexer', 'if', 'target', '.', 'is_unique', ':', '# see GH5553, make sure we use the right indexer', 'new_indexer', '=', 'np', '.', 'arange', '(', 'len', '(', 'indexer', ')', ')', 'new_indexer', '[', 'cur_indexer', ']', '=', 'np', '.', 'arange', '(', 'len', '(', 'cur_labels', ')', ')', 'new_indexer', '[', 'missing_indexer', ']', '=', '-', '1', '# we have a non_unique selector, need to use the original', '# indexer here', 'else', ':', '# need to retake to have the same size as the indexer', 'indexer', '[', '~', 'check', ']', '=', '-', '1', '# reset the new indexer to account for the new size', 'new_indexer', '=', 'np', '.', 'arange', '(', 'len', '(', 'self', '.', 'take', '(', 'indexer', ')', ')', ')', 'new_indexer', '[', '~', 'check', ']', '=', '-', '1', 'new_index', '=', 'self', '.', '_shallow_copy_with_infer', '(', 'new_labels', ',', 'freq', '=', 'None', ')', 'return', 'new_index', ',', 'indexer', ',', 'new_indexer'] | Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target.
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray or None
Indices of output values in original index. | ['Create', 'a', 'new', 'index', 'with', 'target', 's', 'values', '(', 'move', '/', 'add', '/', 'delete', 'values', 'as', 'necessary', ')', 'use', 'with', 'non', '-', 'unique', 'Index', 'and', 'a', 'possibly', 'non', '-', 'unique', 'target', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3144-L3201 |
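Illustrative sketch for the `Index._reindex_non_unique` record above, using only the public `get_indexer_non_unique` call that the helper builds on; the labels and values are invented.

```python
import pandas as pd

# Non-unique source index and a target containing one label that is missing.
idx = pd.Index(["a", "b", "b"])
target = pd.Index(["b", "c"])

# Every match is reported in target order; -1 marks labels absent from idx.
indexer, missing = idx.get_indexer_non_unique(target)
print(indexer)   # [ 1  2 -1] -> both positions of "b", then -1 for "c"
print(missing)   # [1]        -> position of the unmatched label within target
```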
7,420 | PyCQA/pydocstyle | src/pydocstyle/checker.py | ConventionChecker.check_newline_after_last_paragraph | def check_newline_after_last_paragraph(self, definition, docstring):
"""D209: Put multi-line docstring closing quotes on separate line.
Unless the entire docstring fits on a line, place the closing
quotes on a line by themselves.
"""
if docstring:
lines = [l for l in ast.literal_eval(docstring).split('\n')
if not is_blank(l)]
if len(lines) > 1:
if docstring.split("\n")[-1].strip() not in ['"""', "'''"]:
return violations.D209() | python | def check_newline_after_last_paragraph(self, definition, docstring):
"""D209: Put multi-line docstring closing quotes on separate line.
Unless the entire docstring fits on a line, place the closing
quotes on a line by themselves.
"""
if docstring:
lines = [l for l in ast.literal_eval(docstring).split('\n')
if not is_blank(l)]
if len(lines) > 1:
if docstring.split("\n")[-1].strip() not in ['"""', "'''"]:
return violations.D209() | ['def', 'check_newline_after_last_paragraph', '(', 'self', ',', 'definition', ',', 'docstring', ')', ':', 'if', 'docstring', ':', 'lines', '=', '[', 'l', 'for', 'l', 'in', 'ast', '.', 'literal_eval', '(', 'docstring', ')', '.', 'split', '(', "'\\n'", ')', 'if', 'not', 'is_blank', '(', 'l', ')', ']', 'if', 'len', '(', 'lines', ')', '>', '1', ':', 'if', 'docstring', '.', 'split', '(', '"\\n"', ')', '[', '-', '1', ']', '.', 'strip', '(', ')', 'not', 'in', '[', '\'"""\'', ',', '"\'\'\'"', ']', ':', 'return', 'violations', '.', 'D209', '(', ')'] | D209: Put multi-line docstring closing quotes on separate line.
Unless the entire docstring fits on a line, place the closing
quotes on a line by themselves. | ['D209', ':', 'Put', 'multi', '-', 'line', 'docstring', 'closing', 'quotes', 'on', 'separate', 'line', '.'] | train | https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/checker.py#L233-L245 |
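Illustrative pair of docstrings for the D209 record above (function bodies invented): the first keeps the closing quotes on the final text line and is flagged, the second places them on their own line and passes.

```python
def flagged():
    """Return the answer.

    This docstring spans several lines but ends with the closing
    quotes on the same line as the text, so pydocstyle reports D209."""


def clean():
    """Return the answer.

    The closing quotes sit on a line of their own, which is what
    D209 asks of multi-line docstrings.
    """
```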
7,421 | SUSE-Enceladus/ipa | ipa/ipa_controller.py | collect_results | def collect_results(results_file):
"""Return the result (pass/fail) for json file."""
with open(results_file, 'r') as results:
data = json.load(results)
return data | python | def collect_results(results_file):
"""Return the result (pass/fail) for json file."""
with open(results_file, 'r') as results:
data = json.load(results)
return data | ['def', 'collect_results', '(', 'results_file', ')', ':', 'with', 'open', '(', 'results_file', ',', "'r'", ')', 'as', 'results', ':', 'data', '=', 'json', '.', 'load', '(', 'results', ')', 'return', 'data'] | Return the result (pass/fail) for json file. | ['Return', 'the', 'result', '(', 'pass', '/', 'fail', ')', 'for', 'json', 'file', '.'] | train | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_controller.py#L135-L139 |
7,422 | mongolab/dex | dex/analyzer.py | QueryAnalyzer._generate_recommendation | def _generate_recommendation(self,
query_analysis,
db_name,
collection_name):
"""Generates an ideal query recommendation"""
index_rec = '{'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is EQUIV_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is SORT_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is RANGE_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
index_rec += '}'
# RECOMMENDATION
return OrderedDict([('index',index_rec),
('shellCommand', self.generate_shell_command(collection_name, index_rec))]) | python | def _generate_recommendation(self,
query_analysis,
db_name,
collection_name):
"""Generates an ideal query recommendation"""
index_rec = '{'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is EQUIV_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is SORT_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is RANGE_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
index_rec += '}'
# RECOMMENDATION
return OrderedDict([('index',index_rec),
('shellCommand', self.generate_shell_command(collection_name, index_rec))]) | ['def', '_generate_recommendation', '(', 'self', ',', 'query_analysis', ',', 'db_name', ',', 'collection_name', ')', ':', 'index_rec', '=', "'{'", 'for', 'query_field', 'in', 'query_analysis', '[', "'analyzedFields'", ']', ':', 'if', 'query_field', '[', "'fieldType'", ']', 'is', 'EQUIV_TYPE', ':', 'if', 'len', '(', 'index_rec', ')', 'is', 'not', '1', ':', 'index_rec', '+=', "', '", 'index_rec', '+=', '\'"\'', '+', 'query_field', '[', "'fieldName'", ']', '+', '\'": 1\'', 'for', 'query_field', 'in', 'query_analysis', '[', "'analyzedFields'", ']', ':', 'if', 'query_field', '[', "'fieldType'", ']', 'is', 'SORT_TYPE', ':', 'if', 'len', '(', 'index_rec', ')', 'is', 'not', '1', ':', 'index_rec', '+=', "', '", 'index_rec', '+=', '\'"\'', '+', 'query_field', '[', "'fieldName'", ']', '+', '\'": 1\'', 'for', 'query_field', 'in', 'query_analysis', '[', "'analyzedFields'", ']', ':', 'if', 'query_field', '[', "'fieldType'", ']', 'is', 'RANGE_TYPE', ':', 'if', 'len', '(', 'index_rec', ')', 'is', 'not', '1', ':', 'index_rec', '+=', "', '", 'index_rec', '+=', '\'"\'', '+', 'query_field', '[', "'fieldName'", ']', '+', '\'": 1\'', 'index_rec', '+=', "'}'", '# RECOMMENDATION', 'return', 'OrderedDict', '(', '[', '(', "'index'", ',', 'index_rec', ')', ',', '(', "'shellCommand'", ',', 'self', '.', 'generate_shell_command', '(', 'collection_name', ',', 'index_rec', ')', ')', ']', ')'] | Generates an ideal query recommendation | ['Generates', 'an', 'ideal', 'query', 'recommendation'] | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L276-L301 |
7,423 | JamesRamm/longclaw | longclaw/basket/utils.py | get_basket_items | def get_basket_items(request):
"""
Get all items in the basket
"""
bid = basket_id(request)
return BasketItem.objects.filter(basket_id=bid), bid | python | def get_basket_items(request):
"""
Get all items in the basket
"""
bid = basket_id(request)
return BasketItem.objects.filter(basket_id=bid), bid | ['def', 'get_basket_items', '(', 'request', ')', ':', 'bid', '=', 'basket_id', '(', 'request', ')', 'return', 'BasketItem', '.', 'objects', '.', 'filter', '(', 'basket_id', '=', 'bid', ')', ',', 'bid'] | Get all items in the basket | ['Get', 'all', 'items', 'in', 'the', 'basket'] | train | https://github.com/JamesRamm/longclaw/blob/8bbf2e6d703271b815ec111813c7c5d1d4e4e810/longclaw/basket/utils.py#L22-L27 |
7,424 | joferkington/mplstereonet | mplstereonet/utilities.py | parse_azimuth | def parse_azimuth(azimuth):
"""
Parses an azimuth measurement in azimuth or quadrant format.
Parameters
-----------
azimuth : string or number
An azimuth measurement in degrees or a quadrant measurement of azimuth.
Returns
-------
azi : float
The azimuth in degrees clockwise from north (range: 0-360)
See Also
--------
parse_quadrant_measurement
parse_strike_dip
parse_plunge_bearing
"""
try:
azimuth = float(azimuth)
except ValueError:
if not azimuth[0].isalpha():
raise ValueError('Ambiguous azimuth: {}'.format(azimuth))
azimuth = parse_quadrant_measurement(azimuth)
return azimuth | python | def parse_azimuth(azimuth):
"""
Parses an azimuth measurement in azimuth or quadrant format.
Parameters
-----------
azimuth : string or number
An azimuth measurement in degrees or a quadrant measurement of azimuth.
Returns
-------
azi : float
The azimuth in degrees clockwise from north (range: 0-360)
See Also
--------
parse_quadrant_measurement
parse_strike_dip
parse_plunge_bearing
"""
try:
azimuth = float(azimuth)
except ValueError:
if not azimuth[0].isalpha():
raise ValueError('Ambiguous azimuth: {}'.format(azimuth))
azimuth = parse_quadrant_measurement(azimuth)
return azimuth | ['def', 'parse_azimuth', '(', 'azimuth', ')', ':', 'try', ':', 'azimuth', '=', 'float', '(', 'azimuth', ')', 'except', 'ValueError', ':', 'if', 'not', 'azimuth', '[', '0', ']', '.', 'isalpha', '(', ')', ':', 'raise', 'ValueError', '(', "'Ambiguous azimuth: {}'", '.', 'format', '(', 'azimuth', ')', ')', 'azimuth', '=', 'parse_quadrant_measurement', '(', 'azimuth', ')', 'return', 'azimuth'] | Parses an azimuth measurement in azimuth or quadrant format.
Parameters
-----------
azimuth : string or number
An azimuth measurement in degrees or a quadrant measurement of azimuth.
Returns
-------
azi : float
The azimuth in degrees clockwise from north (range: 0-360)
See Also
--------
parse_quadrant_measurement
parse_strike_dip
parse_plunge_bearing | ['Parses', 'an', 'azimuth', 'measurement', 'in', 'azimuth', 'or', 'quadrant', 'format', '.'] | train | https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/utilities.py#L223-L249 |
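Illustrative usage sketch for the `parse_azimuth` record above (values invented); quadrant strings are handed off to `parse_quadrant_measurement`, while plain numbers pass straight through.

```python
from mplstereonet.utilities import parse_azimuth

print(parse_azimuth(220))      # 220.0 -- numeric input is simply cast to float
print(parse_azimuth("N30E"))   # 30.0  -- quadrant format, 30 degrees east of north
print(parse_azimuth("S45W"))   # 225.0 -- 45 degrees west of south
```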
7,425 | andersinno/hayes | hayes/ext/date_tail.py | generate_date_tail_boost_queries | def generate_date_tail_boost_queries(
field, timedeltas_and_boosts, relative_to=None):
"""
Generate a list of RangeQueries usable to boost the scores of more
recent documents.
Example:
```
queries = generate_date_tail_boost_queries("publish_date", {
timedelta(days=90): 1,
timedelta(days=30): 2,
timedelta(days=10): 4,
})
s = Search(BoolQuery(must=..., should=queries))
# ...
```
Refs:
http://elasticsearch-users.115913.n3.nabble.com/Boost-recent-documents-td2126107.html#a2126317
:param field: field name to generate the queries against
:param timedeltas_and_boosts:
dictionary of timedelta instances and their boosts. Negative or
zero boost values will not generate rangequeries.
:type timedeltas_and_boosts: dict[timedelta, float]
:param relative_to: Relative to this datetime (may be None for "now")
:return: List of RangeQueries
"""
relative_to = relative_to or datetime.datetime.now()
times = {}
for timedelta, boost in timedeltas_and_boosts.items():
date = (relative_to - timedelta).date()
times[date] = boost
times = sorted(times.items(), key=lambda i: i[0])
queries = []
for (x, time) in enumerate(times):
kwargs = {"field": field, "boost": time[1]}
if x == 0:
kwargs["lte"] = time[0]
else:
kwargs["gt"] = time[0]
if x < len(times) - 1:
kwargs["lte"] = times[x + 1][0]
if kwargs["boost"] > 0:
q = RangeQuery()
q.add_range(**kwargs)
queries.append(q)
return queries | python | def generate_date_tail_boost_queries(
field, timedeltas_and_boosts, relative_to=None):
"""
Generate a list of RangeQueries usable to boost the scores of more
recent documents.
Example:
```
queries = generate_date_tail_boost_queries("publish_date", {
timedelta(days=90): 1,
timedelta(days=30): 2,
timedelta(days=10): 4,
})
s = Search(BoolQuery(must=..., should=queries))
# ...
```
Refs:
http://elasticsearch-users.115913.n3.nabble.com/Boost-recent-documents-td2126107.html#a2126317
:param field: field name to generate the queries against
:param timedeltas_and_boosts:
dictionary of timedelta instances and their boosts. Negative or
zero boost values will not generate rangequeries.
:type timedeltas_and_boosts: dict[timedelta, float]
:param relative_to: Relative to this datetime (may be None for "now")
:return: List of RangeQueries
"""
relative_to = relative_to or datetime.datetime.now()
times = {}
for timedelta, boost in timedeltas_and_boosts.items():
date = (relative_to - timedelta).date()
times[date] = boost
times = sorted(times.items(), key=lambda i: i[0])
queries = []
for (x, time) in enumerate(times):
kwargs = {"field": field, "boost": time[1]}
if x == 0:
kwargs["lte"] = time[0]
else:
kwargs["gt"] = time[0]
if x < len(times) - 1:
kwargs["lte"] = times[x + 1][0]
if kwargs["boost"] > 0:
q = RangeQuery()
q.add_range(**kwargs)
queries.append(q)
return queries | ['def', 'generate_date_tail_boost_queries', '(', 'field', ',', 'timedeltas_and_boosts', ',', 'relative_to', '=', 'None', ')', ':', 'relative_to', '=', 'relative_to', 'or', 'datetime', '.', 'datetime', '.', 'now', '(', ')', 'times', '=', '{', '}', 'for', 'timedelta', ',', 'boost', 'in', 'timedeltas_and_boosts', '.', 'items', '(', ')', ':', 'date', '=', '(', 'relative_to', '-', 'timedelta', ')', '.', 'date', '(', ')', 'times', '[', 'date', ']', '=', 'boost', 'times', '=', 'sorted', '(', 'times', '.', 'items', '(', ')', ',', 'key', '=', 'lambda', 'i', ':', 'i', '[', '0', ']', ')', 'queries', '=', '[', ']', 'for', '(', 'x', ',', 'time', ')', 'in', 'enumerate', '(', 'times', ')', ':', 'kwargs', '=', '{', '"field"', ':', 'field', ',', '"boost"', ':', 'time', '[', '1', ']', '}', 'if', 'x', '==', '0', ':', 'kwargs', '[', '"lte"', ']', '=', 'time', '[', '0', ']', 'else', ':', 'kwargs', '[', '"gt"', ']', '=', 'time', '[', '0', ']', 'if', 'x', '<', 'len', '(', 'times', ')', '-', '1', ':', 'kwargs', '[', '"lte"', ']', '=', 'times', '[', 'x', '+', '1', ']', '[', '0', ']', 'if', 'kwargs', '[', '"boost"', ']', '>', '0', ':', 'q', '=', 'RangeQuery', '(', ')', 'q', '.', 'add_range', '(', '*', '*', 'kwargs', ')', 'queries', '.', 'append', '(', 'q', ')', 'return', 'queries'] | Generate a list of RangeQueries usable to boost the scores of more
recent documents.
Example:
```
queries = generate_date_tail_boost_queries("publish_date", {
timedelta(days=90): 1,
timedelta(days=30): 2,
timedelta(days=10): 4,
})
s = Search(BoolQuery(must=..., should=queries))
# ...
```
Refs:
http://elasticsearch-users.115913.n3.nabble.com/Boost-recent-documents-td2126107.html#a2126317
:param field: field name to generate the queries against
:param timedeltas_and_boosts:
dictionary of timedelta instances and their boosts. Negative or
zero boost values will not generate rangequeries.
:type timedeltas_and_boosts: dict[timedelta, float]
:param relative_to: Relative to this datetime (may be None for "now")
:return: List of RangeQueries | ['Generate', 'a', 'list', 'of', 'RangeQueries', 'usable', 'to', 'boost', 'the', 'scores', 'of', 'more', 'recent', 'documents', '.'] | train | https://github.com/andersinno/hayes/blob/88d1f6b3e0cd993d9d9fc136506bd01165fea64b/hayes/ext/date_tail.py#L7-L58 |
7,426 | rsheftel/raccoon | raccoon/dataframe.py | DataFrame.to_json | def to_json(self):
"""
Returns a JSON of the entire DataFrame that can be reconstructed back with raccoon.from_json(input). Any object
that cannot be serialized will be replaced with the representation of the object using repr(). In that instance
the DataFrame will have a string representation in place of the object and will not reconstruct exactly.
:return: json string
"""
input_dict = {'data': self.to_dict(index=False), 'index': list(self._index)}
# if blist, turn into lists
if self.blist:
input_dict['index'] = list(input_dict['index'])
for key in input_dict['data']:
input_dict['data'][key] = list(input_dict['data'][key])
meta_data = dict()
for key in self.__slots__:
if key not in ['_data', '_index']:
value = self.__getattribute__(key)
meta_data[key.lstrip('_')] = value if not isinstance(value, blist) else list(value)
meta_data['use_blist'] = meta_data.pop('blist')
input_dict['meta_data'] = meta_data
return json.dumps(input_dict, default=repr) | python | def to_json(self):
"""
Returns a JSON of the entire DataFrame that can be reconstructed back with raccoon.from_json(input). Any object
that cannot be serialized will be replaced with the representation of the object using repr(). In that instance
the DataFrame will have a string representation in place of the object and will not reconstruct exactly.
:return: json string
"""
input_dict = {'data': self.to_dict(index=False), 'index': list(self._index)}
# if blist, turn into lists
if self.blist:
input_dict['index'] = list(input_dict['index'])
for key in input_dict['data']:
input_dict['data'][key] = list(input_dict['data'][key])
meta_data = dict()
for key in self.__slots__:
if key not in ['_data', '_index']:
value = self.__getattribute__(key)
meta_data[key.lstrip('_')] = value if not isinstance(value, blist) else list(value)
meta_data['use_blist'] = meta_data.pop('blist')
input_dict['meta_data'] = meta_data
return json.dumps(input_dict, default=repr) | ['def', 'to_json', '(', 'self', ')', ':', 'input_dict', '=', '{', "'data'", ':', 'self', '.', 'to_dict', '(', 'index', '=', 'False', ')', ',', "'index'", ':', 'list', '(', 'self', '.', '_index', ')', '}', '# if blist, turn into lists', 'if', 'self', '.', 'blist', ':', 'input_dict', '[', "'index'", ']', '=', 'list', '(', 'input_dict', '[', "'index'", ']', ')', 'for', 'key', 'in', 'input_dict', '[', "'data'", ']', ':', 'input_dict', '[', "'data'", ']', '[', 'key', ']', '=', 'list', '(', 'input_dict', '[', "'data'", ']', '[', 'key', ']', ')', 'meta_data', '=', 'dict', '(', ')', 'for', 'key', 'in', 'self', '.', '__slots__', ':', 'if', 'key', 'not', 'in', '[', "'_data'", ',', "'_index'", ']', ':', 'value', '=', 'self', '.', '__getattribute__', '(', 'key', ')', 'meta_data', '[', 'key', '.', 'lstrip', '(', "'_'", ')', ']', '=', 'value', 'if', 'not', 'isinstance', '(', 'value', ',', 'blist', ')', 'else', 'list', '(', 'value', ')', 'meta_data', '[', "'use_blist'", ']', '=', 'meta_data', '.', 'pop', '(', "'blist'", ')', 'input_dict', '[', "'meta_data'", ']', '=', 'meta_data', 'return', 'json', '.', 'dumps', '(', 'input_dict', ',', 'default', '=', 'repr', ')'] | Returns a JSON of the entire DataFrame that can be reconstructed back with raccoon.from_json(input). Any object
that cannot be serialized will be replaced with the representation of the object using repr(). In that instance
the DataFrame will have a string representation in place of the object and will not reconstruct exactly.
:return: json string | ['Returns', 'a', 'JSON', 'of', 'the', 'entire', 'DataFrame', 'that', 'can', 'be', 'reconstructed', 'back', 'with', 'raccoon', '.', 'from_json', '(', 'input', ')', '.', 'Any', 'object', 'that', 'cannot', 'be', 'serialized', 'will', 'be', 'replaced', 'with', 'the', 'representation', 'of', 'the', 'object', 'using', 'repr', '()', '.', 'In', 'that', 'instance', 'the', 'DataFrame', 'will', 'have', 'a', 'string', 'representation', 'in', 'place', 'of', 'the', 'object', 'and', 'will', 'not', 'reconstruct', 'exactly', '.'] | train | https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L851-L874 |
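Minimal round-trip sketch for the `DataFrame.to_json` record above, assuming `from_json` is exposed at package level as the docstring states; the frame contents are invented.

```python
import raccoon as rc

df = rc.DataFrame(data={"a": [1, 2, 3]}, index=[10, 11, 12])
payload = df.to_json()            # JSON string carrying data, index and meta-data
df_restored = rc.from_json(payload)
```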
7,427 | apache/incubator-mxnet | example/gluon/lipnet/trainer.py | Train.infer | def infer(self, input_data, input_label):
"""
Description : Print sentence for prediction result
"""
sum_losses = 0
len_losses = 0
for data, label in zip(input_data, input_label):
pred = self.net(data)
sum_losses += mx.nd.array(self.loss_fn(pred, label)).sum().asscalar()
len_losses += len(data)
pred_convert = char_beam_search(pred)
label_convert = char_conv(label.asnumpy())
for target, pred in zip(label_convert, pred_convert):
print("target:{t} pred:{p}".format(t=target, p=pred))
return sum_losses, len_losses | python | def infer(self, input_data, input_label):
"""
Description : Print sentence for prediction result
"""
sum_losses = 0
len_losses = 0
for data, label in zip(input_data, input_label):
pred = self.net(data)
sum_losses += mx.nd.array(self.loss_fn(pred, label)).sum().asscalar()
len_losses += len(data)
pred_convert = char_beam_search(pred)
label_convert = char_conv(label.asnumpy())
for target, pred in zip(label_convert, pred_convert):
print("target:{t} pred:{p}".format(t=target, p=pred))
return sum_losses, len_losses | ['def', 'infer', '(', 'self', ',', 'input_data', ',', 'input_label', ')', ':', 'sum_losses', '=', '0', 'len_losses', '=', '0', 'for', 'data', ',', 'label', 'in', 'zip', '(', 'input_data', ',', 'input_label', ')', ':', 'pred', '=', 'self', '.', 'net', '(', 'data', ')', 'sum_losses', '+=', 'mx', '.', 'nd', '.', 'array', '(', 'self', '.', 'loss_fn', '(', 'pred', ',', 'label', ')', ')', '.', 'sum', '(', ')', '.', 'asscalar', '(', ')', 'len_losses', '+=', 'len', '(', 'data', ')', 'pred_convert', '=', 'char_beam_search', '(', 'pred', ')', 'label_convert', '=', 'char_conv', '(', 'label', '.', 'asnumpy', '(', ')', ')', 'for', 'target', ',', 'pred', 'in', 'zip', '(', 'label_convert', ',', 'pred_convert', ')', ':', 'print', '(', '"target:{t} pred:{p}"', '.', 'format', '(', 't', '=', 'target', ',', 'p', '=', 'pred', ')', ')', 'return', 'sum_losses', ',', 'len_losses'] | Description : Print sentence for prediction result | ['Description', ':', 'Print', 'sentence', 'for', 'prediction', 'result'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/trainer.py#L156-L170 |
7,428 | pazz/alot | alot/db/manager.py | DBManager._get_notmuch_message | def _get_notmuch_message(self, mid):
"""returns :class:`notmuch.database.Message` with given id"""
mode = Database.MODE.READ_ONLY
db = Database(path=self.path, mode=mode)
try:
return db.find_message(mid)
except:
errmsg = 'no message with id %s exists!' % mid
raise NonexistantObjectError(errmsg) | python | def _get_notmuch_message(self, mid):
"""returns :class:`notmuch.database.Message` with given id"""
mode = Database.MODE.READ_ONLY
db = Database(path=self.path, mode=mode)
try:
return db.find_message(mid)
except:
errmsg = 'no message with id %s exists!' % mid
raise NonexistantObjectError(errmsg) | ['def', '_get_notmuch_message', '(', 'self', ',', 'mid', ')', ':', 'mode', '=', 'Database', '.', 'MODE', '.', 'READ_ONLY', 'db', '=', 'Database', '(', 'path', '=', 'self', '.', 'path', ',', 'mode', '=', 'mode', ')', 'try', ':', 'return', 'db', '.', 'find_message', '(', 'mid', ')', 'except', ':', 'errmsg', '=', "'no message with id %s exists!'", '%', 'mid', 'raise', 'NonexistantObjectError', '(', 'errmsg', ')'] | returns :class:`notmuch.database.Message` with given id | ['returns', ':', 'class', ':', 'notmuch', '.', 'database', '.', 'Message', 'with', 'given', 'id'] | train | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/manager.py#L284-L292 |
7,429 | scanny/python-pptx | pptx/chart/data.py | Categories.levels | def levels(self):
"""
A generator of (idx, label) sequences representing the category
hierarchy from the bottom up. The first level contains all leaf
categories, and each subsequent is the next level up.
"""
def levels(categories):
# yield all lower levels
sub_categories = [
sc for c in categories for sc in c.sub_categories
]
if sub_categories:
for level in levels(sub_categories):
yield level
# yield this level
yield [(cat.idx, cat.label) for cat in categories]
for level in levels(self):
yield level | python | def levels(self):
"""
A generator of (idx, label) sequences representing the category
hierarchy from the bottom up. The first level contains all leaf
categories, and each subsequent is the next level up.
"""
def levels(categories):
# yield all lower levels
sub_categories = [
sc for c in categories for sc in c.sub_categories
]
if sub_categories:
for level in levels(sub_categories):
yield level
# yield this level
yield [(cat.idx, cat.label) for cat in categories]
for level in levels(self):
yield level | ['def', 'levels', '(', 'self', ')', ':', 'def', 'levels', '(', 'categories', ')', ':', '# yield all lower levels', 'sub_categories', '=', '[', 'sc', 'for', 'c', 'in', 'categories', 'for', 'sc', 'in', 'c', '.', 'sub_categories', ']', 'if', 'sub_categories', ':', 'for', 'level', 'in', 'levels', '(', 'sub_categories', ')', ':', 'yield', 'level', '# yield this level', 'yield', '[', '(', 'cat', '.', 'idx', ',', 'cat', '.', 'label', ')', 'for', 'cat', 'in', 'categories', ']', 'for', 'level', 'in', 'levels', '(', 'self', ')', ':', 'yield', 'level'] | A generator of (idx, label) sequences representing the category
hierarchy from the bottom up. The first level contains all leaf
categories, and each subsequent is the next level up. | ['A', 'generator', 'of', '(', 'idx', 'label', ')', 'sequences', 'representing', 'the', 'category', 'hierarchy', 'from', 'the', 'bottom', 'up', '.', 'The', 'first', 'level', 'contains', 'all', 'leaf', 'categories', 'and', 'each', 'subsequent', 'is', 'the', 'next', 'level', 'up', '.'] | train | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/data.py#L455-L473 |
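Hedged sketch of where `Categories.levels` comes into play: hierarchical category data built with the documented `CategoryChartData` API (labels and figures invented); `levels` then yields the leaf level first and works upward.

```python
from pptx.chart.data import CategoryChartData

chart_data = CategoryChartData()
west = chart_data.add_category("WEST")
west.add_sub_category("San Francisco")
west.add_sub_category("Los Angeles")
east = chart_data.add_category("EAST")
east.add_sub_category("New York")
east.add_sub_category("Boston")
chart_data.add_series("Q1 Sales", (12, 9, 14, 7))  # one value per leaf category
```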
7,430 | zarr-developers/zarr | zarr/core.py | Array.get_orthogonal_selection | def get_orthogonal_selection(self, selection, out=None, fields=None):
"""Retrieve data by making a selection for each dimension of the array. For
example, if an array has 2 dimensions, allows selecting specific rows and/or
columns. The selection for each dimension can be either an integer (indexing a
single item), a slice, an array of integers, or a Boolean array where True
values indicate a selection.
Parameters
----------
selection : tuple
A selection for each dimension of the array. May be any combination of int,
slice, integer array or Boolean array.
out : ndarray, optional
If given, load the selected data directly into this array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to
extract data for.
Returns
-------
out : ndarray
A NumPy array containing the data for the requested selection.
Examples
--------
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.array(np.arange(100).reshape(10, 10))
Retrieve rows and columns via any combination of int, slice, integer array and/or
Boolean array::
>>> z.get_orthogonal_selection(([1, 4], slice(None)))
array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])
>>> z.get_orthogonal_selection((slice(None), [1, 4]))
array([[ 1, 4],
[11, 14],
[21, 24],
[31, 34],
[41, 44],
[51, 54],
[61, 64],
[71, 74],
[81, 84],
[91, 94]])
>>> z.get_orthogonal_selection(([1, 4], [1, 4]))
array([[11, 14],
[41, 44]])
>>> sel = np.zeros(z.shape[0], dtype=bool)
>>> sel[1] = True
>>> sel[4] = True
>>> z.get_orthogonal_selection((sel, sel))
array([[11, 14],
[41, 44]])
For convenience, the orthogonal selection functionality is also available via the
`oindex` property, e.g.::
>>> z.oindex[[1, 4], :]
array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])
>>> z.oindex[:, [1, 4]]
array([[ 1, 4],
[11, 14],
[21, 24],
[31, 34],
[41, 44],
[51, 54],
[61, 64],
[71, 74],
[81, 84],
[91, 94]])
>>> z.oindex[[1, 4], [1, 4]]
array([[11, 14],
[41, 44]])
>>> sel = np.zeros(z.shape[0], dtype=bool)
>>> sel[1] = True
>>> sel[4] = True
>>> z.oindex[sel, sel]
array([[11, 14],
[41, 44]])
Notes
-----
Orthogonal indexing is also known as outer indexing.
Slices with step > 1 are supported, but slices with negative step are not.
See Also
--------
get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, set_orthogonal_selection,
vindex, oindex, __getitem__, __setitem__
"""
# refresh metadata
if not self._cache_metadata:
self._load_metadata()
# check args
check_fields(fields, self._dtype)
# setup indexer
indexer = OrthogonalIndexer(selection, self)
return self._get_selection(indexer=indexer, out=out, fields=fields) | python | def get_orthogonal_selection(self, selection, out=None, fields=None):
"""Retrieve data by making a selection for each dimension of the array. For
example, if an array has 2 dimensions, allows selecting specific rows and/or
columns. The selection for each dimension can be either an integer (indexing a
single item), a slice, an array of integers, or a Boolean array where True
values indicate a selection.
Parameters
----------
selection : tuple
A selection for each dimension of the array. May be any combination of int,
slice, integer array or Boolean array.
out : ndarray, optional
If given, load the selected data directly into this array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to
extract data for.
Returns
-------
out : ndarray
A NumPy array containing the data for the requested selection.
Examples
--------
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.array(np.arange(100).reshape(10, 10))
Retrieve rows and columns via any combination of int, slice, integer array and/or
Boolean array::
>>> z.get_orthogonal_selection(([1, 4], slice(None)))
array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])
>>> z.get_orthogonal_selection((slice(None), [1, 4]))
array([[ 1, 4],
[11, 14],
[21, 24],
[31, 34],
[41, 44],
[51, 54],
[61, 64],
[71, 74],
[81, 84],
[91, 94]])
>>> z.get_orthogonal_selection(([1, 4], [1, 4]))
array([[11, 14],
[41, 44]])
>>> sel = np.zeros(z.shape[0], dtype=bool)
>>> sel[1] = True
>>> sel[4] = True
>>> z.get_orthogonal_selection((sel, sel))
array([[11, 14],
[41, 44]])
For convenience, the orthogonal selection functionality is also available via the
`oindex` property, e.g.::
>>> z.oindex[[1, 4], :]
array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])
>>> z.oindex[:, [1, 4]]
array([[ 1, 4],
[11, 14],
[21, 24],
[31, 34],
[41, 44],
[51, 54],
[61, 64],
[71, 74],
[81, 84],
[91, 94]])
>>> z.oindex[[1, 4], [1, 4]]
array([[11, 14],
[41, 44]])
>>> sel = np.zeros(z.shape[0], dtype=bool)
>>> sel[1] = True
>>> sel[4] = True
>>> z.oindex[sel, sel]
array([[11, 14],
[41, 44]])
Notes
-----
Orthogonal indexing is also known as outer indexing.
Slices with step > 1 are supported, but slices with negative step are not.
See Also
--------
get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, set_orthogonal_selection,
vindex, oindex, __getitem__, __setitem__
"""
# refresh metadata
if not self._cache_metadata:
self._load_metadata()
# check args
check_fields(fields, self._dtype)
# setup indexer
indexer = OrthogonalIndexer(selection, self)
return self._get_selection(indexer=indexer, out=out, fields=fields) | ['def', 'get_orthogonal_selection', '(', 'self', ',', 'selection', ',', 'out', '=', 'None', ',', 'fields', '=', 'None', ')', ':', '# refresh metadata', 'if', 'not', 'self', '.', '_cache_metadata', ':', 'self', '.', '_load_metadata', '(', ')', '# check args', 'check_fields', '(', 'fields', ',', 'self', '.', '_dtype', ')', '# setup indexer', 'indexer', '=', 'OrthogonalIndexer', '(', 'selection', ',', 'self', ')', 'return', 'self', '.', '_get_selection', '(', 'indexer', '=', 'indexer', ',', 'out', '=', 'out', ',', 'fields', '=', 'fields', ')'] | Retrieve data by making a selection for each dimension of the array. For
example, if an array has 2 dimensions, allows selecting specific rows and/or
columns. The selection for each dimension can be either an integer (indexing a
single item), a slice, an array of integers, or a Boolean array where True
values indicate a selection.
Parameters
----------
selection : tuple
A selection for each dimension of the array. May be any combination of int,
slice, integer array or Boolean array.
out : ndarray, optional
If given, load the selected data directly into this array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to
extract data for.
Returns
-------
out : ndarray
A NumPy array containing the data for the requested selection.
Examples
--------
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.array(np.arange(100).reshape(10, 10))
Retrieve rows and columns via any combination of int, slice, integer array and/or
Boolean array::
>>> z.get_orthogonal_selection(([1, 4], slice(None)))
array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])
>>> z.get_orthogonal_selection((slice(None), [1, 4]))
array([[ 1, 4],
[11, 14],
[21, 24],
[31, 34],
[41, 44],
[51, 54],
[61, 64],
[71, 74],
[81, 84],
[91, 94]])
>>> z.get_orthogonal_selection(([1, 4], [1, 4]))
array([[11, 14],
[41, 44]])
>>> sel = np.zeros(z.shape[0], dtype=bool)
>>> sel[1] = True
>>> sel[4] = True
>>> z.get_orthogonal_selection((sel, sel))
array([[11, 14],
[41, 44]])
For convenience, the orthogonal selection functionality is also available via the
`oindex` property, e.g.::
>>> z.oindex[[1, 4], :]
array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])
>>> z.oindex[:, [1, 4]]
array([[ 1, 4],
[11, 14],
[21, 24],
[31, 34],
[41, 44],
[51, 54],
[61, 64],
[71, 74],
[81, 84],
[91, 94]])
>>> z.oindex[[1, 4], [1, 4]]
array([[11, 14],
[41, 44]])
>>> sel = np.zeros(z.shape[0], dtype=bool)
>>> sel[1] = True
>>> sel[4] = True
>>> z.oindex[sel, sel]
array([[11, 14],
[41, 44]])
Notes
-----
Orthogonal indexing is also known as outer indexing.
Slices with step > 1 are supported, but slices with negative step are not.
See Also
--------
get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, set_orthogonal_selection,
vindex, oindex, __getitem__, __setitem__ | ['Retrieve', 'data', 'by', 'making', 'a', 'selection', 'for', 'each', 'dimension', 'of', 'the', 'array', '.', 'For', 'example', 'if', 'an', 'array', 'has', '2', 'dimensions', 'allows', 'selecting', 'specific', 'rows', 'and', '/', 'or', 'columns', '.', 'The', 'selection', 'for', 'each', 'dimension', 'can', 'be', 'either', 'an', 'integer', '(', 'indexing', 'a', 'single', 'item', ')', 'a', 'slice', 'an', 'array', 'of', 'integers', 'or', 'a', 'Boolean', 'array', 'where', 'True', 'values', 'indicate', 'a', 'selection', '.'] | train | https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L742-L851 |
7,431 | AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | reordc | def reordc(iorder, ndim, lenvals, array):
"""
Re-order the elements of an array of character strings
according to a given order vector.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reordc_c.html
:param iorder: Order vector to be used to re-order array.
:type iorder: Array of ints
:param ndim: Dimension of array.
:type ndim: int
:param lenvals: String length.
:type lenvals: int
:param array: Array to be re-ordered.
:type array: Array of strs
:return: Re-ordered Array.
:rtype: Array of strs
"""
iorder = stypes.toIntVector(iorder)
ndim = ctypes.c_int(ndim)
lenvals = ctypes.c_int(lenvals + 1)
array = stypes.listToCharArray(array, xLen=lenvals, yLen=ndim)
libspice.reordc_c(iorder, ndim, lenvals, array)
return [stypes.toPythonString(x.value) for x in array] | python | def reordc(iorder, ndim, lenvals, array):
"""
Re-order the elements of an array of character strings
according to a given order vector.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reordc_c.html
:param iorder: Order vector to be used to re-order array.
:type iorder: Array of ints
:param ndim: Dimension of array.
:type ndim: int
:param lenvals: String length.
:type lenvals: int
:param array: Array to be re-ordered.
:type array: Array of strs
:return: Re-ordered Array.
:rtype: Array of strs
"""
iorder = stypes.toIntVector(iorder)
ndim = ctypes.c_int(ndim)
lenvals = ctypes.c_int(lenvals + 1)
array = stypes.listToCharArray(array, xLen=lenvals, yLen=ndim)
libspice.reordc_c(iorder, ndim, lenvals, array)
return [stypes.toPythonString(x.value) for x in array] | ['def', 'reordc', '(', 'iorder', ',', 'ndim', ',', 'lenvals', ',', 'array', ')', ':', 'iorder', '=', 'stypes', '.', 'toIntVector', '(', 'iorder', ')', 'ndim', '=', 'ctypes', '.', 'c_int', '(', 'ndim', ')', 'lenvals', '=', 'ctypes', '.', 'c_int', '(', 'lenvals', '+', '1', ')', 'array', '=', 'stypes', '.', 'listToCharArray', '(', 'array', ',', 'xLen', '=', 'lenvals', ',', 'yLen', '=', 'ndim', ')', 'libspice', '.', 'reordc_c', '(', 'iorder', ',', 'ndim', ',', 'lenvals', ',', 'array', ')', 'return', '[', 'stypes', '.', 'toPythonString', '(', 'x', '.', 'value', ')', 'for', 'x', 'in', 'array', ']'] | Re-order the elements of an array of character strings
according to a given order vector.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reordc_c.html
:param iorder: Order vector to be used to re-order array.
:type iorder: Array of ints
:param ndim: Dimension of array.
:type ndim: int
:param lenvals: String length.
:type lenvals: int
:param array: Array to be re-ordered.
:type array: Array of strs
:return: Re-ordered Array.
:rtype: Array of strs | ['Re', '-', 'order', 'the', 'elements', 'of', 'an', 'array', 'of', 'character', 'strings', 'according', 'to', 'a', 'given', 'order', 'vector', '.'] | train | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L10377-L10400 |
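Invented call sketch for the `reordc` record above, following the documented signature; in practice the order vector would typically come from an ordering routine such as `orderc`.

```python
import spiceypy as spice

names = ["MARS", "EARTH", "VENUS"]
iorder = [1, 2, 0]                          # order vector (e.g. from spice.orderc)
lenvals = max(len(s) for s in names)
reordered = spice.reordc(iorder, len(names), lenvals, names)
print(reordered)                            # the same three names, rearranged per iorder
```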
7,432 | acutesoftware/AIKIF | scripts/examples/happiness_solver.py | create_random_population | def create_random_population(num=100):
"""
create a list of people with randomly generated names and stats
"""
people = []
for _ in range(num):
nme = 'blah'
tax_min = random.randint(1,40)/100
tax_max = tax_min + random.randint(1,40)/100
tradition = random.randint(1,100)/100
equity = random.randint(1,100)/100
pers = mod_hap_env.Person(nme, {'tax_min':tax_min, 'tax_max':tax_max, 'tradition':tradition, 'equity':equity})
people.append(pers)
print(pers)
return people | python | def create_random_population(num=100):
"""
create a list of people with randomly generated names and stats
"""
people = []
for _ in range(num):
nme = 'blah'
tax_min = random.randint(1,40)/100
tax_max = tax_min + random.randint(1,40)/100
tradition = random.randint(1,100)/100
equity = random.randint(1,100)/100
pers = mod_hap_env.Person(nme, {'tax_min':tax_min, 'tax_max':tax_max, 'tradition':tradition, 'equity':equity})
people.append(pers)
print(pers)
return people | ['def', 'create_random_population', '(', 'num', '=', '100', ')', ':', 'people', '=', '[', ']', 'for', '_', 'in', 'range', '(', 'num', ')', ':', 'nme', '=', "'blah'", 'tax_min', '=', 'random', '.', 'randint', '(', '1', ',', '40', ')', '/', '100', 'tax_max', '=', 'tax_min', '+', 'random', '.', 'randint', '(', '1', ',', '40', ')', '/', '100', 'tradition', '=', 'random', '.', 'randint', '(', '1', ',', '100', ')', '/', '100', 'equity', '=', 'random', '.', 'randint', '(', '1', ',', '100', ')', '/', '100', 'pers', '=', 'mod_hap_env', '.', 'Person', '(', 'nme', ',', '{', "'tax_min'", ':', 'tax_min', ',', "'tax_max'", ':', 'tax_max', ',', "'tradition'", ':', 'tradition', ',', "'equity'", ':', 'equity', '}', ')', 'people', '.', 'append', '(', 'pers', ')', 'print', '(', 'pers', ')', 'return', 'people'] | create a list of people with randomly generated names and stats | ['create', 'a', 'list', 'of', 'people', 'with', 'randomly', 'generated', 'names', 'and', 'stats'] | train | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/happiness_solver.py#L29-L44 |
7,433 | J535D165/recordlinkage | recordlinkage/api.py | Compare.exact | def exact(self, *args, **kwargs):
"""Compare attributes of pairs exactly.
Shortcut of :class:`recordlinkage.compare.Exact`::
from recordlinkage.compare import Exact
indexer = recordlinkage.Compare()
indexer.add(Exact())
"""
compare = Exact(*args, **kwargs)
self.add(compare)
return self | python | def exact(self, *args, **kwargs):
"""Compare attributes of pairs exactly.
Shortcut of :class:`recordlinkage.compare.Exact`::
from recordlinkage.compare import Exact
indexer = recordlinkage.Compare()
indexer.add(Exact())
"""
compare = Exact(*args, **kwargs)
self.add(compare)
return self | ['def', 'exact', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'compare', '=', 'Exact', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'self', '.', 'add', '(', 'compare', ')', 'return', 'self'] | Compare attributes of pairs exactly.
Shortcut of :class:`recordlinkage.compare.Exact`::
from recordlinkage.compare import Exact
indexer = recordlinkage.Compare()
indexer.add(Exact()) | ['Compare', 'attributes', 'of', 'pairs', 'exactly', '.'] | train | https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/api.py#L147-L161 |
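A slightly fuller, invented end-to-end sketch showing where the exact comparison sits in a record-linkage run.

```python
import pandas as pd
import recordlinkage

df_a = pd.DataFrame({"surname": ["smith", "jones"]})
df_b = pd.DataFrame({"surname": ["smith", "brown"]})

indexer = recordlinkage.Index()
indexer.full()                                  # candidate pairs: full cross product
pairs = indexer.index(df_a, df_b)

compare = recordlinkage.Compare()
compare.exact("surname", "surname", label="surname_agrees")
features = compare.compute(pairs, df_a, df_b)   # 1 where surnames match exactly, else 0
```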
7,434 | mlavin/argyle | argyle/supervisor.py | upload_supervisor_app_conf | def upload_supervisor_app_conf(app_name, template_name=None, context=None):
"""Upload Supervisor app configuration from a template."""
default = {'app_name': app_name}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % app_name, u'supervisor/base.conf']
destination = u'/etc/supervisor/conf.d/%s.conf' % app_name
upload_template(template_name, destination, context=default, use_sudo=True)
supervisor_command(u'update') | python | def upload_supervisor_app_conf(app_name, template_name=None, context=None):
"""Upload Supervisor app configuration from a template."""
default = {'app_name': app_name}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % app_name, u'supervisor/base.conf']
destination = u'/etc/supervisor/conf.d/%s.conf' % app_name
upload_template(template_name, destination, context=default, use_sudo=True)
supervisor_command(u'update') | ['def', 'upload_supervisor_app_conf', '(', 'app_name', ',', 'template_name', '=', 'None', ',', 'context', '=', 'None', ')', ':', 'default', '=', '{', "'app_name'", ':', 'app_name', '}', 'context', '=', 'context', 'or', '{', '}', 'default', '.', 'update', '(', 'context', ')', 'template_name', '=', 'template_name', 'or', '[', "u'supervisor/%s.conf'", '%', 'app_name', ',', "u'supervisor/base.conf'", ']', 'destination', '=', "u'/etc/supervisor/conf.d/%s.conf'", '%', 'app_name', 'upload_template', '(', 'template_name', ',', 'destination', ',', 'context', '=', 'default', ',', 'use_sudo', '=', 'True', ')', 'supervisor_command', '(', "u'update'", ')'] | Upload Supervisor app configuration from a template. | ['Upload', 'Supervisor', 'app', 'configuration', 'from', 'a', 'template', '.'] | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/supervisor.py#L14-L23 |
7,435 | biocommons/biocommons.seqrepo | biocommons/seqrepo/seqrepo.py | SeqRepo.translate_identifier | def translate_identifier(self, identifier, target_namespaces=None, translate_ncbi_namespace=None):
"""Given a string identifier, return a list of aliases (as
identifiers) that refer to the same sequence.
"""
namespace, alias = identifier.split(nsa_sep) if nsa_sep in identifier else (None, identifier)
aliases = self.translate_alias(alias=alias,
namespace=namespace,
target_namespaces=target_namespaces,
translate_ncbi_namespace=translate_ncbi_namespace)
return [nsa_sep.join((a["namespace"], a["alias"])) for a in aliases] | python | def translate_identifier(self, identifier, target_namespaces=None, translate_ncbi_namespace=None):
"""Given a string identifier, return a list of aliases (as
identifiers) that refer to the same sequence.
"""
namespace, alias = identifier.split(nsa_sep) if nsa_sep in identifier else (None, identifier)
aliases = self.translate_alias(alias=alias,
namespace=namespace,
target_namespaces=target_namespaces,
translate_ncbi_namespace=translate_ncbi_namespace)
return [nsa_sep.join((a["namespace"], a["alias"])) for a in aliases] | ['def', 'translate_identifier', '(', 'self', ',', 'identifier', ',', 'target_namespaces', '=', 'None', ',', 'translate_ncbi_namespace', '=', 'None', ')', ':', 'namespace', ',', 'alias', '=', 'identifier', '.', 'split', '(', 'nsa_sep', ')', 'if', 'nsa_sep', 'in', 'identifier', 'else', '(', 'None', ',', 'identifier', ')', 'aliases', '=', 'self', '.', 'translate_alias', '(', 'alias', '=', 'alias', ',', 'namespace', '=', 'namespace', ',', 'target_namespaces', '=', 'target_namespaces', ',', 'translate_ncbi_namespace', '=', 'translate_ncbi_namespace', ')', 'return', '[', 'nsa_sep', '.', 'join', '(', '(', 'a', '[', '"namespace"', ']', ',', 'a', '[', '"alias"', ']', ')', ')', 'for', 'a', 'in', 'aliases', ']'] | Given a string identifier, return a list of aliases (as
identifiers) that refer to the same sequence. | ['Given', 'a', 'string', 'identifier', 'return', 'a', 'list', 'of', 'aliases', '(', 'as', 'identifiers', ')', 'that', 'refer', 'to', 'the', 'same', 'sequence', '.'] | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqrepo.py#L191-L201 |
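Hedged usage sketch for the `translate_identifier` record above; the snapshot path and the identifier are placeholders for whatever is installed locally.

```python
from biocommons.seqrepo import SeqRepo

sr = SeqRepo("/usr/local/share/seqrepo/latest")     # hypothetical local snapshot
# Returns namespace-qualified aliases ("namespace:alias") for the same sequence.
print(sr.translate_identifier("NCBI:NM_000551.3"))
```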
7,436 | mdsol/rwslib | rwslib/builders/metadata.py | Question.build | def build(self, builder):
"""
Build XML by appending to builder
.. note:: Questions can contain translations
"""
builder.start("Question", {})
for translation in self.translations:
translation.build(builder)
builder.end("Question") | python | def build(self, builder):
"""
Build XML by appending to builder
.. note:: Questions can contain translations
"""
builder.start("Question", {})
for translation in self.translations:
translation.build(builder)
builder.end("Question") | ['def', 'build', '(', 'self', ',', 'builder', ')', ':', 'builder', '.', 'start', '(', '"Question"', ',', '{', '}', ')', 'for', 'translation', 'in', 'self', '.', 'translations', ':', 'translation', '.', 'build', '(', 'builder', ')', 'builder', '.', 'end', '(', '"Question"', ')'] | Build XML by appending to builder
.. note:: Questions can contain translations | ['Build', 'XML', 'by', 'appending', 'to', 'builder'] | train | https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L1300-L1309 |
7,437 | Pithikos/python-websocket-server | websocket_server/websocket_server.py | WebSocketHandler.send_text | def send_text(self, message, opcode=OPCODE_TEXT):
"""
Important: Fragmented(=continuation) messages are not supported since
their usage cases are limited - when we don't know the payload length.
"""
# Validate message
if isinstance(message, bytes):
message = try_decode_UTF8(message) # this is slower but ensures we have UTF-8
if not message:
logger.warning("Can\'t send message, message is not valid UTF-8")
return False
elif sys.version_info < (3,0) and (isinstance(message, str) or isinstance(message, unicode)):
pass
elif isinstance(message, str):
pass
else:
logger.warning('Can\'t send message, message has to be a string or bytes. Given type is %s' % type(message))
return False
header = bytearray()
payload = encode_to_UTF8(message)
payload_length = len(payload)
# Normal payload
if payload_length <= 125:
header.append(FIN | opcode)
header.append(payload_length)
# Extended payload
elif payload_length >= 126 and payload_length <= 65535:
header.append(FIN | opcode)
header.append(PAYLOAD_LEN_EXT16)
header.extend(struct.pack(">H", payload_length))
# Huge extended payload
elif payload_length < 18446744073709551616:
header.append(FIN | opcode)
header.append(PAYLOAD_LEN_EXT64)
header.extend(struct.pack(">Q", payload_length))
else:
raise Exception("Message is too big. Consider breaking it into chunks.")
return
self.request.send(header + payload) | python | def send_text(self, message, opcode=OPCODE_TEXT):
"""
Important: Fragmented(=continuation) messages are not supported since
their usage cases are limited - when we don't know the payload length.
"""
# Validate message
if isinstance(message, bytes):
message = try_decode_UTF8(message) # this is slower but ensures we have UTF-8
if not message:
logger.warning("Can\'t send message, message is not valid UTF-8")
return False
elif sys.version_info < (3,0) and (isinstance(message, str) or isinstance(message, unicode)):
pass
elif isinstance(message, str):
pass
else:
logger.warning('Can\'t send message, message has to be a string or bytes. Given type is %s' % type(message))
return False
header = bytearray()
payload = encode_to_UTF8(message)
payload_length = len(payload)
# Normal payload
if payload_length <= 125:
header.append(FIN | opcode)
header.append(payload_length)
# Extended payload
elif payload_length >= 126 and payload_length <= 65535:
header.append(FIN | opcode)
header.append(PAYLOAD_LEN_EXT16)
header.extend(struct.pack(">H", payload_length))
# Huge extended payload
elif payload_length < 18446744073709551616:
header.append(FIN | opcode)
header.append(PAYLOAD_LEN_EXT64)
header.extend(struct.pack(">Q", payload_length))
else:
raise Exception("Message is too big. Consider breaking it into chunks.")
return
self.request.send(header + payload) | ['def', 'send_text', '(', 'self', ',', 'message', ',', 'opcode', '=', 'OPCODE_TEXT', ')', ':', '# Validate message', 'if', 'isinstance', '(', 'message', ',', 'bytes', ')', ':', 'message', '=', 'try_decode_UTF8', '(', 'message', ')', '# this is slower but ensures we have UTF-8', 'if', 'not', 'message', ':', 'logger', '.', 'warning', '(', '"Can\\\'t send message, message is not valid UTF-8"', ')', 'return', 'False', 'elif', 'sys', '.', 'version_info', '<', '(', '3', ',', '0', ')', 'and', '(', 'isinstance', '(', 'message', ',', 'str', ')', 'or', 'isinstance', '(', 'message', ',', 'unicode', ')', ')', ':', 'pass', 'elif', 'isinstance', '(', 'message', ',', 'str', ')', ':', 'pass', 'else', ':', 'logger', '.', 'warning', '(', "'Can\\'t send message, message has to be a string or bytes. Given type is %s'", '%', 'type', '(', 'message', ')', ')', 'return', 'False', 'header', '=', 'bytearray', '(', ')', 'payload', '=', 'encode_to_UTF8', '(', 'message', ')', 'payload_length', '=', 'len', '(', 'payload', ')', '# Normal payload', 'if', 'payload_length', '<=', '125', ':', 'header', '.', 'append', '(', 'FIN', '|', 'opcode', ')', 'header', '.', 'append', '(', 'payload_length', ')', '# Extended payload', 'elif', 'payload_length', '>=', '126', 'and', 'payload_length', '<=', '65535', ':', 'header', '.', 'append', '(', 'FIN', '|', 'opcode', ')', 'header', '.', 'append', '(', 'PAYLOAD_LEN_EXT16', ')', 'header', '.', 'extend', '(', 'struct', '.', 'pack', '(', '">H"', ',', 'payload_length', ')', ')', '# Huge extended payload', 'elif', 'payload_length', '<', '18446744073709551616', ':', 'header', '.', 'append', '(', 'FIN', '|', 'opcode', ')', 'header', '.', 'append', '(', 'PAYLOAD_LEN_EXT64', ')', 'header', '.', 'extend', '(', 'struct', '.', 'pack', '(', '">Q"', ',', 'payload_length', ')', ')', 'else', ':', 'raise', 'Exception', '(', '"Message is too big. Consider breaking it into chunks."', ')', 'return', 'self', '.', 'request', '.', 'send', '(', 'header', '+', 'payload', ')'] | Important: Fragmented(=continuation) messages are not supported since
their usage cases are limited - when we don't know the payload length. | ['Important', ':', 'Fragmented', '(', '=', 'continuation', ')', 'messages', 'are', 'not', 'supported', 'since', 'their', 'usage', 'cases', 'are', 'limited', '-', 'when', 'we', 'don', 't', 'know', 'the', 'payload', 'length', '.'] | train | https://github.com/Pithikos/python-websocket-server/blob/ae6ee7f5d400cde43e2cb89b8c5aec812e927082/websocket_server/websocket_server.py#L252-L297 |
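`send_text` is normally reached through the library's public server API rather than called directly; a minimal invented echo-server sketch.

```python
from websocket_server import WebsocketServer

def message_received(client, server, message):
    # send_message ultimately delegates to the handler's send_text framing above.
    server.send_message(client, "echo: " + message)

server = WebsocketServer(port=9001, host="127.0.0.1")
server.set_fn_message_received(message_received)
server.run_forever()
```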
7,438 | mitsei/dlkit | dlkit/json_/assessment/sessions.py | BankHierarchyDesignSession.remove_child_banks | def remove_child_banks(self, bank_id):
"""Removes all children from a bank.
arg: bank_id (osid.id.Id): the ``Id`` of a bank
raise: NotFound - ``bank_id`` is not in hierarchy
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalogs(catalog_id=bank_id)
return self._hierarchy_session.remove_children(id_=bank_id) | python | def remove_child_banks(self, bank_id):
"""Removes all children from a bank.
arg: bank_id (osid.id.Id): the ``Id`` of a bank
raise: NotFound - ``bank_id`` is not in hierarchy
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalogs(catalog_id=bank_id)
return self._hierarchy_session.remove_children(id_=bank_id) | ['def', 'remove_child_banks', '(', 'self', ',', 'bank_id', ')', ':', '# Implemented from template for', '# osid.resource.BinHierarchyDesignSession.remove_child_bin_template', 'if', 'self', '.', '_catalog_session', 'is', 'not', 'None', ':', 'return', 'self', '.', '_catalog_session', '.', 'remove_child_catalogs', '(', 'catalog_id', '=', 'bank_id', ')', 'return', 'self', '.', '_hierarchy_session', '.', 'remove_children', '(', 'id_', '=', 'bank_id', ')'] | Removes all children from a bank.
arg: bank_id (osid.id.Id): the ``Id`` of a bank
raise: NotFound - ``bank_id`` is not in hierarchy
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* | ['Removes', 'all', 'children', 'from', 'a', 'bank', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L9468-L9483 |
7,439 | mozilla/django-tidings | tidings/events.py | Event.notify | def notify(cls, user_or_email_, object_id=None, **filters):
"""Start notifying the given user or email address when this event
occurs and meets the criteria given in ``filters``.
Return the created (or the existing matching) Watch so you can call
:meth:`~tidings.models.Watch.activate()` on it if you're so inclined.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
Send an activation email if an anonymous watch is created and
:data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
``True``. If the activation request fails, raise a
ActivationRequestFailed exception.
Calling :meth:`notify()` twice for an anonymous user will send the
email each time.
"""
# A test-for-existence-then-create race condition exists here, but it
# doesn't matter: de-duplication on fire() and deletion of all matches
# on stop_notifying() nullify its effects.
try:
# Pick 1 if >1 are returned:
watch = cls._watches_belonging_to_user(
user_or_email_,
object_id=object_id,
**filters)[0:1].get()
except Watch.DoesNotExist:
create_kwargs = {}
if cls.content_type:
create_kwargs['content_type'] = \
ContentType.objects.get_for_model(cls.content_type)
create_kwargs['email' if isinstance(user_or_email_, string_types)
else 'user'] = user_or_email_
# Letters that can't be mistaken for other letters or numbers in
# most fonts, in case people try to type these:
distinguishable_letters = \
'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
secret = ''.join(random.choice(distinguishable_letters)
for x in range(10))
# Registered users don't need to confirm, but anonymous users do.
is_active = ('user' in create_kwargs or
not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
if object_id:
create_kwargs['object_id'] = object_id
watch = Watch.objects.create(
secret=secret,
is_active=is_active,
event_type=cls.event_type,
**create_kwargs)
for k, v in iteritems(filters):
WatchFilter.objects.create(watch=watch, name=k,
value=hash_to_unsigned(v))
# Send email for inactive watches.
if not watch.is_active:
email = watch.user.email if watch.user else watch.email
message = cls._activation_email(watch, email)
try:
message.send()
except SMTPException as e:
watch.delete()
raise ActivationRequestFailed(e.recipients)
return watch | python | def notify(cls, user_or_email_, object_id=None, **filters):
"""Start notifying the given user or email address when this event
occurs and meets the criteria given in ``filters``.
Return the created (or the existing matching) Watch so you can call
:meth:`~tidings.models.Watch.activate()` on it if you're so inclined.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
Send an activation email if an anonymous watch is created and
:data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
``True``. If the activation request fails, raise a
ActivationRequestFailed exception.
Calling :meth:`notify()` twice for an anonymous user will send the
email each time.
"""
# A test-for-existence-then-create race condition exists here, but it
# doesn't matter: de-duplication on fire() and deletion of all matches
# on stop_notifying() nullify its effects.
try:
# Pick 1 if >1 are returned:
watch = cls._watches_belonging_to_user(
user_or_email_,
object_id=object_id,
**filters)[0:1].get()
except Watch.DoesNotExist:
create_kwargs = {}
if cls.content_type:
create_kwargs['content_type'] = \
ContentType.objects.get_for_model(cls.content_type)
create_kwargs['email' if isinstance(user_or_email_, string_types)
else 'user'] = user_or_email_
# Letters that can't be mistaken for other letters or numbers in
# most fonts, in case people try to type these:
distinguishable_letters = \
'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
secret = ''.join(random.choice(distinguishable_letters)
for x in range(10))
# Registered users don't need to confirm, but anonymous users do.
is_active = ('user' in create_kwargs or
not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
if object_id:
create_kwargs['object_id'] = object_id
watch = Watch.objects.create(
secret=secret,
is_active=is_active,
event_type=cls.event_type,
**create_kwargs)
for k, v in iteritems(filters):
WatchFilter.objects.create(watch=watch, name=k,
value=hash_to_unsigned(v))
# Send email for inactive watches.
if not watch.is_active:
email = watch.user.email if watch.user else watch.email
message = cls._activation_email(watch, email)
try:
message.send()
except SMTPException as e:
watch.delete()
raise ActivationRequestFailed(e.recipients)
return watch | ['def', 'notify', '(', 'cls', ',', 'user_or_email_', ',', 'object_id', '=', 'None', ',', '*', '*', 'filters', ')', ':', '# A test-for-existence-then-create race condition exists here, but it', "# doesn't matter: de-duplication on fire() and deletion of all matches", '# on stop_notifying() nullify its effects.', 'try', ':', '# Pick 1 if >1 are returned:', 'watch', '=', 'cls', '.', '_watches_belonging_to_user', '(', 'user_or_email_', ',', 'object_id', '=', 'object_id', ',', '*', '*', 'filters', ')', '[', '0', ':', '1', ']', '.', 'get', '(', ')', 'except', 'Watch', '.', 'DoesNotExist', ':', 'create_kwargs', '=', '{', '}', 'if', 'cls', '.', 'content_type', ':', 'create_kwargs', '[', "'content_type'", ']', '=', 'ContentType', '.', 'objects', '.', 'get_for_model', '(', 'cls', '.', 'content_type', ')', 'create_kwargs', '[', "'email'", 'if', 'isinstance', '(', 'user_or_email_', ',', 'string_types', ')', 'else', "'user'", ']', '=', 'user_or_email_', "# Letters that can't be mistaken for other letters or numbers in", '# most fonts, in case people try to type these:', 'distinguishable_letters', '=', "'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'", 'secret', '=', "''", '.', 'join', '(', 'random', '.', 'choice', '(', 'distinguishable_letters', ')', 'for', 'x', 'in', 'range', '(', '10', ')', ')', "# Registered users don't need to confirm, but anonymous users do.", 'is_active', '=', '(', "'user'", 'in', 'create_kwargs', 'or', 'not', 'settings', '.', 'TIDINGS_CONFIRM_ANONYMOUS_WATCHES', ')', 'if', 'object_id', ':', 'create_kwargs', '[', "'object_id'", ']', '=', 'object_id', 'watch', '=', 'Watch', '.', 'objects', '.', 'create', '(', 'secret', '=', 'secret', ',', 'is_active', '=', 'is_active', ',', 'event_type', '=', 'cls', '.', 'event_type', ',', '*', '*', 'create_kwargs', ')', 'for', 'k', ',', 'v', 'in', 'iteritems', '(', 'filters', ')', ':', 'WatchFilter', '.', 'objects', '.', 'create', '(', 'watch', '=', 'watch', ',', 'name', '=', 'k', ',', 'value', '=', 'hash_to_unsigned', '(', 'v', ')', ')', '# Send email for inactive watches.', 'if', 'not', 'watch', '.', 'is_active', ':', 'email', '=', 'watch', '.', 'user', '.', 'email', 'if', 'watch', '.', 'user', 'else', 'watch', '.', 'email', 'message', '=', 'cls', '.', '_activation_email', '(', 'watch', ',', 'email', ')', 'try', ':', 'message', '.', 'send', '(', ')', 'except', 'SMTPException', 'as', 'e', ':', 'watch', '.', 'delete', '(', ')', 'raise', 'ActivationRequestFailed', '(', 'e', '.', 'recipients', ')', 'return', 'watch'] | Start notifying the given user or email address when this event
occurs and meets the criteria given in ``filters``.
Return the created (or the existing matching) Watch so you can call
:meth:`~tidings.models.Watch.activate()` on it if you're so inclined.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
Send an activation email if an anonymous watch is created and
:data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
``True``. If the activation request fails, raise a
ActivationRequestFailed exception.
Calling :meth:`notify()` twice for an anonymous user will send the
email each time. | ['Start', 'notifying', 'the', 'given', 'user', 'or', 'email', 'address', 'when', 'this', 'event', 'occurs', 'and', 'meets', 'the', 'criteria', 'given', 'in', 'filters', '.'] | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L364-L427 |
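A hedged sketch of calling `notify()` as documented above. `NewPostEvent` is a hypothetical `Event` subclass and the address/object id are placeholders; a real call also needs a configured Django project with the django-tidings settings referenced in the docstring.

# Assumption: NewPostEvent subclasses tidings.events.Event and defines event_type/content_type.
watch = NewPostEvent.notify('reader@example.com', object_id=42)
# Anonymous (email-only) watches may trigger an activation mail; the returned Watch can
# later be activated via watch.activate(), as the docstring notes.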
7,440 | 1flow/python-ftr | ftr/config.py | SiteConfig.reset | def reset(self):
""" (re)set all attributes to defaults (eg. empty sets or ``None``). """
# Use first matching element as title (0 or more xpath expressions)
self.title = OrderedSet()
# Use first matching element as body (0 or more xpath expressions)
self.body = OrderedSet()
# Use first matching element as author (0 or more xpath expressions)
self.author = OrderedSet()
# Use first matching element as date (0 or more xpath expressions)
self.date = OrderedSet()
# Put language here. It's not supported in siteconfig syntax,
# but having it here allows more generic handling in extractor.
self.language = (
'//html[@lang]/@lang',
'//meta[@name="DC.language"]/@content',
)
# Strip elements matching these xpath expressions (0 or more)
self.strip = OrderedSet()
# Strip 0 or more elements which contain these
# strings in the id or class attribute.
self.strip_id_or_class = OrderedSet()
# Strip 0 or more images which contain
# these strings in the src attribute.
self.strip_image_src = OrderedSet()
# Additional HTTP headers to send
# NOT YET USED
self.http_header = OrderedSet()
# For those 3, None means that default will be used. But we need
# None to distinguish from False during multiple configurations
# merges.
self.tidy = None
self.prune = None
self.autodetect_on_failure = None
# Test URL - if present, can be used to test the config above
self.test_url = OrderedSet()
self.test_contains = OrderedSet()
# Single-page link should identify a link element or URL pointing
# to the page holding the entire article.
#
# This is useful for sites which split their articles across
# multiple pages. Links to such pages tend to display the first
# page with links to the other pages at the bottom.
#
# Often there is also a link to a page which displays the entire
# article on one page (e.g. 'print view').
#
# `single_page_link` should be an XPath expression identifying the
# link to that single page. If present and we find a match, we will
# retrieve that page and the rest of the options in this config will
# be applied to the new page.
self.single_page_link = OrderedSet()
self.next_page_link = OrderedSet()
# Single-page link in feed? - same as above, but patterns applied
# to item description HTML taken from feed. XXX
self.single_page_link_in_feed = OrderedSet()
# Which parser to use for turning raw HTML into a DOMDocument,
# either `libxml` (PHP) / `lxml` (Python) or `html5lib`. Defaults
# to `lxml` if None.
self.parser = None
# Strings to search for in HTML before processing begins. Goes by
# pairs with `replace_string`. Not a set because we can have more
# than one of the same, to be replaced by different values.
self.find_string = []
# Strings to replace those found in `find_string` before HTML
# processing begins.
self.replace_string = [] | python | def reset(self):
""" (re)set all attributes to defaults (eg. empty sets or ``None``). """
# Use first matching element as title (0 or more xpath expressions)
self.title = OrderedSet()
# Use first matching element as body (0 or more xpath expressions)
self.body = OrderedSet()
# Use first matching element as author (0 or more xpath expressions)
self.author = OrderedSet()
# Use first matching element as date (0 or more xpath expressions)
self.date = OrderedSet()
# Put language here. It's not supported in siteconfig syntax,
# but having it here allows more generic handling in extractor.
self.language = (
'//html[@lang]/@lang',
'//meta[@name="DC.language"]/@content',
)
# Strip elements matching these xpath expressions (0 or more)
self.strip = OrderedSet()
# Strip 0 or more elements which contain these
# strings in the id or class attribute.
self.strip_id_or_class = OrderedSet()
# Strip 0 or more images which contain
# these strings in the src attribute.
self.strip_image_src = OrderedSet()
# Additional HTTP headers to send
# NOT YET USED
self.http_header = OrderedSet()
# For those 3, None means that default will be used. But we need
# None to distinguish from False during multiple configurations
# merges.
self.tidy = None
self.prune = None
self.autodetect_on_failure = None
# Test URL - if present, can be used to test the config above
self.test_url = OrderedSet()
self.test_contains = OrderedSet()
# Single-page link should identify a link element or URL pointing
# to the page holding the entire article.
#
# This is useful for sites which split their articles across
# multiple pages. Links to such pages tend to display the first
# page with links to the other pages at the bottom.
#
# Often there is also a link to a page which displays the entire
# article on one page (e.g. 'print view').
#
# `single_page_link` should be an XPath expression identifying the
# link to that single page. If present and we find a match, we will
# retrieve that page and the rest of the options in this config will
# be applied to the new page.
self.single_page_link = OrderedSet()
self.next_page_link = OrderedSet()
# Single-page link in feed? - same as above, but patterns applied
# to item description HTML taken from feed. XXX
self.single_page_link_in_feed = OrderedSet()
# Which parser to use for turning raw HTML into a DOMDocument,
# either `libxml` (PHP) / `lxml` (Python) or `html5lib`. Defaults
# to `lxml` if None.
self.parser = None
# Strings to search for in HTML before processing begins. Goes by
# pairs with `replace_string`. Not a set because we can have more
# than one of the same, to be replaced by different values.
self.find_string = []
# Strings to replace those found in `find_string` before HTML
# processing begins.
self.replace_string = [] | ['def', 'reset', '(', 'self', ')', ':', '# Use first matching element as title (0 or more xpath expressions)', 'self', '.', 'title', '=', 'OrderedSet', '(', ')', '# Use first matching element as body (0 or more xpath expressions)', 'self', '.', 'body', '=', 'OrderedSet', '(', ')', '# Use first matching element as author (0 or more xpath expressions)', 'self', '.', 'author', '=', 'OrderedSet', '(', ')', '# Use first matching element as date (0 or more xpath expressions)', 'self', '.', 'date', '=', 'OrderedSet', '(', ')', "# Put language here. It's not supported in siteconfig syntax,", '# but having it here allows more generic handling in extractor.', 'self', '.', 'language', '=', '(', "'//html[@lang]/@lang'", ',', '\'//meta[@name="DC.language"]/@content\'', ',', ')', '# Strip elements matching these xpath expressions (0 or more)', 'self', '.', 'strip', '=', 'OrderedSet', '(', ')', '# Strip 0 or more elements which contain these', '# strings in the id or class attribute.', 'self', '.', 'strip_id_or_class', '=', 'OrderedSet', '(', ')', '# Strip 0 or more images which contain', '# these strings in the src attribute.', 'self', '.', 'strip_image_src', '=', 'OrderedSet', '(', ')', '# Additional HTTP headers to send', '# NOT YET USED', 'self', '.', 'http_header', '=', 'OrderedSet', '(', ')', '# For those 3, None means that default will be used. But we need', '# None to distinguish from False during multiple configurations', '# merges.', 'self', '.', 'tidy', '=', 'None', 'self', '.', 'prune', '=', 'None', 'self', '.', 'autodetect_on_failure', '=', 'None', '# Test URL - if present, can be used to test the config above', 'self', '.', 'test_url', '=', 'OrderedSet', '(', ')', 'self', '.', 'test_contains', '=', 'OrderedSet', '(', ')', '# Single-page link should identify a link element or URL pointing', '# to the page holding the entire article.', '#', '# This is useful for sites which split their articles across', '# multiple pages. Links to such pages tend to display the first', '# page with links to the other pages at the bottom.', '#', '# Often there is also a link to a page which displays the entire', "# article on one page (e.g. 'print view').", '#', '# `single_page_link` should be an XPath expression identifying the', '# link to that single page. If present and we find a match, we will', '# retrieve that page and the rest of the options in this config will', '# be applied to the new page.', 'self', '.', 'single_page_link', '=', 'OrderedSet', '(', ')', 'self', '.', 'next_page_link', '=', 'OrderedSet', '(', ')', '# Single-page link in feed? - same as above, but patterns applied', '# to item description HTML taken from feed. XXX', 'self', '.', 'single_page_link_in_feed', '=', 'OrderedSet', '(', ')', '# Which parser to use for turning raw HTML into a DOMDocument,', '# either `libxml` (PHP) / `lxml` (Python) or `html5lib`. Defaults', '# to `lxml` if None.', 'self', '.', 'parser', '=', 'None', '# Strings to search for in HTML before processing begins. Goes by', '# pairs with `replace_string`. Not a set because we can have more', '# than one of the same, to be replaced by different values.', 'self', '.', 'find_string', '=', '[', ']', '# Strings to replace those found in `find_string` before HTML', '# processing begins.', 'self', '.', 'replace_string', '=', '[', ']'] | (re)set all attributes to defaults (eg. empty sets or ``None``). 
| ['(', 're', ')', 'set', 'all', 'attributes', 'to', 'defaults', '(', 'eg', '.', 'empty', 'sets', 'or', 'None', ')', '.'] | train | https://github.com/1flow/python-ftr/blob/90a2108c5ee005f1bf66dbe8cce68f2b7051b839/ftr/config.py#L408-L490 |
7,441 | fake-name/ChromeController | ChromeController/Generator/Generated.py | ChromeRemoteDebugInterface.Input_synthesizePinchGesture | def Input_synthesizePinchGesture(self, x, y, scaleFactor, **kwargs):
"""
Function path: Input.synthesizePinchGesture
Domain: Input
Method name: synthesizePinchGesture
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'x' (type: number) -> X coordinate of the start of the gesture in CSS pixels.
'y' (type: number) -> Y coordinate of the start of the gesture in CSS pixels.
'scaleFactor' (type: number) -> Relative scale factor after zooming (>1.0 zooms in, <1.0 zooms out).
Optional arguments:
'relativeSpeed' (type: integer) -> Relative pointer speed in pixels per second (default: 800).
'gestureSourceType' (type: GestureSourceType) -> Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type).
No return value.
Description: Synthesizes a pinch gesture over a time period by issuing appropriate touch events.
"""
assert isinstance(x, (float, int)
), "Argument 'x' must be of type '['float', 'int']'. Received type: '%s'" % type(
x)
assert isinstance(y, (float, int)
), "Argument 'y' must be of type '['float', 'int']'. Received type: '%s'" % type(
y)
assert isinstance(scaleFactor, (float, int)
), "Argument 'scaleFactor' must be of type '['float', 'int']'. Received type: '%s'" % type(
scaleFactor)
if 'relativeSpeed' in kwargs:
assert isinstance(kwargs['relativeSpeed'], (int,)
), "Optional argument 'relativeSpeed' must be of type '['int']'. Received type: '%s'" % type(
kwargs['relativeSpeed'])
expected = ['relativeSpeed', 'gestureSourceType']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['relativeSpeed', 'gestureSourceType']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Input.synthesizePinchGesture', x
=x, y=y, scaleFactor=scaleFactor, **kwargs)
return subdom_funcs | python | def Input_synthesizePinchGesture(self, x, y, scaleFactor, **kwargs):
"""
Function path: Input.synthesizePinchGesture
Domain: Input
Method name: synthesizePinchGesture
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'x' (type: number) -> X coordinate of the start of the gesture in CSS pixels.
'y' (type: number) -> Y coordinate of the start of the gesture in CSS pixels.
'scaleFactor' (type: number) -> Relative scale factor after zooming (>1.0 zooms in, <1.0 zooms out).
Optional arguments:
'relativeSpeed' (type: integer) -> Relative pointer speed in pixels per second (default: 800).
'gestureSourceType' (type: GestureSourceType) -> Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type).
No return value.
Description: Synthesizes a pinch gesture over a time period by issuing appropriate touch events.
"""
assert isinstance(x, (float, int)
), "Argument 'x' must be of type '['float', 'int']'. Received type: '%s'" % type(
x)
assert isinstance(y, (float, int)
), "Argument 'y' must be of type '['float', 'int']'. Received type: '%s'" % type(
y)
assert isinstance(scaleFactor, (float, int)
), "Argument 'scaleFactor' must be of type '['float', 'int']'. Received type: '%s'" % type(
scaleFactor)
if 'relativeSpeed' in kwargs:
assert isinstance(kwargs['relativeSpeed'], (int,)
), "Optional argument 'relativeSpeed' must be of type '['int']'. Received type: '%s'" % type(
kwargs['relativeSpeed'])
expected = ['relativeSpeed', 'gestureSourceType']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['relativeSpeed', 'gestureSourceType']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Input.synthesizePinchGesture', x
=x, y=y, scaleFactor=scaleFactor, **kwargs)
return subdom_funcs | ['def', 'Input_synthesizePinchGesture', '(', 'self', ',', 'x', ',', 'y', ',', 'scaleFactor', ',', '*', '*', 'kwargs', ')', ':', 'assert', 'isinstance', '(', 'x', ',', '(', 'float', ',', 'int', ')', ')', ',', '"Argument \'x\' must be of type \'[\'float\', \'int\']\'. Received type: \'%s\'"', '%', 'type', '(', 'x', ')', 'assert', 'isinstance', '(', 'y', ',', '(', 'float', ',', 'int', ')', ')', ',', '"Argument \'y\' must be of type \'[\'float\', \'int\']\'. Received type: \'%s\'"', '%', 'type', '(', 'y', ')', 'assert', 'isinstance', '(', 'scaleFactor', ',', '(', 'float', ',', 'int', ')', ')', ',', '"Argument \'scaleFactor\' must be of type \'[\'float\', \'int\']\'. Received type: \'%s\'"', '%', 'type', '(', 'scaleFactor', ')', 'if', "'relativeSpeed'", 'in', 'kwargs', ':', 'assert', 'isinstance', '(', 'kwargs', '[', "'relativeSpeed'", ']', ',', '(', 'int', ',', ')', ')', ',', '"Optional argument \'relativeSpeed\' must be of type \'[\'int\']\'. Received type: \'%s\'"', '%', 'type', '(', 'kwargs', '[', "'relativeSpeed'", ']', ')', 'expected', '=', '[', "'relativeSpeed'", ',', "'gestureSourceType'", ']', 'passed_keys', '=', 'list', '(', 'kwargs', '.', 'keys', '(', ')', ')', 'assert', 'all', '(', '[', '(', 'key', 'in', 'expected', ')', 'for', 'key', 'in', 'passed_keys', ']', ')', ',', '"Allowed kwargs are [\'relativeSpeed\', \'gestureSourceType\']. Passed kwargs: %s"', '%', 'passed_keys', 'subdom_funcs', '=', 'self', '.', 'synchronous_command', '(', "'Input.synthesizePinchGesture'", ',', 'x', '=', 'x', ',', 'y', '=', 'y', ',', 'scaleFactor', '=', 'scaleFactor', ',', '*', '*', 'kwargs', ')', 'return', 'subdom_funcs'] | Function path: Input.synthesizePinchGesture
Domain: Input
Method name: synthesizePinchGesture
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'x' (type: number) -> X coordinate of the start of the gesture in CSS pixels.
'y' (type: number) -> Y coordinate of the start of the gesture in CSS pixels.
'scaleFactor' (type: number) -> Relative scale factor after zooming (>1.0 zooms in, <1.0 zooms out).
Optional arguments:
'relativeSpeed' (type: integer) -> Relative pointer speed in pixels per second (default: 800).
'gestureSourceType' (type: GestureSourceType) -> Which type of input events to be generated (default: 'default', which queries the platform for the preferred input type).
No return value.
Description: Synthesizes a pinch gesture over a time period by issuing appropriate touch events. | ['Function', 'path', ':', 'Input', '.', 'synthesizePinchGesture', 'Domain', ':', 'Input', 'Method', 'name', ':', 'synthesizePinchGesture', 'WARNING', ':', 'This', 'function', 'is', 'marked', 'Experimental', '!', 'Parameters', ':', 'Required', 'arguments', ':', 'x', '(', 'type', ':', 'number', ')', '-', '>', 'X', 'coordinate', 'of', 'the', 'start', 'of', 'the', 'gesture', 'in', 'CSS', 'pixels', '.', 'y', '(', 'type', ':', 'number', ')', '-', '>', 'Y', 'coordinate', 'of', 'the', 'start', 'of', 'the', 'gesture', 'in', 'CSS', 'pixels', '.', 'scaleFactor', '(', 'type', ':', 'number', ')', '-', '>', 'Relative', 'scale', 'factor', 'after', 'zooming', '(', '>', '1', '.', '0', 'zooms', 'in', '<1', '.', '0', 'zooms', 'out', ')', '.', 'Optional', 'arguments', ':', 'relativeSpeed', '(', 'type', ':', 'integer', ')', '-', '>', 'Relative', 'pointer', 'speed', 'in', 'pixels', 'per', 'second', '(', 'default', ':', '800', ')', '.', 'gestureSourceType', '(', 'type', ':', 'GestureSourceType', ')', '-', '>', 'Which', 'type', 'of', 'input', 'events', 'to', 'be', 'generated', '(', 'default', ':', 'default', 'which', 'queries', 'the', 'platform', 'for', 'the', 'preferred', 'input', 'type', ')', '.', 'No', 'return', 'value', '.', 'Description', ':', 'Synthesizes', 'a', 'pinch', 'gesture', 'over', 'a', 'time', 'period', 'by', 'issuing', 'appropriate', 'touch', 'events', '.'] | train | https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L5491-L5530 |
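An illustrative call for the generated wrapper above. `interface` is assumed to be a ChromeRemoteDebugInterface already attached to a running Chrome tab; the coordinates, scale factor, and speed are arbitrary sample values within the documented parameter types.

# Assumption: `interface` was constructed elsewhere and is connected to a tab.
interface.Input_synthesizePinchGesture(400, 300, 2.0, relativeSpeed=800)  # pinch-zoom in at (400, 300)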
7,442 | sebdah/dynamic-dynamodb | dynamic_dynamodb/calculators.py | increase_writes_in_units | def increase_writes_in_units(
current_provisioning, units, max_provisioned_writes,
consumed_write_units_percent, log_tag):
""" Increase the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we increase with
:returns: int -- New provisioning value
:type max_provisioned_writes: int
:param max_provisioned_writes: Configured max provisioned writes
:type consumed_write_units_percent: float
:param consumed_write_units_percent: Number of consumed write units
:type log_tag: str
:param log_tag: Prefix for the log
"""
units = int(units)
current_provisioning = float(current_provisioning)
consumed_write_units_percent = float(consumed_write_units_percent)
consumption_based_current_provisioning = \
int(math.ceil(current_provisioning*(consumed_write_units_percent/100)))
if consumption_based_current_provisioning > current_provisioning:
updated_provisioning = consumption_based_current_provisioning + units
else:
updated_provisioning = int(current_provisioning) + units
if max_provisioned_writes > 0:
if updated_provisioning > max_provisioned_writes:
logger.info(
'{0} - Reached provisioned writes max limit: {1}'.format(
log_tag,
max_provisioned_writes))
return max_provisioned_writes
logger.debug(
'{0} - Write provisioning will be increased to {1:d} units'.format(
log_tag,
int(updated_provisioning)))
return updated_provisioning | python | def increase_writes_in_units(
current_provisioning, units, max_provisioned_writes,
consumed_write_units_percent, log_tag):
""" Increase the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we increase with
:returns: int -- New provisioning value
:type max_provisioned_writes: int
:param max_provisioned_writes: Configured max provisioned writes
:type consumed_write_units_percent: float
:param consumed_write_units_percent: Number of consumed write units
:type log_tag: str
:param log_tag: Prefix for the log
"""
units = int(units)
current_provisioning = float(current_provisioning)
consumed_write_units_percent = float(consumed_write_units_percent)
consumption_based_current_provisioning = \
int(math.ceil(current_provisioning*(consumed_write_units_percent/100)))
if consumption_based_current_provisioning > current_provisioning:
updated_provisioning = consumption_based_current_provisioning + units
else:
updated_provisioning = int(current_provisioning) + units
if max_provisioned_writes > 0:
if updated_provisioning > max_provisioned_writes:
logger.info(
'{0} - Reached provisioned writes max limit: {1}'.format(
log_tag,
max_provisioned_writes))
return max_provisioned_writes
logger.debug(
'{0} - Write provisioning will be increased to {1:d} units'.format(
log_tag,
int(updated_provisioning)))
return updated_provisioning | ['def', 'increase_writes_in_units', '(', 'current_provisioning', ',', 'units', ',', 'max_provisioned_writes', ',', 'consumed_write_units_percent', ',', 'log_tag', ')', ':', 'units', '=', 'int', '(', 'units', ')', 'current_provisioning', '=', 'float', '(', 'current_provisioning', ')', 'consumed_write_units_percent', '=', 'float', '(', 'consumed_write_units_percent', ')', 'consumption_based_current_provisioning', '=', 'int', '(', 'math', '.', 'ceil', '(', 'current_provisioning', '*', '(', 'consumed_write_units_percent', '/', '100', ')', ')', ')', 'if', 'consumption_based_current_provisioning', '>', 'current_provisioning', ':', 'updated_provisioning', '=', 'consumption_based_current_provisioning', '+', 'units', 'else', ':', 'updated_provisioning', '=', 'int', '(', 'current_provisioning', ')', '+', 'units', 'if', 'max_provisioned_writes', '>', '0', ':', 'if', 'updated_provisioning', '>', 'max_provisioned_writes', ':', 'logger', '.', 'info', '(', "'{0} - Reached provisioned writes max limit: {1}'", '.', 'format', '(', 'log_tag', ',', 'max_provisioned_writes', ')', ')', 'return', 'max_provisioned_writes', 'logger', '.', 'debug', '(', "'{0} - Write provisioning will be increased to {1:d} units'", '.', 'format', '(', 'log_tag', ',', 'int', '(', 'updated_provisioning', ')', ')', ')', 'return', 'updated_provisioning'] | Increase the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we increase with
:returns: int -- New provisioning value
:type max_provisioned_writes: int
:param max_provisioned_writes: Configured max provisioned writes
:type consumed_write_units_percent: float
:param consumed_write_units_percent: Number of consumed write units
:type log_tag: str
:param log_tag: Prefix for the log | ['Increase', 'the', 'current_provisioning', 'with', 'units', 'units'] | train | https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/calculators.py#L297-L339 |
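A worked example of the provisioning arithmetic in `increase_writes_in_units` above; all numbers are made up and the import path simply mirrors the file path recorded in this entry (dynamic_dynamodb/calculators.py).

from dynamic_dynamodb.calculators import increase_writes_in_units  # path assumed from this entry

# ceil(100 * 120/100) = 120 exceeds the current 100, so the 50-unit increase is applied
# on top of 120: 120 + 50 = 170, which is below the 200-unit cap, so 170 is returned.
new_units = increase_writes_in_units(100, 50, 200, 120, 'table: demo')  # -> 170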
7,443 | sixty-north/cosmic-ray | src/cosmic_ray/modules.py | find_modules | def find_modules(module_path):
"""Find all modules in the module (possibly package) represented by `module_path`.
Args:
module_path: A pathlib.Path to a Python package or module.
Returns: An iterable of paths Python modules (i.e. *py files).
"""
if module_path.is_file():
if module_path.suffix == '.py':
yield module_path
elif module_path.is_dir():
pyfiles = glob.glob('{}/**/*.py'.format(module_path), recursive=True)
yield from (Path(pyfile) for pyfile in pyfiles) | python | def find_modules(module_path):
"""Find all modules in the module (possibly package) represented by `module_path`.
Args:
module_path: A pathlib.Path to a Python package or module.
Returns: An iterable of paths Python modules (i.e. *py files).
"""
if module_path.is_file():
if module_path.suffix == '.py':
yield module_path
elif module_path.is_dir():
pyfiles = glob.glob('{}/**/*.py'.format(module_path), recursive=True)
yield from (Path(pyfile) for pyfile in pyfiles) | ['def', 'find_modules', '(', 'module_path', ')', ':', 'if', 'module_path', '.', 'is_file', '(', ')', ':', 'if', 'module_path', '.', 'suffix', '==', "'.py'", ':', 'yield', 'module_path', 'elif', 'module_path', '.', 'is_dir', '(', ')', ':', 'pyfiles', '=', 'glob', '.', 'glob', '(', "'{}/**/*.py'", '.', 'format', '(', 'module_path', ')', ',', 'recursive', '=', 'True', ')', 'yield', 'from', '(', 'Path', '(', 'pyfile', ')', 'for', 'pyfile', 'in', 'pyfiles', ')'] | Find all modules in the module (possibly package) represented by `module_path`.
Args:
module_path: A pathlib.Path to a Python package or module.
Returns: An iterable of paths Python modules (i.e. *py files). | ['Find', 'all', 'modules', 'in', 'the', 'module', '(', 'possibly', 'package', ')', 'represented', 'by', 'module_path', '.'] | train | https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/modules.py#L7-L20 |
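A short usage sketch for `find_modules` above. The import path mirrors the file path recorded in this entry (src/cosmic_ray/modules.py); the directory name passed in is a placeholder.

from pathlib import Path
from cosmic_ray.modules import find_modules  # module path assumed from src/cosmic_ray/modules.py

for pyfile in find_modules(Path('my_package')):  # 'my_package' is a placeholder directory or .py file
    print(pyfile)  # each *.py file found (recursively, when a directory is given)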
7,444 | havardgulldahl/jottalib | src/jottalib/JFS.py | JFS.getObject | def getObject(self, url_or_requests_response, params=None):
'Take a url or some xml response from JottaCloud and wrap it up with the corresponding JFS* class'
if isinstance(url_or_requests_response, requests.models.Response):
# this is a raw xml response that we need to parse
url = url_or_requests_response.url
o = lxml.objectify.fromstring(url_or_requests_response.content)
else:
# this is an url that we need to fetch
url = url_or_requests_response
o = self.get(url, params=params) # (.get() will parse this for us)
parent = os.path.dirname(url).replace('up.jottacloud.com', 'www.jottacloud.com')
if o.tag == 'error':
JFSError.raiseError(o, url)
elif o.tag == 'device': return JFSDevice(o, jfs=self, parentpath=parent)
elif o.tag == 'folder': return JFSFolder(o, jfs=self, parentpath=parent)
elif o.tag == 'mountPoint': return JFSMountPoint(o, jfs=self, parentpath=parent)
elif o.tag == 'restoredFiles': return JFSFile(o, jfs=self, parentpath=parent)
elif o.tag == 'deleteFiles': return JFSFile(o, jfs=self, parentpath=parent)
elif o.tag == 'file':
return ProtoFile.factory(o, jfs=self, parentpath=parent)
# try:
# if o.latestRevision.state == 'INCOMPLETE':
# return JFSIncompleteFile(o, jfs=self, parentpath=parent)
# elif o.latestRevision.state == 'CORRUPT':
# return JFSCorruptFile(o, jfs=self, parentpath=parent)
# except AttributeError:
# return JFSFile(o, jfs=self, parentpath=parent)
elif o.tag == 'enableSharing': return JFSenableSharing(o, jfs=self)
elif o.tag == 'user':
self.fs = o
return self.fs
elif o.tag == 'filedirlist': return JFSFileDirList(o, jfs=self, parentpath=parent)
elif o.tag == 'searchresult': return JFSsearchresult(o, jfs=self)
raise JFSError("invalid object: %s <- %s" % (repr(o), url_or_requests_response)) | python | def getObject(self, url_or_requests_response, params=None):
'Take a url or some xml response from JottaCloud and wrap it up with the corresponding JFS* class'
if isinstance(url_or_requests_response, requests.models.Response):
# this is a raw xml response that we need to parse
url = url_or_requests_response.url
o = lxml.objectify.fromstring(url_or_requests_response.content)
else:
# this is an url that we need to fetch
url = url_or_requests_response
o = self.get(url, params=params) # (.get() will parse this for us)
parent = os.path.dirname(url).replace('up.jottacloud.com', 'www.jottacloud.com')
if o.tag == 'error':
JFSError.raiseError(o, url)
elif o.tag == 'device': return JFSDevice(o, jfs=self, parentpath=parent)
elif o.tag == 'folder': return JFSFolder(o, jfs=self, parentpath=parent)
elif o.tag == 'mountPoint': return JFSMountPoint(o, jfs=self, parentpath=parent)
elif o.tag == 'restoredFiles': return JFSFile(o, jfs=self, parentpath=parent)
elif o.tag == 'deleteFiles': return JFSFile(o, jfs=self, parentpath=parent)
elif o.tag == 'file':
return ProtoFile.factory(o, jfs=self, parentpath=parent)
# try:
# if o.latestRevision.state == 'INCOMPLETE':
# return JFSIncompleteFile(o, jfs=self, parentpath=parent)
# elif o.latestRevision.state == 'CORRUPT':
# return JFSCorruptFile(o, jfs=self, parentpath=parent)
# except AttributeError:
# return JFSFile(o, jfs=self, parentpath=parent)
elif o.tag == 'enableSharing': return JFSenableSharing(o, jfs=self)
elif o.tag == 'user':
self.fs = o
return self.fs
elif o.tag == 'filedirlist': return JFSFileDirList(o, jfs=self, parentpath=parent)
elif o.tag == 'searchresult': return JFSsearchresult(o, jfs=self)
raise JFSError("invalid object: %s <- %s" % (repr(o), url_or_requests_response)) | ['def', 'getObject', '(', 'self', ',', 'url_or_requests_response', ',', 'params', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'url_or_requests_response', ',', 'requests', '.', 'models', '.', 'Response', ')', ':', '# this is a raw xml response that we need to parse', 'url', '=', 'url_or_requests_response', '.', 'url', 'o', '=', 'lxml', '.', 'objectify', '.', 'fromstring', '(', 'url_or_requests_response', '.', 'content', ')', 'else', ':', '# this is an url that we need to fetch', 'url', '=', 'url_or_requests_response', 'o', '=', 'self', '.', 'get', '(', 'url', ',', 'params', '=', 'params', ')', '# (.get() will parse this for us)', 'parent', '=', 'os', '.', 'path', '.', 'dirname', '(', 'url', ')', '.', 'replace', '(', "'up.jottacloud.com'", ',', "'www.jottacloud.com'", ')', 'if', 'o', '.', 'tag', '==', "'error'", ':', 'JFSError', '.', 'raiseError', '(', 'o', ',', 'url', ')', 'elif', 'o', '.', 'tag', '==', "'device'", ':', 'return', 'JFSDevice', '(', 'o', ',', 'jfs', '=', 'self', ',', 'parentpath', '=', 'parent', ')', 'elif', 'o', '.', 'tag', '==', "'folder'", ':', 'return', 'JFSFolder', '(', 'o', ',', 'jfs', '=', 'self', ',', 'parentpath', '=', 'parent', ')', 'elif', 'o', '.', 'tag', '==', "'mountPoint'", ':', 'return', 'JFSMountPoint', '(', 'o', ',', 'jfs', '=', 'self', ',', 'parentpath', '=', 'parent', ')', 'elif', 'o', '.', 'tag', '==', "'restoredFiles'", ':', 'return', 'JFSFile', '(', 'o', ',', 'jfs', '=', 'self', ',', 'parentpath', '=', 'parent', ')', 'elif', 'o', '.', 'tag', '==', "'deleteFiles'", ':', 'return', 'JFSFile', '(', 'o', ',', 'jfs', '=', 'self', ',', 'parentpath', '=', 'parent', ')', 'elif', 'o', '.', 'tag', '==', "'file'", ':', 'return', 'ProtoFile', '.', 'factory', '(', 'o', ',', 'jfs', '=', 'self', ',', 'parentpath', '=', 'parent', ')', '# try:', "# if o.latestRevision.state == 'INCOMPLETE':", '# return JFSIncompleteFile(o, jfs=self, parentpath=parent)', "# elif o.latestRevision.state == 'CORRUPT':", '# return JFSCorruptFile(o, jfs=self, parentpath=parent)', '# except AttributeError:', '# return JFSFile(o, jfs=self, parentpath=parent)', 'elif', 'o', '.', 'tag', '==', "'enableSharing'", ':', 'return', 'JFSenableSharing', '(', 'o', ',', 'jfs', '=', 'self', ')', 'elif', 'o', '.', 'tag', '==', "'user'", ':', 'self', '.', 'fs', '=', 'o', 'return', 'self', '.', 'fs', 'elif', 'o', '.', 'tag', '==', "'filedirlist'", ':', 'return', 'JFSFileDirList', '(', 'o', ',', 'jfs', '=', 'self', ',', 'parentpath', '=', 'parent', ')', 'elif', 'o', '.', 'tag', '==', "'searchresult'", ':', 'return', 'JFSsearchresult', '(', 'o', ',', 'jfs', '=', 'self', ')', 'raise', 'JFSError', '(', '"invalid object: %s <- %s"', '%', '(', 'repr', '(', 'o', ')', ',', 'url_or_requests_response', ')', ')'] | Take a url or some xml response from JottaCloud and wrap it up with the corresponding JFS* class | ['Take', 'a', 'url', 'or', 'some', 'xml', 'response', 'from', 'JottaCloud', 'and', 'wrap', 'it', 'up', 'with', 'the', 'corresponding', 'JFS', '*', 'class'] | train | https://github.com/havardgulldahl/jottalib/blob/4d015e4309b1d9055e561ec757363fb2632b4eb7/src/jottalib/JFS.py#L1024-L1058 |
7,445 | GeorgeArgyros/symautomata | symautomata/pdacnf.py | ReducePDA.get | def get(self, statediag):
"""
Args:
statediag (list): The states of the PDA
Returns:
list: A reduced list of states using BFS
"""
if len(statediag) < 1:
print 'PDA is empty and can not be reduced'
return statediag
newstatediag = self.bfs(statediag, statediag[0])
return newstatediag | python | def get(self, statediag):
"""
Args:
statediag (list): The states of the PDA
Returns:
list: A reduced list of states using BFS
"""
if len(statediag) < 1:
print 'PDA is empty and can not be reduced'
return statediag
newstatediag = self.bfs(statediag, statediag[0])
return newstatediag | ['def', 'get', '(', 'self', ',', 'statediag', ')', ':', 'if', 'len', '(', 'statediag', ')', '<', '1', ':', 'print', "'PDA is empty and can not be reduced'", 'return', 'statediag', 'newstatediag', '=', 'self', '.', 'bfs', '(', 'statediag', ',', 'statediag', '[', '0', ']', ')', 'return', 'newstatediag'] | Args:
statediag (list): The states of the PDA
Returns:
list: A reduced list of states using BFS | ['Args', ':', 'statediag', '(', 'list', ')', ':', 'The', 'states', 'of', 'the', 'PDA', 'Returns', ':', 'list', ':', 'A', 'reduced', 'list', 'of', 'states', 'using', 'BFS'] | train | https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/pdacnf.py#L106-L117 |
7,446 | ray-project/ray | python/ray/tune/schedulers/median_stopping_rule.py | MedianStoppingRule.on_trial_remove | def on_trial_remove(self, trial_runner, trial):
"""Marks trial as completed if it is paused and has previously ran."""
if trial.status is Trial.PAUSED and trial in self._results:
self._completed_trials.add(trial) | python | def on_trial_remove(self, trial_runner, trial):
"""Marks trial as completed if it is paused and has previously ran."""
if trial.status is Trial.PAUSED and trial in self._results:
self._completed_trials.add(trial) | ['def', 'on_trial_remove', '(', 'self', ',', 'trial_runner', ',', 'trial', ')', ':', 'if', 'trial', '.', 'status', 'is', 'Trial', '.', 'PAUSED', 'and', 'trial', 'in', 'self', '.', '_results', ':', 'self', '.', '_completed_trials', '.', 'add', '(', 'trial', ')'] | Marks trial as completed if it is paused and has previously ran. | ['Marks', 'trial', 'as', 'completed', 'if', 'it', 'is', 'paused', 'and', 'has', 'previously', 'ran', '.'] | train | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/schedulers/median_stopping_rule.py#L91-L94 |
7,447 | vlukes/dicom2fem | dicom2fem/meshio.py | for_format | def for_format(filename, format=None, writable=False, prefix_dir=None):
"""
Create a MeshIO instance for file `filename` with forced `format`.
Parameters
----------
filename : str
The name of the mesh file.
format : str
One of supported formats. If None,
:func:`MeshIO.any_from_filename()` is called instead.
writable : bool
If True, verify that the mesh format is writable.
prefix_dir : str
The directory name to prepend to `filename`.
Returns
-------
io : MeshIO subclass instance
The MeshIO subclass instance corresponding to the `format`.
"""
ext = op.splitext(filename)[1].lower()
try:
_format = supported_formats[ext]
except KeyError:
_format = None
format = get_default(format, _format)
if format is None:
io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
else:
if not isinstance(format, basestr):
raise ValueError('ambigous suffix! (%s -> %s)' % (ext, format))
if format not in io_table:
raise ValueError('unknown output mesh format! (%s)' % format)
if writable and ('w' not in supported_capabilities[format]):
output_writable_meshes()
msg = 'write support not implemented for output mesh format "%s",' \
' see above!' \
% format
raise ValueError(msg)
if prefix_dir is not None:
filename = op.normpath(op.join(prefix_dir, filename))
io = io_table[format](filename)
return io | python | def for_format(filename, format=None, writable=False, prefix_dir=None):
"""
Create a MeshIO instance for file `filename` with forced `format`.
Parameters
----------
filename : str
The name of the mesh file.
format : str
One of supported formats. If None,
:func:`MeshIO.any_from_filename()` is called instead.
writable : bool
If True, verify that the mesh format is writable.
prefix_dir : str
The directory name to prepend to `filename`.
Returns
-------
io : MeshIO subclass instance
The MeshIO subclass instance corresponding to the `format`.
"""
ext = op.splitext(filename)[1].lower()
try:
_format = supported_formats[ext]
except KeyError:
_format = None
format = get_default(format, _format)
if format is None:
io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
else:
if not isinstance(format, basestr):
raise ValueError('ambigous suffix! (%s -> %s)' % (ext, format))
if format not in io_table:
raise ValueError('unknown output mesh format! (%s)' % format)
if writable and ('w' not in supported_capabilities[format]):
output_writable_meshes()
msg = 'write support not implemented for output mesh format "%s",' \
' see above!' \
% format
raise ValueError(msg)
if prefix_dir is not None:
filename = op.normpath(op.join(prefix_dir, filename))
io = io_table[format](filename)
return io | ['def', 'for_format', '(', 'filename', ',', 'format', '=', 'None', ',', 'writable', '=', 'False', ',', 'prefix_dir', '=', 'None', ')', ':', 'ext', '=', 'op', '.', 'splitext', '(', 'filename', ')', '[', '1', ']', '.', 'lower', '(', ')', 'try', ':', '_format', '=', 'supported_formats', '[', 'ext', ']', 'except', 'KeyError', ':', '_format', '=', 'None', 'format', '=', 'get_default', '(', 'format', ',', '_format', ')', 'if', 'format', 'is', 'None', ':', 'io', '=', 'MeshIO', '.', 'any_from_filename', '(', 'filename', ',', 'prefix_dir', '=', 'prefix_dir', ')', 'else', ':', 'if', 'not', 'isinstance', '(', 'format', ',', 'basestr', ')', ':', 'raise', 'ValueError', '(', "'ambigous suffix! (%s -> %s)'", '%', '(', 'ext', ',', 'format', ')', ')', 'if', 'format', 'not', 'in', 'io_table', ':', 'raise', 'ValueError', '(', "'unknown output mesh format! (%s)'", '%', 'format', ')', 'if', 'writable', 'and', '(', "'w'", 'not', 'in', 'supported_capabilities', '[', 'format', ']', ')', ':', 'output_writable_meshes', '(', ')', 'msg', '=', '\'write support not implemented for output mesh format "%s",\'', "' see above!'", '%', 'format', 'raise', 'ValueError', '(', 'msg', ')', 'if', 'prefix_dir', 'is', 'not', 'None', ':', 'filename', '=', 'op', '.', 'normpath', '(', 'op', '.', 'join', '(', 'prefix_dir', ',', 'filename', ')', ')', 'io', '=', 'io_table', '[', 'format', ']', '(', 'filename', ')', 'return', 'io'] | Create a MeshIO instance for file `filename` with forced `format`.
Parameters
----------
filename : str
The name of the mesh file.
format : str
One of supported formats. If None,
:func:`MeshIO.any_from_filename()` is called instead.
writable : bool
If True, verify that the mesh format is writable.
prefix_dir : str
The directory name to prepend to `filename`.
Returns
-------
io : MeshIO subclass instance
The MeshIO subclass instance corresponding to the `format`. | ['Create', 'a', 'MeshIO', 'instance', 'for', 'file', 'filename', 'with', 'forced', 'format', '.'] | train | https://github.com/vlukes/dicom2fem/blob/3056c977ca7119e01984d3aa0c4448a1c6c2430f/dicom2fem/meshio.py#L2614-L2665 |
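An illustrative call for `for_format` above. The file name and extension are assumptions — the extension has to appear in the module-level `supported_formats` mapping, and `writable=True` additionally requires write capability for the resolved format.

# Assumption: '.vtk' is registered in supported_formats with write support.
io = for_format('domain.vtk', writable=True)  # returns the matching MeshIO subclass instance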
7,448 | tailhook/injections | injections/core.py | Container.interconnect_all | def interconnect_all(self):
"""Propagate dependencies for provided instances"""
for dep in topologically_sorted(self._provides):
if hasattr(dep, '__injections__') and not hasattr(dep, '__injections_source__'):
self.inject(dep) | python | def interconnect_all(self):
"""Propagate dependencies for provided instances"""
for dep in topologically_sorted(self._provides):
if hasattr(dep, '__injections__') and not hasattr(dep, '__injections_source__'):
self.inject(dep) | ['def', 'interconnect_all', '(', 'self', ')', ':', 'for', 'dep', 'in', 'topologically_sorted', '(', 'self', '.', '_provides', ')', ':', 'if', 'hasattr', '(', 'dep', ',', "'__injections__'", ')', 'and', 'not', 'hasattr', '(', 'dep', ',', "'__injections_source__'", ')', ':', 'self', '.', 'inject', '(', 'dep', ')'] | Propagate dependencies for provided instances | ['Propagate', 'dependencies', 'for', 'provided', 'instances'] | train | https://github.com/tailhook/injections/blob/564e077ad5445c12952a92fe1f685f8c6a8d2667/injections/core.py#L146-L150 |
7,449 | ph4r05/monero-serialize | monero_serialize/xmrrpc.py | Modeler.container_load | async def container_load(self, container_type, params=None, container=None, obj=None):
"""
Loads container of elements from the reader. Supports the container ref.
Returns loaded container.
:param container_type:
:param params:
:param container:
:param obj:
:return:
"""
if isinstance(obj, IModel):
obj = obj.val
if obj is None:
return NoSetSentinel()
c_len = len(obj)
elem_type = params[0] if params else None
if elem_type is None:
elem_type = container_type.ELEM_TYPE
res = container if container else []
for i in range(c_len):
try:
self.tracker.push_index(i)
fvalue = await self._load_field(elem_type,
params[1:] if params else None,
x.eref(res, i) if container else None, obj=obj[i])
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
if not container and not isinstance(fvalue, NoSetSentinel):
res.append(fvalue)
return res | python | async def container_load(self, container_type, params=None, container=None, obj=None):
"""
Loads container of elements from the reader. Supports the container ref.
Returns loaded container.
:param container_type:
:param params:
:param container:
:param obj:
:return:
"""
if isinstance(obj, IModel):
obj = obj.val
if obj is None:
return NoSetSentinel()
c_len = len(obj)
elem_type = params[0] if params else None
if elem_type is None:
elem_type = container_type.ELEM_TYPE
res = container if container else []
for i in range(c_len):
try:
self.tracker.push_index(i)
fvalue = await self._load_field(elem_type,
params[1:] if params else None,
x.eref(res, i) if container else None, obj=obj[i])
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
if not container and not isinstance(fvalue, NoSetSentinel):
res.append(fvalue)
return res | ['async', 'def', 'container_load', '(', 'self', ',', 'container_type', ',', 'params', '=', 'None', ',', 'container', '=', 'None', ',', 'obj', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'obj', ',', 'IModel', ')', ':', 'obj', '=', 'obj', '.', 'val', 'if', 'obj', 'is', 'None', ':', 'return', 'NoSetSentinel', '(', ')', 'c_len', '=', 'len', '(', 'obj', ')', 'elem_type', '=', 'params', '[', '0', ']', 'if', 'params', 'else', 'None', 'if', 'elem_type', 'is', 'None', ':', 'elem_type', '=', 'container_type', '.', 'ELEM_TYPE', 'res', '=', 'container', 'if', 'container', 'else', '[', ']', 'for', 'i', 'in', 'range', '(', 'c_len', ')', ':', 'try', ':', 'self', '.', 'tracker', '.', 'push_index', '(', 'i', ')', 'fvalue', '=', 'await', 'self', '.', '_load_field', '(', 'elem_type', ',', 'params', '[', '1', ':', ']', 'if', 'params', 'else', 'None', ',', 'x', '.', 'eref', '(', 'res', ',', 'i', ')', 'if', 'container', 'else', 'None', ',', 'obj', '=', 'obj', '[', 'i', ']', ')', 'self', '.', 'tracker', '.', 'pop', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'helpers', '.', 'ArchiveException', '(', 'e', ',', 'tracker', '=', 'self', '.', 'tracker', ')', 'from', 'e', 'if', 'not', 'container', 'and', 'not', 'isinstance', '(', 'fvalue', ',', 'NoSetSentinel', ')', ':', 'res', '.', 'append', '(', 'fvalue', ')', 'return', 'res'] | Loads container of elements from the reader. Supports the container ref.
Returns loaded container.
:param container_type:
:param params:
:param container:
:param obj:
:return: | ['Loads', 'container', 'of', 'elements', 'from', 'the', 'reader', '.', 'Supports', 'the', 'container', 'ref', '.', 'Returns', 'loaded', 'container', '.'] | train | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L1064-L1101 |
7,450 | Microsoft/nni | tools/nni_cmd/config_utils.py | Experiments.add_experiment | def add_experiment(self, id, port, time, file_name, platform):
'''set {key:value} paris to self.experiment'''
self.experiments[id] = {}
self.experiments[id]['port'] = port
self.experiments[id]['startTime'] = time
self.experiments[id]['endTime'] = 'N/A'
self.experiments[id]['status'] = 'INITIALIZED'
self.experiments[id]['fileName'] = file_name
self.experiments[id]['platform'] = platform
self.write_file() | python | def add_experiment(self, id, port, time, file_name, platform):
'''set {key:value} paris to self.experiment'''
self.experiments[id] = {}
self.experiments[id]['port'] = port
self.experiments[id]['startTime'] = time
self.experiments[id]['endTime'] = 'N/A'
self.experiments[id]['status'] = 'INITIALIZED'
self.experiments[id]['fileName'] = file_name
self.experiments[id]['platform'] = platform
self.write_file() | ['def', 'add_experiment', '(', 'self', ',', 'id', ',', 'port', ',', 'time', ',', 'file_name', ',', 'platform', ')', ':', 'self', '.', 'experiments', '[', 'id', ']', '=', '{', '}', 'self', '.', 'experiments', '[', 'id', ']', '[', "'port'", ']', '=', 'port', 'self', '.', 'experiments', '[', 'id', ']', '[', "'startTime'", ']', '=', 'time', 'self', '.', 'experiments', '[', 'id', ']', '[', "'endTime'", ']', '=', "'N/A'", 'self', '.', 'experiments', '[', 'id', ']', '[', "'status'", ']', '=', "'INITIALIZED'", 'self', '.', 'experiments', '[', 'id', ']', '[', "'fileName'", ']', '=', 'file_name', 'self', '.', 'experiments', '[', 'id', ']', '[', "'platform'", ']', '=', 'platform', 'self', '.', 'write_file', '(', ')'] | set {key:value} paris to self.experiment | ['set', '{', 'key', ':', 'value', '}', 'paris', 'to', 'self', '.', 'experiment'] | train | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_cmd/config_utils.py#L76-L85 |
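A hedged sketch of recording an experiment with `add_experiment` above. Constructing `Experiments()` with no arguments and all of the field values are assumptions; the method itself just stores the shown keys and persists them through `write_file()`.

# Assumption: Experiments() can be built with its default constructor.
experiments = Experiments()
experiments.add_experiment('aBcDeF12', 8080, '2019-04-01 10:00:00', 'experiment.yml', 'local')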
7,451 | stevepeak/tornpsql | tornpsql/__init__.py | _Connection.execute | def execute(self, query, *parameters, **kwargs):
"""Same as query, but do not process results. Always returns `None`."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters, kwargs)
except:
raise
finally:
cursor.close() | python | def execute(self, query, *parameters, **kwargs):
"""Same as query, but do not process results. Always returns `None`."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters, kwargs)
except:
raise
finally:
cursor.close() | ['def', 'execute', '(', 'self', ',', 'query', ',', '*', 'parameters', ',', '*', '*', 'kwargs', ')', ':', 'cursor', '=', 'self', '.', '_cursor', '(', ')', 'try', ':', 'self', '.', '_execute', '(', 'cursor', ',', 'query', ',', 'parameters', ',', 'kwargs', ')', 'except', ':', 'raise', 'finally', ':', 'cursor', '.', 'close', '(', ')'] | Same as query, but do not process results. Always returns `None`. | ['Same', 'as', 'query', 'but', 'do', 'not', 'process', 'results', '.', 'Always', 'returns', 'None', '.'] | train | https://github.com/stevepeak/tornpsql/blob/a109d0f95d6432d0e3b5eba1c9854357ba527f27/tornpsql/__init__.py#L219-L229 |
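An illustrative call for `execute` above. `db` is assumed to be an already-constructed tornpsql connection (connection setup is not part of this entry); the statement and parameters are placeholders.

# Assumption: `db` is a tornpsql connection created elsewhere.
db.execute("UPDATE users SET name = %s WHERE id = %s", "amy", 1)  # runs the statement; always returns None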
7,452 | photo/openphoto-python | trovebox/objects/action.py | Action.delete | def delete(self, **kwds):
"""
Endpoint: /action/<id>/delete.json
Deletes this action.
Returns True if successful.
Raises a TroveboxError if not.
"""
result = self._client.action.delete(self, **kwds)
self._delete_fields()
return result | python | def delete(self, **kwds):
"""
Endpoint: /action/<id>/delete.json
Deletes this action.
Returns True if successful.
Raises a TroveboxError if not.
"""
result = self._client.action.delete(self, **kwds)
self._delete_fields()
return result | ['def', 'delete', '(', 'self', ',', '*', '*', 'kwds', ')', ':', 'result', '=', 'self', '.', '_client', '.', 'action', '.', 'delete', '(', 'self', ',', '*', '*', 'kwds', ')', 'self', '.', '_delete_fields', '(', ')', 'return', 'result'] | Endpoint: /action/<id>/delete.json
Deletes this action.
Returns True if successful.
Raises a TroveboxError if not. | ['Endpoint', ':', '/', 'action', '/', '<id', '>', '/', 'delete', '.', 'json'] | train | https://github.com/photo/openphoto-python/blob/209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b/trovebox/objects/action.py#L27-L37 |
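A brief sketch for the `delete` call above. `action` is assumed to be an Action object previously retrieved through a configured Trovebox client, which is not shown in this entry.

# Assumption: `action` was fetched earlier via an authenticated trovebox client.
ok = action.delete()  # True on success; the object's local fields are cleared afterwards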
7,453 | globality-corp/microcosm | microcosm/metadata.py | Metadata.get_root_path | def get_root_path(self, name):
"""
Attempt to compute a root path for a (hopefully importable) name.
Based in part on Flask's `root_path` calculation. See:
https://github.com/mitsuhiko/flask/blob/master/flask/helpers.py#L777
"""
module = modules.get(name)
if module is not None and hasattr(module, '__file__'):
return dirname(abspath(module.__file__))
# Flask keeps looking at this point. We instead set the root path to None,
# assume that the user doesn't need resource loading, and raise an error
# when resolving the resource path.
return None | python | def get_root_path(self, name):
"""
Attempt to compute a root path for a (hopefully importable) name.
Based in part on Flask's `root_path` calculation. See:
https://github.com/mitsuhiko/flask/blob/master/flask/helpers.py#L777
"""
module = modules.get(name)
if module is not None and hasattr(module, '__file__'):
return dirname(abspath(module.__file__))
# Flask keeps looking at this point. We instead set the root path to None,
# assume that the user doesn't need resource loading, and raise an error
# when resolving the resource path.
return None | ['def', 'get_root_path', '(', 'self', ',', 'name', ')', ':', 'module', '=', 'modules', '.', 'get', '(', 'name', ')', 'if', 'module', 'is', 'not', 'None', 'and', 'hasattr', '(', 'module', ',', "'__file__'", ')', ':', 'return', 'dirname', '(', 'abspath', '(', 'module', '.', '__file__', ')', ')', '# Flask keeps looking at this point. We instead set the root path to None,', "# assume that the user doesn't need resource loading, and raise an error", '# when resolving the resource path.', 'return', 'None'] | Attempt to compute a root path for a (hopefully importable) name.
Based in part on Flask's `root_path` calculation. See:
https://github.com/mitsuhiko/flask/blob/master/flask/helpers.py#L777 | ['Attempt', 'to', 'compute', 'a', 'root', 'path', 'for', 'a', '(', 'hopefully', 'importable', ')', 'name', '.'] | train | https://github.com/globality-corp/microcosm/blob/6856200ca295da4269c8c1c9de7db0b97c1f4523/microcosm/metadata.py#L32-L48 |
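A small sketch of the lookup performed by `get_root_path` above: it resolves only names already present in `sys.modules` that expose a `__file__`, and returns None otherwise. Building the `Metadata` instance itself is assumed here.

import json  # make sure the module is present in sys.modules
# Assumption: `metadata` is a microcosm Metadata instance constructed elsewhere.
root = metadata.get_root_path('json')               # directory containing the stdlib json package
missing = metadata.get_root_path('never_imported')  # None: resource loading will not be available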
7,454 | DLR-RM/RAFCON | source/rafcon/gui/controllers/preferences_window.py | PreferencesWindowController._on_add_library | def _on_add_library(self, *event):
"""Callback method handling the addition of a new library
"""
self.view['library_tree_view'].grab_focus()
if react_to_event(self.view, self.view['library_tree_view'], event):
temp_library_name = "<LIB_NAME_%s>" % self._lib_counter
self._lib_counter += 1
library_config = self.core_config_model.get_current_config_value("LIBRARY_PATHS", use_preliminary=True,
default={})
library_config[temp_library_name] = "<LIB_PATH>"
self.core_config_model.set_preliminary_config_value("LIBRARY_PATHS", library_config)
self._select_row_by_column_value(self.view['library_tree_view'], self.library_list_store,
self.KEY_STORAGE_ID, temp_library_name)
return True | python | def _on_add_library(self, *event):
"""Callback method handling the addition of a new library
"""
self.view['library_tree_view'].grab_focus()
if react_to_event(self.view, self.view['library_tree_view'], event):
temp_library_name = "<LIB_NAME_%s>" % self._lib_counter
self._lib_counter += 1
library_config = self.core_config_model.get_current_config_value("LIBRARY_PATHS", use_preliminary=True,
default={})
library_config[temp_library_name] = "<LIB_PATH>"
self.core_config_model.set_preliminary_config_value("LIBRARY_PATHS", library_config)
self._select_row_by_column_value(self.view['library_tree_view'], self.library_list_store,
self.KEY_STORAGE_ID, temp_library_name)
return True | ['def', '_on_add_library', '(', 'self', ',', '*', 'event', ')', ':', 'self', '.', 'view', '[', "'library_tree_view'", ']', '.', 'grab_focus', '(', ')', 'if', 'react_to_event', '(', 'self', '.', 'view', ',', 'self', '.', 'view', '[', "'library_tree_view'", ']', ',', 'event', ')', ':', 'temp_library_name', '=', '"<LIB_NAME_%s>"', '%', 'self', '.', '_lib_counter', 'self', '.', '_lib_counter', '+=', '1', 'library_config', '=', 'self', '.', 'core_config_model', '.', 'get_current_config_value', '(', '"LIBRARY_PATHS"', ',', 'use_preliminary', '=', 'True', ',', 'default', '=', '{', '}', ')', 'library_config', '[', 'temp_library_name', ']', '=', '"<LIB_PATH>"', 'self', '.', 'core_config_model', '.', 'set_preliminary_config_value', '(', '"LIBRARY_PATHS"', ',', 'library_config', ')', 'self', '.', '_select_row_by_column_value', '(', 'self', '.', 'view', '[', "'library_tree_view'", ']', ',', 'self', '.', 'library_list_store', ',', 'self', '.', 'KEY_STORAGE_ID', ',', 'temp_library_name', ')', 'return', 'True'] | Callback method handling the addition of a new library | ['Callback', 'method', 'handling', 'the', 'addition', 'of', 'a', 'new', 'library'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/preferences_window.py#L297-L310 |
7,455 | jedie/PyHardLinkBackup | PyHardLinkBackup/phlb/phlb_main.py | HardLinkBackup.print_update | def print_update(self):
"""
print some status information in between.
"""
print("\r\n")
now = datetime.datetime.now()
print("Update info: (from: %s)" % now.strftime("%c"))
current_total_size = self.total_stined_bytes + self.total_new_bytes
if self.total_errored_items:
print(" * WARNING: %i omitted files!" % self.total_errored_items)
print(" * fast backup: %i files" % self.total_fast_backup)
print(
" * new content saved: %i files (%s %.1f%%)"
% (
self.total_new_file_count,
human_filesize(self.total_new_bytes),
to_percent(self.total_new_bytes, current_total_size),
)
)
print(
" * stint space via hardlinks: %i files (%s %.1f%%)"
% (
self.total_file_link_count,
human_filesize(self.total_stined_bytes),
to_percent(self.total_stined_bytes, current_total_size),
)
)
duration = default_timer() - self.start_time
performance = current_total_size / duration / 1024.0 / 1024.0
print(" * present performance: %.1fMB/s\n" % performance) | python | def print_update(self):
"""
print some status information in between.
"""
print("\r\n")
now = datetime.datetime.now()
print("Update info: (from: %s)" % now.strftime("%c"))
current_total_size = self.total_stined_bytes + self.total_new_bytes
if self.total_errored_items:
print(" * WARNING: %i omitted files!" % self.total_errored_items)
print(" * fast backup: %i files" % self.total_fast_backup)
print(
" * new content saved: %i files (%s %.1f%%)"
% (
self.total_new_file_count,
human_filesize(self.total_new_bytes),
to_percent(self.total_new_bytes, current_total_size),
)
)
print(
" * stint space via hardlinks: %i files (%s %.1f%%)"
% (
self.total_file_link_count,
human_filesize(self.total_stined_bytes),
to_percent(self.total_stined_bytes, current_total_size),
)
)
duration = default_timer() - self.start_time
performance = current_total_size / duration / 1024.0 / 1024.0
print(" * present performance: %.1fMB/s\n" % performance) | ['def', 'print_update', '(', 'self', ')', ':', 'print', '(', '"\\r\\n"', ')', 'now', '=', 'datetime', '.', 'datetime', '.', 'now', '(', ')', 'print', '(', '"Update info: (from: %s)"', '%', 'now', '.', 'strftime', '(', '"%c"', ')', ')', 'current_total_size', '=', 'self', '.', 'total_stined_bytes', '+', 'self', '.', 'total_new_bytes', 'if', 'self', '.', 'total_errored_items', ':', 'print', '(', '" * WARNING: %i omitted files!"', '%', 'self', '.', 'total_errored_items', ')', 'print', '(', '" * fast backup: %i files"', '%', 'self', '.', 'total_fast_backup', ')', 'print', '(', '" * new content saved: %i files (%s %.1f%%)"', '%', '(', 'self', '.', 'total_new_file_count', ',', 'human_filesize', '(', 'self', '.', 'total_new_bytes', ')', ',', 'to_percent', '(', 'self', '.', 'total_new_bytes', ',', 'current_total_size', ')', ',', ')', ')', 'print', '(', '" * stint space via hardlinks: %i files (%s %.1f%%)"', '%', '(', 'self', '.', 'total_file_link_count', ',', 'human_filesize', '(', 'self', '.', 'total_stined_bytes', ')', ',', 'to_percent', '(', 'self', '.', 'total_stined_bytes', ',', 'current_total_size', ')', ',', ')', ')', 'duration', '=', 'default_timer', '(', ')', '-', 'self', '.', 'start_time', 'performance', '=', 'current_total_size', '/', 'duration', '/', '1024.0', '/', '1024.0', 'print', '(', '" * present performance: %.1fMB/s\\n"', '%', 'performance', ')'] | print some status information in between. | ['print', 'some', 'status', 'information', 'in', 'between', '.'] | train | https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/phlb_main.py#L551-L586 |
7,456 | ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/connect/notifications.py | AsyncConsumer.error | def error(self):
"""Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: the error value/payload, if found.
:rtype: str
"""
status_code, error_msg, payload = self.check_error()
if status_code != 200 and not error_msg and not payload:
return "Async error (%s). Status code: %r" % (self.async_id, status_code)
return error_msg | python | def error(self):
"""Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: the error value/payload, if found.
:rtype: str
"""
status_code, error_msg, payload = self.check_error()
if status_code != 200 and not error_msg and not payload:
return "Async error (%s). Status code: %r" % (self.async_id, status_code)
return error_msg | ['def', 'error', '(', 'self', ')', ':', 'status_code', ',', 'error_msg', ',', 'payload', '=', 'self', '.', 'check_error', '(', ')', 'if', 'status_code', '!=', '200', 'and', 'not', 'error_msg', 'and', 'not', 'payload', ':', 'return', '"Async error (%s). Status code: %r"', '%', '(', 'self', '.', 'async_id', ',', 'status_code', ')', 'return', 'error_msg'] | Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: the error value/payload, if found.
:rtype: str | ['Check', 'if', 'the', 'async', 'response', 'is', 'an', 'error', '.'] | train | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/connect/notifications.py#L126-L140 |
7,457 | Karaage-Cluster/karaage | karaage/plugins/kgapplications/views/transitions.py | TransitionOpen.get_next_action | def get_next_action(self, request, application, roles):
""" Retrieve the next state. """
application.reopen()
link, is_secret = base.get_email_link(application)
emails.send_invite_email(application, link, is_secret)
messages.success(
request,
"Sent an invitation to %s."
% application.applicant.email)
return 'success' | python | def get_next_action(self, request, application, roles):
""" Retrieve the next state. """
application.reopen()
link, is_secret = base.get_email_link(application)
emails.send_invite_email(application, link, is_secret)
messages.success(
request,
"Sent an invitation to %s."
% application.applicant.email)
return 'success' | ['def', 'get_next_action', '(', 'self', ',', 'request', ',', 'application', ',', 'roles', ')', ':', 'application', '.', 'reopen', '(', ')', 'link', ',', 'is_secret', '=', 'base', '.', 'get_email_link', '(', 'application', ')', 'emails', '.', 'send_invite_email', '(', 'application', ',', 'link', ',', 'is_secret', ')', 'messages', '.', 'success', '(', 'request', ',', '"Sent an invitation to %s."', '%', 'application', '.', 'applicant', '.', 'email', ')', 'return', "'success'"] | Retrieve the next state. | ['Retrieve', 'the', 'next', 'state', '.'] | train | https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/plugins/kgapplications/views/transitions.py#L11-L20 |
7,458 | pandas-dev/pandas | pandas/core/indexes/multi.py | MultiIndex.difference | def difference(self, other, sort=None):
"""
Compute set difference of two MultiIndex objects
Parameters
----------
other : MultiIndex
sort : False or None, default None
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
diff : MultiIndex
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
difference = this.values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_tuples(difference, sortorder=0,
names=result_names) | python | def difference(self, other, sort=None):
"""
Compute set difference of two MultiIndex objects
Parameters
----------
other : MultiIndex
sort : False or None, default None
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
diff : MultiIndex
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
difference = this.values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_tuples(difference, sortorder=0,
names=result_names) | ['def', 'difference', '(', 'self', ',', 'other', ',', 'sort', '=', 'None', ')', ':', 'self', '.', '_validate_sort_keyword', '(', 'sort', ')', 'self', '.', '_assert_can_do_setop', '(', 'other', ')', 'other', ',', 'result_names', '=', 'self', '.', '_convert_can_do_setop', '(', 'other', ')', 'if', 'len', '(', 'other', ')', '==', '0', ':', 'return', 'self', 'if', 'self', '.', 'equals', '(', 'other', ')', ':', 'return', 'MultiIndex', '(', 'levels', '=', 'self', '.', 'levels', ',', 'codes', '=', '[', '[', ']', ']', '*', 'self', '.', 'nlevels', ',', 'names', '=', 'result_names', ',', 'verify_integrity', '=', 'False', ')', 'this', '=', 'self', '.', '_get_unique_index', '(', ')', 'indexer', '=', 'this', '.', 'get_indexer', '(', 'other', ')', 'indexer', '=', 'indexer', '.', 'take', '(', '(', 'indexer', '!=', '-', '1', ')', '.', 'nonzero', '(', ')', '[', '0', ']', ')', 'label_diff', '=', 'np', '.', 'setdiff1d', '(', 'np', '.', 'arange', '(', 'this', '.', 'size', ')', ',', 'indexer', ',', 'assume_unique', '=', 'True', ')', 'difference', '=', 'this', '.', 'values', '.', 'take', '(', 'label_diff', ')', 'if', 'sort', 'is', 'None', ':', 'difference', '=', 'sorted', '(', 'difference', ')', 'if', 'len', '(', 'difference', ')', '==', '0', ':', 'return', 'MultiIndex', '(', 'levels', '=', '[', '[', ']', ']', '*', 'self', '.', 'nlevels', ',', 'codes', '=', '[', '[', ']', ']', '*', 'self', '.', 'nlevels', ',', 'names', '=', 'result_names', ',', 'verify_integrity', '=', 'False', ')', 'else', ':', 'return', 'MultiIndex', '.', 'from_tuples', '(', 'difference', ',', 'sortorder', '=', '0', ',', 'names', '=', 'result_names', ')'] | Compute set difference of two MultiIndex objects
Parameters
----------
other : MultiIndex
sort : False or None, default None
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
diff : MultiIndex | ['Compute', 'set', 'difference', 'of', 'two', 'MultiIndex', 'objects'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2982-L3032 |
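A small usage sketch with the public pandas API only (no pandas internals assumed); the tuples are arbitrary.

import pandas as pd

left = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)], names=["key", "num"])
right = pd.MultiIndex.from_tuples([("a", 2)], names=["key", "num"])

# Labels present in `left` but not in `right`; sort=None keeps the default sorting.
diff = left.difference(right, sort=None)
print(diff)  # contains the ('a', 1) and ('b', 1) labels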
7,459 | cohorte/cohorte-herald | python/herald/transports/http/discovery_multicast.py | MulticastReceiver._handle_heartbeat | def _handle_heartbeat(self, sender, data):
"""
Handles a raw heart beat
:param sender: Sender (address, port) tuple
:param data: Raw packet data
"""
# Format of packet
parsed, data = self._unpack("<B", data)
format = parsed[0]
if format == PACKET_FORMAT_VERSION:
# Kind of beat
parsed, data = self._unpack("<B", data)
kind = parsed[0]
if kind == PACKET_TYPE_HEARTBEAT:
# Extract content
parsed, data = self._unpack("<H", data)
port = parsed[0]
path, data = self._unpack_string(data)
uid, data = self._unpack_string(data)
node_uid, data = self._unpack_string(data)
try:
app_id, data = self._unpack_string(data)
except struct.error:
# Compatibility with previous version
app_id = herald.DEFAULT_APPLICATION_ID
elif kind == PACKET_TYPE_LASTBEAT:
# Peer is going away
uid, data = self._unpack_string(data)
app_id, data = self._unpack_string(data)
port = -1
path = None
node_uid = None
else:
_logger.warning("Unknown kind of packet: %d", kind)
return
try:
self._callback(kind, uid, node_uid, app_id, sender[0], port, path)
except Exception as ex:
_logger.exception("Error handling heart beat: %s", ex) | python | def _handle_heartbeat(self, sender, data):
"""
Handles a raw heart beat
:param sender: Sender (address, port) tuple
:param data: Raw packet data
"""
# Format of packet
parsed, data = self._unpack("<B", data)
format = parsed[0]
if format == PACKET_FORMAT_VERSION:
# Kind of beat
parsed, data = self._unpack("<B", data)
kind = parsed[0]
if kind == PACKET_TYPE_HEARTBEAT:
# Extract content
parsed, data = self._unpack("<H", data)
port = parsed[0]
path, data = self._unpack_string(data)
uid, data = self._unpack_string(data)
node_uid, data = self._unpack_string(data)
try:
app_id, data = self._unpack_string(data)
except struct.error:
# Compatibility with previous version
app_id = herald.DEFAULT_APPLICATION_ID
elif kind == PACKET_TYPE_LASTBEAT:
# Peer is going away
uid, data = self._unpack_string(data)
app_id, data = self._unpack_string(data)
port = -1
path = None
node_uid = None
else:
_logger.warning("Unknown kind of packet: %d", kind)
return
try:
self._callback(kind, uid, node_uid, app_id, sender[0], port, path)
except Exception as ex:
_logger.exception("Error handling heart beat: %s", ex) | ['def', '_handle_heartbeat', '(', 'self', ',', 'sender', ',', 'data', ')', ':', '# Format of packet', 'parsed', ',', 'data', '=', 'self', '.', '_unpack', '(', '"<B"', ',', 'data', ')', 'format', '=', 'parsed', '[', '0', ']', 'if', 'format', '==', 'PACKET_FORMAT_VERSION', ':', '# Kind of beat', 'parsed', ',', 'data', '=', 'self', '.', '_unpack', '(', '"<B"', ',', 'data', ')', 'kind', '=', 'parsed', '[', '0', ']', 'if', 'kind', '==', 'PACKET_TYPE_HEARTBEAT', ':', '# Extract content', 'parsed', ',', 'data', '=', 'self', '.', '_unpack', '(', '"<H"', ',', 'data', ')', 'port', '=', 'parsed', '[', '0', ']', 'path', ',', 'data', '=', 'self', '.', '_unpack_string', '(', 'data', ')', 'uid', ',', 'data', '=', 'self', '.', '_unpack_string', '(', 'data', ')', 'node_uid', ',', 'data', '=', 'self', '.', '_unpack_string', '(', 'data', ')', 'try', ':', 'app_id', ',', 'data', '=', 'self', '.', '_unpack_string', '(', 'data', ')', 'except', 'struct', '.', 'error', ':', '# Compatibility with previous version', 'app_id', '=', 'herald', '.', 'DEFAULT_APPLICATION_ID', 'elif', 'kind', '==', 'PACKET_TYPE_LASTBEAT', ':', '# Peer is going away', 'uid', ',', 'data', '=', 'self', '.', '_unpack_string', '(', 'data', ')', 'app_id', ',', 'data', '=', 'self', '.', '_unpack_string', '(', 'data', ')', 'port', '=', '-', '1', 'path', '=', 'None', 'node_uid', '=', 'None', 'else', ':', '_logger', '.', 'warning', '(', '"Unknown kind of packet: %d"', ',', 'kind', ')', 'return', 'try', ':', 'self', '.', '_callback', '(', 'kind', ',', 'uid', ',', 'node_uid', ',', 'app_id', ',', 'sender', '[', '0', ']', ',', 'port', ',', 'path', ')', 'except', 'Exception', 'as', 'ex', ':', '_logger', '.', 'exception', '(', '"Error handling heart beat: %s"', ',', 'ex', ')'] | Handles a raw heart beat
:param sender: Sender (address, port) tuple
:param data: Raw packet data | ['Handles', 'a', 'raw', 'heart', 'beat'] | train | https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/discovery_multicast.py#L401-L443 |
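The parsing above is plain struct unpacking; a generic stdlib sketch of the same pattern follows. This is not the actual Herald packet layout, only an illustration of consuming "<B"/"<H" fields from the front of a bytes buffer.

import struct

data = struct.pack("<BBH", 1, 2, 8080)   # version, kind, port -- made-up values

def take(fmt, buf):
    # Unpack one struct format from the front of buf and return (values, rest).
    size = struct.calcsize(fmt)
    return struct.unpack(fmt, buf[:size]), buf[size:]

(version,), data = take("<B", data)
(kind,), data = take("<B", data)
(port,), data = take("<H", data)
print(version, kind, port)   # 1 2 8080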
7,460 | pyroscope/pyrobase | src/pyrobase/fmt.py | iso_datetime | def iso_datetime(timestamp=None):
""" Convert UNIX timestamp to ISO datetime string.
@param timestamp: UNIX epoch value (default: the current time).
@return: Timestamp formatted as "YYYY-mm-dd HH:MM:SS".
"""
if timestamp is None:
timestamp = time.time()
return datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:19] | python | def iso_datetime(timestamp=None):
""" Convert UNIX timestamp to ISO datetime string.
@param timestamp: UNIX epoch value (default: the current time).
@return: Timestamp formatted as "YYYY-mm-dd HH:MM:SS".
"""
if timestamp is None:
timestamp = time.time()
return datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:19] | ['def', 'iso_datetime', '(', 'timestamp', '=', 'None', ')', ':', 'if', 'timestamp', 'is', 'None', ':', 'timestamp', '=', 'time', '.', 'time', '(', ')', 'return', 'datetime', '.', 'datetime', '.', 'fromtimestamp', '(', 'timestamp', ')', '.', 'isoformat', '(', "' '", ')', '[', ':', '19', ']'] | Convert UNIX timestamp to ISO datetime string.
@param timestamp: UNIX epoch value (default: the current time).
@return: Timestamp formatted as "YYYY-mm-dd HH:MM:SS". | ['Convert', 'UNIX', 'timestamp', 'to', 'ISO', 'datetime', 'string', '.'] | train | https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/fmt.py#L54-L62 |
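The same conversion using only the standard library; the epoch value is arbitrary and the printed result depends on the local timezone.

import datetime

ts = 1500000000
print(datetime.datetime.fromtimestamp(ts).isoformat(" ")[:19])
# e.g. '2017-07-14 02:40:00' on a UTC machine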
7,461 | torchbox/wagtail-import-export | wagtailimportexport/views.py | export | def export(request, page_id, export_unpublished=False):
"""
API endpoint of this source site to export a part of the page tree
rooted at page_id
Requests are made by a destination site's import_from_api view.
"""
try:
if export_unpublished:
root_page = Page.objects.get(id=page_id)
else:
root_page = Page.objects.get(id=page_id, live=True)
except Page.DoesNotExist:
return JsonResponse({'error': _('page not found')})
payload = export_pages(root_page, export_unpublished=export_unpublished)
return JsonResponse(payload) | python | def export(request, page_id, export_unpublished=False):
"""
API endpoint of this source site to export a part of the page tree
rooted at page_id
Requests are made by a destination site's import_from_api view.
"""
try:
if export_unpublished:
root_page = Page.objects.get(id=page_id)
else:
root_page = Page.objects.get(id=page_id, live=True)
except Page.DoesNotExist:
return JsonResponse({'error': _('page not found')})
payload = export_pages(root_page, export_unpublished=export_unpublished)
return JsonResponse(payload) | ['def', 'export', '(', 'request', ',', 'page_id', ',', 'export_unpublished', '=', 'False', ')', ':', 'try', ':', 'if', 'export_unpublished', ':', 'root_page', '=', 'Page', '.', 'objects', '.', 'get', '(', 'id', '=', 'page_id', ')', 'else', ':', 'root_page', '=', 'Page', '.', 'objects', '.', 'get', '(', 'id', '=', 'page_id', ',', 'live', '=', 'True', ')', 'except', 'Page', '.', 'DoesNotExist', ':', 'return', 'JsonResponse', '(', '{', "'error'", ':', '_', '(', "'page not found'", ')', '}', ')', 'payload', '=', 'export_pages', '(', 'root_page', ',', 'export_unpublished', '=', 'export_unpublished', ')', 'return', 'JsonResponse', '(', 'payload', ')'] | API endpoint of this source site to export a part of the page tree
rooted at page_id
Requests are made by a destination site's import_from_api view. | ['API', 'endpoint', 'of', 'this', 'source', 'site', 'to', 'export', 'a', 'part', 'of', 'the', 'page', 'tree', 'rooted', 'at', 'page_id'] | train | https://github.com/torchbox/wagtail-import-export/blob/4a4b0b0fde00e8062c52a8bc3e57cb91acfc920e/wagtailimportexport/views.py#L120-L137 |
7,462 | WebarchivCZ/WA-KAT | src/wa_kat/templates/static/js/Lib/site-packages/components/log_view2.py | LogView.hide | def hide(cls):
"""
Hide the log interface.
"""
cls.el.style.display = "none"
cls.overlay.hide()
cls.bind() | python | def hide(cls):
"""
Hide the log interface.
"""
cls.el.style.display = "none"
cls.overlay.hide()
cls.bind() | ['def', 'hide', '(', 'cls', ')', ':', 'cls', '.', 'el', '.', 'style', '.', 'display', '=', '"none"', 'cls', '.', 'overlay', '.', 'hide', '(', ')', 'cls', '.', 'bind', '(', ')'] | Hide the log interface. | ['Hide', 'the', 'log', 'interface', '.'] | train | https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/log_view2.py#L58-L64 |
7,463 | databio/pypiper | pypiper/manager.py | PipelineManager._report_profile | def _report_profile(self, command, lock_name, elapsed_time, memory):
"""
Writes a string to self.pipeline_profile_file.
"""
message_raw = str(command) + "\t " + \
str(lock_name) + "\t" + \
str(datetime.timedelta(seconds = round(elapsed_time, 2))) + "\t " + \
str(memory)
with open(self.pipeline_profile_file, "a") as myfile:
myfile.write(message_raw + "\n") | python | def _report_profile(self, command, lock_name, elapsed_time, memory):
"""
Writes a string to self.pipeline_profile_file.
"""
message_raw = str(command) + "\t " + \
str(lock_name) + "\t" + \
str(datetime.timedelta(seconds = round(elapsed_time, 2))) + "\t " + \
str(memory)
with open(self.pipeline_profile_file, "a") as myfile:
myfile.write(message_raw + "\n") | ['def', '_report_profile', '(', 'self', ',', 'command', ',', 'lock_name', ',', 'elapsed_time', ',', 'memory', ')', ':', 'message_raw', '=', 'str', '(', 'command', ')', '+', '"\\t "', '+', 'str', '(', 'lock_name', ')', '+', '"\\t"', '+', 'str', '(', 'datetime', '.', 'timedelta', '(', 'seconds', '=', 'round', '(', 'elapsed_time', ',', '2', ')', ')', ')', '+', '"\\t "', '+', 'str', '(', 'memory', ')', 'with', 'open', '(', 'self', '.', 'pipeline_profile_file', ',', '"a"', ')', 'as', 'myfile', ':', 'myfile', '.', 'write', '(', 'message_raw', '+', '"\\n"', ')'] | Writes a string to self.pipeline_profile_file. | ['Writes', 'a', 'string', 'to', 'self', '.', 'pipeline_profile_file', '.'] | train | https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1126-L1136 |
7,464 | MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/learning/holdout.py | iterative_stratification | def iterative_stratification(node_label_matrix, training_set_size, number_of_categories, random_seed=0):
"""
Iterative data fold stratification/balancing for two folds.
Based on: Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011).
On the stratification of multi-label data.
In Machine Learning and Knowledge Discovery in Databases (pp. 145-158).
Springer Berlin Heidelberg.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- training_set_size: The minimum required size for the training set.
- number_of_categories: The number of categories/classes in the learning.
- random_seed: A seed for numpy random.
Outputs: - train_set: A NumPy array containing the training set node ids.
- test_set: A NumPy array containing the testing set node ids.
"""
number_of_labelled_nodes = node_label_matrix.shape[0]
testing_set_size = number_of_labelled_nodes - training_set_size
training_set_proportion = training_set_size/number_of_labelled_nodes
testing_set_proportion = testing_set_size/number_of_labelled_nodes
# Calculate the desired number of examples of each label at each subset.
desired_label_number = np.zeros((2, number_of_categories), dtype=np.int64)
node_label_matrix = node_label_matrix.tocsc()
for j in range(number_of_categories):
category_label_number = node_label_matrix.getcol(j).indices.size
desired_label_number[0, j] = math.ceil(category_label_number*training_set_proportion)
desired_label_number[1, j] = category_label_number - desired_label_number[0, j]
train_ids = list()
test_ids = list()
append_train_id = train_ids.append
append_test_id = test_ids.append
# Randomize process
np.random.seed(random_seed)
while True:
if len(train_ids) + len(test_ids) >= number_of_labelled_nodes:
break
# Find the label with the fewest (but at least one) remaining examples, breaking the ties randomly
remaining_label_distribution = desired_label_number.sum(axis=0)
min_label = np.min(remaining_label_distribution[np.where(remaining_label_distribution > 0)[0]])
label_indices = np.where(remaining_label_distribution == min_label)[0]
chosen_label = int(np.random.choice(label_indices, 1)[0])
# Find the subset with the largest number of desired examples for this label,
# breaking ties by considering the largest number of desired examples, breaking further ties randomly.
fold_max_remaining_labels = np.max(desired_label_number[:, chosen_label])
fold_indices = np.where(desired_label_number[:, chosen_label] == fold_max_remaining_labels)[0]
chosen_fold = int(np.random.choice(fold_indices, 1)[0])
# Choose a random example for the selected label.
relevant_nodes = node_label_matrix.getcol(chosen_label).indices
chosen_node = int(np.random.choice(np.setdiff1d(relevant_nodes,
np.union1d(np.array(train_ids),
np.array(test_ids))),
1)[0])
if chosen_fold == 0:
append_train_id(chosen_node)
desired_label_number[0, node_label_matrix.getrow(chosen_node).indices] -= 1
elif chosen_fold == 1:
append_test_id(chosen_node)
desired_label_number[1, node_label_matrix.getrow(chosen_node).indices] -= 1
else:
raise RuntimeError
return np.array(train_ids), np.array(test_ids) | python | def iterative_stratification(node_label_matrix, training_set_size, number_of_categories, random_seed=0):
"""
Iterative data fold stratification/balancing for two folds.
Based on: Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011).
On the stratification of multi-label data.
In Machine Learning and Knowledge Discovery in Databases (pp. 145-158).
Springer Berlin Heidelberg.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- training_set_size: The minimum required size for the training set.
- number_of_categories: The number of categories/classes in the learning.
- random_seed: A seed for numpy random.
Outputs: - train_set: A NumPy array containing the training set node ids.
- test_set: A NumPy array containing the testing set node ids.
"""
number_of_labelled_nodes = node_label_matrix.shape[0]
testing_set_size = number_of_labelled_nodes - training_set_size
training_set_proportion = training_set_size/number_of_labelled_nodes
testing_set_proportion = testing_set_size/number_of_labelled_nodes
# Calculate the desired number of examples of each label at each subset.
desired_label_number = np.zeros((2, number_of_categories), dtype=np.int64)
node_label_matrix = node_label_matrix.tocsc()
for j in range(number_of_categories):
category_label_number = node_label_matrix.getcol(j).indices.size
desired_label_number[0, j] = math.ceil(category_label_number*training_set_proportion)
desired_label_number[1, j] = category_label_number - desired_label_number[0, j]
train_ids = list()
test_ids = list()
append_train_id = train_ids.append
append_test_id = test_ids.append
# Randomize process
np.random.seed(random_seed)
while True:
if len(train_ids) + len(test_ids) >= number_of_labelled_nodes:
break
# Find the label with the fewest (but at least one) remaining examples, breaking the ties randomly
remaining_label_distribution = desired_label_number.sum(axis=0)
min_label = np.min(remaining_label_distribution[np.where(remaining_label_distribution > 0)[0]])
label_indices = np.where(remaining_label_distribution == min_label)[0]
chosen_label = int(np.random.choice(label_indices, 1)[0])
# Find the subset with the largest number of desired examples for this label,
# breaking ties by considering the largest number of desired examples, breaking further ties randomly.
fold_max_remaining_labels = np.max(desired_label_number[:, chosen_label])
fold_indices = np.where(desired_label_number[:, chosen_label] == fold_max_remaining_labels)[0]
chosen_fold = int(np.random.choice(fold_indices, 1)[0])
# Choose a random example for the selected label.
relevant_nodes = node_label_matrix.getcol(chosen_label).indices
chosen_node = int(np.random.choice(np.setdiff1d(relevant_nodes,
np.union1d(np.array(train_ids),
np.array(test_ids))),
1)[0])
if chosen_fold == 0:
append_train_id(chosen_node)
desired_label_number[0, node_label_matrix.getrow(chosen_node).indices] -= 1
elif chosen_fold == 1:
append_test_id(chosen_node)
desired_label_number[1, node_label_matrix.getrow(chosen_node).indices] -= 1
else:
raise RuntimeError
return np.array(train_ids), np.array(test_ids) | ['def', 'iterative_stratification', '(', 'node_label_matrix', ',', 'training_set_size', ',', 'number_of_categories', ',', 'random_seed', '=', '0', ')', ':', 'number_of_labelled_nodes', '=', 'node_label_matrix', '.', 'shape', '[', '0', ']', 'testing_set_size', '=', 'number_of_labelled_nodes', '-', 'training_set_size', 'training_set_proportion', '=', 'training_set_size', '/', 'number_of_labelled_nodes', 'testing_set_proportion', '=', 'testing_set_size', '/', 'number_of_labelled_nodes', '# Calculate the desired number of examples of each label at each subset.', 'desired_label_number', '=', 'np', '.', 'zeros', '(', '(', '2', ',', 'number_of_categories', ')', ',', 'dtype', '=', 'np', '.', 'int64', ')', 'node_label_matrix', '=', 'node_label_matrix', '.', 'tocsc', '(', ')', 'for', 'j', 'in', 'range', '(', 'number_of_categories', ')', ':', 'category_label_number', '=', 'node_label_matrix', '.', 'getcol', '(', 'j', ')', '.', 'indices', '.', 'size', 'desired_label_number', '[', '0', ',', 'j', ']', '=', 'math', '.', 'ceil', '(', 'category_label_number', '*', 'training_set_proportion', ')', 'desired_label_number', '[', '1', ',', 'j', ']', '=', 'category_label_number', '-', 'desired_label_number', '[', '0', ',', 'j', ']', 'train_ids', '=', 'list', '(', ')', 'test_ids', '=', 'list', '(', ')', 'append_train_id', '=', 'train_ids', '.', 'append', 'append_test_id', '=', 'test_ids', '.', 'append', '# Randomize process', 'np', '.', 'random', '.', 'seed', '(', 'random_seed', ')', 'while', 'True', ':', 'if', 'len', '(', 'train_ids', ')', '+', 'len', '(', 'test_ids', ')', '>=', 'number_of_labelled_nodes', ':', 'break', '# Find the label with the fewest (but at least one) remaining examples, breaking the ties randomly', 'remaining_label_distribution', '=', 'desired_label_number', '.', 'sum', '(', 'axis', '=', '0', ')', 'min_label', '=', 'np', '.', 'min', '(', 'remaining_label_distribution', '[', 'np', '.', 'where', '(', 'remaining_label_distribution', '>', '0', ')', '[', '0', ']', ']', ')', 'label_indices', '=', 'np', '.', 'where', '(', 'remaining_label_distribution', '==', 'min_label', ')', '[', '0', ']', 'chosen_label', '=', 'int', '(', 'np', '.', 'random', '.', 'choice', '(', 'label_indices', ',', '1', ')', '[', '0', ']', ')', '# Find the subset with the largest number of desired examples for this label,', '# breaking ties by considering the largest number of desired examples, breaking further ties randomly.', 'fold_max_remaining_labels', '=', 'np', '.', 'max', '(', 'desired_label_number', '[', ':', ',', 'chosen_label', ']', ')', 'fold_indices', '=', 'np', '.', 'where', '(', 'desired_label_number', '[', ':', ',', 'chosen_label', ']', '==', 'fold_max_remaining_labels', ')', '[', '0', ']', 'chosen_fold', '=', 'int', '(', 'np', '.', 'random', '.', 'choice', '(', 'fold_indices', ',', '1', ')', '[', '0', ']', ')', '# Choose a random example for the selected label.', 'relevant_nodes', '=', 'node_label_matrix', '.', 'getcol', '(', 'chosen_label', ')', '.', 'indices', 'chosen_node', '=', 'int', '(', 'np', '.', 'random', '.', 'choice', '(', 'np', '.', 'setdiff1d', '(', 'relevant_nodes', ',', 'np', '.', 'union1d', '(', 'np', '.', 'array', '(', 'train_ids', ')', ',', 'np', '.', 'array', '(', 'test_ids', ')', ')', ')', ',', '1', ')', '[', '0', ']', ')', 'if', 'chosen_fold', '==', '0', ':', 'append_train_id', '(', 'chosen_node', ')', 'desired_label_number', '[', '0', ',', 'node_label_matrix', '.', 'getrow', '(', 'chosen_node', ')', '.', 'indices', ']', '-=', '1', 'elif', 
'chosen_fold', '==', '1', ':', 'append_test_id', '(', 'chosen_node', ')', 'desired_label_number', '[', '1', ',', 'node_label_matrix', '.', 'getrow', '(', 'chosen_node', ')', '.', 'indices', ']', '-=', '1', 'else', ':', 'raise', 'RuntimeError', 'return', 'np', '.', 'array', '(', 'train_ids', ')', ',', 'np', '.', 'array', '(', 'test_ids', ')'] | Iterative data fold stratification/balancing for two folds.
Based on: Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011).
On the stratification of multi-label data.
In Machine Learning and Knowledge Discovery in Databases (pp. 145-158).
Springer Berlin Heidelberg.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- training_set_size: The minimum required size for the training set.
- number_of_categories: The number of categories/classes in the learning.
- random_seed: A seed for numpy random.
Outputs: - train_set: A NumPy array containing the training set node ids.
- test_set: A NumPy array containing the testing set node ids. | ['Iterative', 'data', 'fold', 'stratification', '/', 'balancing', 'for', 'two', 'folds', '.'] | train | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/learning/holdout.py#L202-L270 |
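A usage sketch, assuming the iterative_stratification function shown above is importable from its module; the tiny multi-label matrix is made up for illustration.

import numpy as np
from scipy.sparse import csr_matrix

# 6 nodes, 2 label columns (toy multi-label ground truth).
labels = csr_matrix(np.array([[1, 0],
                              [1, 0],
                              [0, 1],
                              [0, 1],
                              [1, 1],
                              [1, 1]], dtype=np.int64))

train_ids, test_ids = iterative_stratification(labels,
                                               training_set_size=4,
                                               number_of_categories=2,
                                               random_seed=0)
print(sorted(train_ids), sorted(test_ids))  # a label-balanced 4/2 split of nodes 0..5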
7,465 | klmitch/turnstile | turnstile/control.py | LimitData.get_limits | def get_limits(self, limit_sum=None):
"""
Gets the current limit data if it is different from the data
indicated by limit_sum. The db argument is used for hydrating
the limit objects. Raises a NoChangeException if the
limit_sum represents no change, otherwise returns a tuple
consisting of the current limit_sum and a list of Limit
objects.
"""
with self.limit_lock:
# Any changes?
if limit_sum and self.limit_sum == limit_sum:
raise NoChangeException()
# Return a tuple of the limits and limit sum
return (self.limit_sum, self.limit_data) | python | def get_limits(self, limit_sum=None):
"""
Gets the current limit data if it is different from the data
indicated by limit_sum. The db argument is used for hydrating
the limit objects. Raises a NoChangeException if the
limit_sum represents no change, otherwise returns a tuple
consisting of the current limit_sum and a list of Limit
objects.
"""
with self.limit_lock:
# Any changes?
if limit_sum and self.limit_sum == limit_sum:
raise NoChangeException()
# Return a tuple of the limits and limit sum
return (self.limit_sum, self.limit_data) | ['def', 'get_limits', '(', 'self', ',', 'limit_sum', '=', 'None', ')', ':', 'with', 'self', '.', 'limit_lock', ':', '# Any changes?', 'if', 'limit_sum', 'and', 'self', '.', 'limit_sum', '==', 'limit_sum', ':', 'raise', 'NoChangeException', '(', ')', '# Return a tuple of the limits and limit sum', 'return', '(', 'self', '.', 'limit_sum', ',', 'self', '.', 'limit_data', ')'] | Gets the current limit data if it is different from the data
indicated by limit_sum. The db argument is used for hydrating
the limit objects. Raises a NoChangeException if the
limit_sum represents no change, otherwise returns a tuple
consisting of the current limit_sum and a list of Limit
objects. | ['Gets', 'the', 'current', 'limit', 'data', 'if', 'it', 'is', 'different', 'from', 'the', 'data', 'indicated', 'by', 'limit_sum', '.', 'The', 'db', 'argument', 'is', 'used', 'for', 'hydrating', 'the', 'limit', 'objects', '.', 'Raises', 'a', 'NoChangeException', 'if', 'the', 'limit_sum', 'represents', 'no', 'change', 'otherwise', 'returns', 'a', 'tuple', 'consisting', 'of', 'the', 'current', 'limit_sum', 'and', 'a', 'list', 'of', 'Limit', 'objects', '.'] | train | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L82-L98 |
7,466 | python-cmd2/cmd2 | examples/tab_autocomp_dynamic.py | TabCompleteExample.do_video | def do_video(self, args):
"""Video management command demonstrates multiple layers of sub-commands being handled by AutoCompleter"""
func = getattr(args, 'func', None)
if func is not None:
# Call whatever subcommand function was selected
func(self, args)
else:
# No subcommand was provided, so call help
self.do_help('video') | python | def do_video(self, args):
"""Video management command demonstrates multiple layers of sub-commands being handled by AutoCompleter"""
func = getattr(args, 'func', None)
if func is not None:
# Call whatever subcommand function was selected
func(self, args)
else:
# No subcommand was provided, so call help
self.do_help('video') | ['def', 'do_video', '(', 'self', ',', 'args', ')', ':', 'func', '=', 'getattr', '(', 'args', ',', "'func'", ',', 'None', ')', 'if', 'func', 'is', 'not', 'None', ':', '# Call whatever subcommand function was selected', 'func', '(', 'self', ',', 'args', ')', 'else', ':', '# No subcommand was provided, so call help', 'self', '.', 'do_help', '(', "'video'", ')'] | Video management command demonstrates multiple layers of sub-commands being handled by AutoCompleter | ['Video', 'management', 'command', 'demonstrates', 'multiple', 'layers', 'of', 'sub', '-', 'commands', 'being', 'handled', 'by', 'AutoCompleter'] | train | https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/examples/tab_autocomp_dynamic.py#L223-L231 |
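The getattr(args, 'func', None) dispatch above is the standard argparse sub-command pattern; a standalone stdlib sketch (the sub-command name and handler are illustrative, not cmd2 API).

import argparse

def list_videos(args):
    print("listing videos")

parser = argparse.ArgumentParser(prog="video")
subparsers = parser.add_subparsers()
list_parser = subparsers.add_parser("list")
list_parser.set_defaults(func=list_videos)   # stored on the parsed namespace

args = parser.parse_args(["list"])
func = getattr(args, "func", None)
if func is not None:
    func(args)            # dispatch to the selected sub-command
else:
    parser.print_help()   # no sub-command given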
7,467 | anlutro/russell | russell/engine.py | BlogEngine.copy_assets | def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to) | python | def copy_assets(self, path='assets'):
"""
Copy assets into the destination directory.
"""
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to) | ['def', 'copy_assets', '(', 'self', ',', 'path', '=', "'assets'", ')', ':', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'root_path', ',', 'path', ')', 'for', 'root', ',', '_', ',', 'files', 'in', 'os', '.', 'walk', '(', 'path', ')', ':', 'for', 'file', 'in', 'files', ':', 'fullpath', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'file', ')', 'relpath', '=', 'os', '.', 'path', '.', 'relpath', '(', 'fullpath', ',', 'path', ')', 'copy_to', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', '_get_dist_path', '(', 'relpath', ',', 'directory', '=', "'assets'", ')', ')', 'LOG', '.', 'debug', '(', "'copying %r to %r'", ',', 'fullpath', ',', 'copy_to', ')', 'shutil', '.', 'copyfile', '(', 'fullpath', ',', 'copy_to', ')'] | Copy assets into the destination directory. | ['Copy', 'assets', 'into', 'the', 'destination', 'directory', '.'] | train | https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L121-L132 |
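The walk-and-copy core of copy_assets, reduced to a standalone stdlib sketch; the source and destination paths are placeholders.

import os
import shutil

src = "assets"          # placeholder source tree
dst = "dist/assets"     # placeholder destination

for root, _, files in os.walk(src):
    for name in files:
        fullpath = os.path.join(root, name)
        relpath = os.path.relpath(fullpath, src)
        target = os.path.join(dst, relpath)
        os.makedirs(os.path.dirname(target), exist_ok=True)
        shutil.copyfile(fullpath, target)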
7,468 | fbcotter/py3nvml | py3nvml/py3nvml.py | nvmlDeviceGetBoardId | def nvmlDeviceGetBoardId(handle):
r"""
/**
* Retrieves the device boardId from 0-N.
* Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with
* \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well.
* The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across
* reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and
* the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will
* always return those values but they will always be different from each other).
*
*
* For Fermi &tm; or newer fully supported devices.
*
* @param device The identifier of the target device
* @param boardId Reference in which to return the device's board ID
*
* @return
* - \ref NVML_SUCCESS if \a boardId has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetBoardId
"""
c_id = c_uint();
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardId")
ret = fn(handle, byref(c_id))
_nvmlCheckReturn(ret)
return bytes_to_str(c_id.value) | python | def nvmlDeviceGetBoardId(handle):
r"""
/**
* Retrieves the device boardId from 0-N.
* Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with
* \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well.
* The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across
* reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and
* the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will
* always return those values but they will always be different from each other).
*
*
* For Fermi &tm; or newer fully supported devices.
*
* @param device The identifier of the target device
* @param boardId Reference in which to return the device's board ID
*
* @return
* - \ref NVML_SUCCESS if \a boardId has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetBoardId
"""
c_id = c_uint();
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardId")
ret = fn(handle, byref(c_id))
_nvmlCheckReturn(ret)
return bytes_to_str(c_id.value) | ['def', 'nvmlDeviceGetBoardId', '(', 'handle', ')', ':', 'c_id', '=', 'c_uint', '(', ')', 'fn', '=', '_nvmlGetFunctionPointer', '(', '"nvmlDeviceGetBoardId"', ')', 'ret', '=', 'fn', '(', 'handle', ',', 'byref', '(', 'c_id', ')', ')', '_nvmlCheckReturn', '(', 'ret', ')', 'return', 'bytes_to_str', '(', 'c_id', '.', 'value', ')'] | r"""
/**
* Retrieves the device boardId from 0-N.
* Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with
* \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well.
* The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across
* reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and
* the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will
* always return those values but they will always be different from each other).
*
*
* For Fermi &tm; or newer fully supported devices.
*
* @param device The identifier of the target device
* @param boardId Reference in which to return the device's board ID
*
* @return
* - \ref NVML_SUCCESS if \a boardId has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceGetBoardId | ['r', '/', '**', '*', 'Retrieves', 'the', 'device', 'boardId', 'from', '0', '-', 'N', '.', '*', 'Devices', 'with', 'the', 'same', 'boardId', 'indicate', 'GPUs', 'connected', 'to', 'the', 'same', 'PLX', '.', 'Use', 'in', 'conjunction', 'with', '*', '\\', 'ref', 'nvmlDeviceGetMultiGpuBoard', '()', 'to', 'decide', 'if', 'they', 'are', 'on', 'the', 'same', 'board', 'as', 'well', '.', '*', 'The', 'boardId', 'returned', 'is', 'a', 'unique', 'ID', 'for', 'the', 'current', 'configuration', '.', 'Uniqueness', 'and', 'ordering', 'across', '*', 'reboots', 'and', 'system', 'configurations', 'is', 'not', 'guaranteed', '(', 'i', '.', 'e', '.', 'if', 'a', 'Tesla', 'K40c', 'returns', '0x100', 'and', '*', 'the', 'two', 'GPUs', 'on', 'a', 'Tesla', 'K10', 'in', 'the', 'same', 'system', 'returns', '0x200', 'it', 'is', 'not', 'guaranteed', 'they', 'will', '*', 'always', 'return', 'those', 'values', 'but', 'they', 'will', 'always', 'be', 'different', 'from', 'each', 'other', ')', '.', '*', '*', '*', 'For', 'Fermi', '&tm', ';', 'or', 'newer', 'fully', 'supported', 'devices', '.', '*', '*'] | train | https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L2095-L2126 |
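A hedged usage sketch: it assumes the usual py3nvml init/shutdown helpers (nvmlInit, nvmlDeviceGetHandleByIndex, nvmlShutdown) and an NVIDIA GPU with a working driver being present.

from py3nvml import py3nvml

py3nvml.nvmlInit()
try:
    handle = py3nvml.nvmlDeviceGetHandleByIndex(0)        # first GPU, if any
    print("board id:", py3nvml.nvmlDeviceGetBoardId(handle))
finally:
    py3nvml.nvmlShutdown()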
7,469 | Karaage-Cluster/python-tldap | tldap/backend/fake_transactions.py | LDAPwrapper.reset | def reset(self, force_flush_cache: bool = False) -> None:
"""
Reset transaction back to original state, discarding all
uncompleted transactions.
"""
super(LDAPwrapper, self).reset()
if len(self._transactions) == 0:
raise RuntimeError("reset called outside a transaction.")
self._transactions[-1] = [] | python | def reset(self, force_flush_cache: bool = False) -> None:
"""
Reset transaction back to original state, discarding all
uncompleted transactions.
"""
super(LDAPwrapper, self).reset()
if len(self._transactions) == 0:
raise RuntimeError("reset called outside a transaction.")
self._transactions[-1] = [] | ['def', 'reset', '(', 'self', ',', 'force_flush_cache', ':', 'bool', '=', 'False', ')', '->', 'None', ':', 'super', '(', 'LDAPwrapper', ',', 'self', ')', '.', 'reset', '(', ')', 'if', 'len', '(', 'self', '.', '_transactions', ')', '==', '0', ':', 'raise', 'RuntimeError', '(', '"reset called outside a transaction."', ')', 'self', '.', '_transactions', '[', '-', '1', ']', '=', '[', ']'] | Reset transaction back to original state, discarding all
uncompleted transactions. | ['Reset', 'transaction', 'back', 'to', 'original', 'state', 'discarding', 'all', 'uncompleted', 'transactions', '.'] | train | https://github.com/Karaage-Cluster/python-tldap/blob/61f1af74a3648cb6491e7eeb1ee2eb395d67bf59/tldap/backend/fake_transactions.py#L68-L76 |
7,470 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | MAVLink.resource_request_send | def resource_request_send(self, request_id, uri_type, uri, transfer_type, storage, force_mavlink1=False):
'''
The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t)
'''
return self.send(self.resource_request_encode(request_id, uri_type, uri, transfer_type, storage), force_mavlink1=force_mavlink1) | python | def resource_request_send(self, request_id, uri_type, uri, transfer_type, storage, force_mavlink1=False):
'''
The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t)
'''
return self.send(self.resource_request_encode(request_id, uri_type, uri, transfer_type, storage), force_mavlink1=force_mavlink1) | ['def', 'resource_request_send', '(', 'self', ',', 'request_id', ',', 'uri_type', ',', 'uri', ',', 'transfer_type', ',', 'storage', ',', 'force_mavlink1', '=', 'False', ')', ':', 'return', 'self', '.', 'send', '(', 'self', '.', 'resource_request_encode', '(', 'request_id', ',', 'uri_type', ',', 'uri', ',', 'transfer_type', ',', 'storage', ')', ',', 'force_mavlink1', '=', 'force_mavlink1', ')'] | The autopilot is requesting a resource (file, binary, other type of
data)
request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t)
uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t)
uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t)
transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t)
storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t) | ['The', 'autopilot', 'is', 'requesting', 'a', 'resource', '(', 'file', 'binary', 'other', 'type', 'of', 'data', ')'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L12333-L12345 |
7,471 | pedrotgn/pyactor | pyactor/thread/actor.py | Actor.receive | def receive(self, msg):
'''
The message received from the queue specify a method of the
class the actor represents. This invokes it. If the
communication is an ASK, sends the result back
to the channel included in the message as an
ASKRESPONSE.
If it is a FUTURE, generates a FUTURERESPONSE
to send the result to the manager.
:param msg: The message is a dictionary using the constants
defined in util.py (:mod:`pyactor.util`).
'''
if msg[TYPE] == TELL and msg[METHOD] == 'stop':
self.running = False
self.future_manager.stop()
else:
result = None
try:
invoke = getattr(self._obj, msg[METHOD])
params = msg[PARAMS]
result = invoke(*params[0], **params[1])
except Exception, e:
if msg[TYPE] == TELL:
print e
return
result = e
self.send_response(result, msg) | python | def receive(self, msg):
'''
The message received from the queue specify a method of the
class the actor represents. This invokes it. If the
communication is an ASK, sends the result back
to the channel included in the message as an
ASKRESPONSE.
If it is a FUTURE, generates a FUTURERESPONSE
to send the result to the manager.
:param msg: The message is a dictionary using the constants
defined in util.py (:mod:`pyactor.util`).
'''
if msg[TYPE] == TELL and msg[METHOD] == 'stop':
self.running = False
self.future_manager.stop()
else:
result = None
try:
invoke = getattr(self._obj, msg[METHOD])
params = msg[PARAMS]
result = invoke(*params[0], **params[1])
except Exception, e:
if msg[TYPE] == TELL:
print e
return
result = e
self.send_response(result, msg) | ['def', 'receive', '(', 'self', ',', 'msg', ')', ':', 'if', 'msg', '[', 'TYPE', ']', '==', 'TELL', 'and', 'msg', '[', 'METHOD', ']', '==', "'stop'", ':', 'self', '.', 'running', '=', 'False', 'self', '.', 'future_manager', '.', 'stop', '(', ')', 'else', ':', 'result', '=', 'None', 'try', ':', 'invoke', '=', 'getattr', '(', 'self', '.', '_obj', ',', 'msg', '[', 'METHOD', ']', ')', 'params', '=', 'msg', '[', 'PARAMS', ']', 'result', '=', 'invoke', '(', '*', 'params', '[', '0', ']', ',', '*', '*', 'params', '[', '1', ']', ')', 'except', 'Exception', ',', 'e', ':', 'if', 'msg', '[', 'TYPE', ']', '==', 'TELL', ':', 'print', 'e', 'return', 'result', '=', 'e', 'self', '.', 'send_response', '(', 'result', ',', 'msg', ')'] | The message received from the queue specify a method of the
class the actor represents. This invokes it. If the
communication is an ASK, sends the result back
to the channel included in the message as an
ASKRESPONSE.
If it is a FUTURE, generates a FUTURERESPONSE
to send the result to the manager.
:param msg: The message is a dictionary using the constants
defined in util.py (:mod:`pyactor.util`). | ['The', 'message', 'received', 'from', 'the', 'queue', 'specify', 'a', 'method', 'of', 'the', 'class', 'the', 'actor', 'represents', '.', 'This', 'invokes', 'it', '.', 'If', 'the', 'communication', 'is', 'an', 'ASK', 'sends', 'the', 'result', 'back', 'to', 'the', 'channel', 'included', 'in', 'the', 'message', 'as', 'an', 'ASKRESPONSE', '.'] | train | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/thread/actor.py#L100-L128 |
7,472 | evhub/coconut | coconut/compiler/grammar.py | match_handle | def match_handle(loc, tokens):
"""Process match blocks."""
if len(tokens) == 4:
matches, match_type, item, stmts = tokens
cond = None
elif len(tokens) == 5:
matches, match_type, item, cond, stmts = tokens
else:
raise CoconutInternalException("invalid match statement tokens", tokens)
if match_type == "in":
invert = False
elif match_type == "not in":
invert = True
else:
raise CoconutInternalException("invalid match type", match_type)
matching = Matcher(loc, match_check_var)
matching.match(matches, match_to_var)
if cond:
matching.add_guard(cond)
return (
match_to_var + " = " + item + "\n"
+ matching.build(stmts, invert=invert)
) | python | def match_handle(loc, tokens):
"""Process match blocks."""
if len(tokens) == 4:
matches, match_type, item, stmts = tokens
cond = None
elif len(tokens) == 5:
matches, match_type, item, cond, stmts = tokens
else:
raise CoconutInternalException("invalid match statement tokens", tokens)
if match_type == "in":
invert = False
elif match_type == "not in":
invert = True
else:
raise CoconutInternalException("invalid match type", match_type)
matching = Matcher(loc, match_check_var)
matching.match(matches, match_to_var)
if cond:
matching.add_guard(cond)
return (
match_to_var + " = " + item + "\n"
+ matching.build(stmts, invert=invert)
) | ['def', 'match_handle', '(', 'loc', ',', 'tokens', ')', ':', 'if', 'len', '(', 'tokens', ')', '==', '4', ':', 'matches', ',', 'match_type', ',', 'item', ',', 'stmts', '=', 'tokens', 'cond', '=', 'None', 'elif', 'len', '(', 'tokens', ')', '==', '5', ':', 'matches', ',', 'match_type', ',', 'item', ',', 'cond', ',', 'stmts', '=', 'tokens', 'else', ':', 'raise', 'CoconutInternalException', '(', '"invalid match statement tokens"', ',', 'tokens', ')', 'if', 'match_type', '==', '"in"', ':', 'invert', '=', 'False', 'elif', 'match_type', '==', '"not in"', ':', 'invert', '=', 'True', 'else', ':', 'raise', 'CoconutInternalException', '(', '"invalid match type"', ',', 'match_type', ')', 'matching', '=', 'Matcher', '(', 'loc', ',', 'match_check_var', ')', 'matching', '.', 'match', '(', 'matches', ',', 'match_to_var', ')', 'if', 'cond', ':', 'matching', '.', 'add_guard', '(', 'cond', ')', 'return', '(', 'match_to_var', '+', '" = "', '+', 'item', '+', '"\\n"', '+', 'matching', '.', 'build', '(', 'stmts', ',', 'invert', '=', 'invert', ')', ')'] | Process match blocks. | ['Process', 'match', 'blocks', '.'] | train | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/grammar.py#L518-L542 |
7,473 | phoebe-project/phoebe2 | phoebe/parameters/parameters.py | FloatArrayParameter.to_string_short | def to_string_short(self):
"""
see also :meth:`to_string`
:return: a shorter abbreviated string representation of the parameter
"""
opt = np.get_printoptions()
np.set_printoptions(threshold=8, edgeitems=3, linewidth=opt['linewidth']-len(self.uniquetwig)-2)
str_ = super(FloatArrayParameter, self).to_string_short()
np.set_printoptions(**opt)
return str_ | python | def to_string_short(self):
"""
see also :meth:`to_string`
:return: a shorter abbreviated string representation of the parameter
"""
opt = np.get_printoptions()
np.set_printoptions(threshold=8, edgeitems=3, linewidth=opt['linewidth']-len(self.uniquetwig)-2)
str_ = super(FloatArrayParameter, self).to_string_short()
np.set_printoptions(**opt)
return str_ | ['def', 'to_string_short', '(', 'self', ')', ':', 'opt', '=', 'np', '.', 'get_printoptions', '(', ')', 'np', '.', 'set_printoptions', '(', 'threshold', '=', '8', ',', 'edgeitems', '=', '3', ',', 'linewidth', '=', 'opt', '[', "'linewidth'", ']', '-', 'len', '(', 'self', '.', 'uniquetwig', ')', '-', '2', ')', 'str_', '=', 'super', '(', 'FloatArrayParameter', ',', 'self', ')', '.', 'to_string_short', '(', ')', 'np', '.', 'set_printoptions', '(', '*', '*', 'opt', ')', 'return', 'str_'] | see also :meth:`to_string`
:return: a shorter abbreviated string representation of the parameter | ['see', 'also', ':', 'meth', ':', 'to_string'] | train | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L4596-L4606 |
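The method in the row above is essentially a save, tighten, restore dance around NumPy's print options so that long arrays render in abbreviated form. A minimal standalone sketch of that pattern (plain NumPy, the array is hypothetical):

```python
import numpy as np

arr = np.arange(1000)

# Save, tighten, print an abbreviated view, then restore -- the same
# save/modify/restore pattern used by to_string_short above.
opt = np.get_printoptions()
np.set_printoptions(threshold=8, edgeitems=3)
print(arr)                                  # [  0   1   2 ... 997 998 999]
np.set_printoptions(**opt)
print(np.get_printoptions()["threshold"])   # back to the default (1000)
```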
7,474 | liip/taxi | taxi/commands/start.py | start | def start(ctx, alias, description, f):
"""
Use it when you start working on the given activity. This will add the
activity and the current time to your entries file. When you're finished,
use the stop command.
"""
today = datetime.date.today()
try:
timesheet_collection = get_timesheet_collection_for_context(ctx, f)
except ParseError as e:
ctx.obj['view'].err(e)
return
t = timesheet_collection.latest()
# If there's a previous entry on the same date, check if we can use its
# end time as a start time for the newly started entry
today_entries = t.entries.filter(date=today)
if(today in today_entries and today_entries[today]
and isinstance(today_entries[today][-1].duration, tuple)
and today_entries[today][-1].duration[1] is not None):
new_entry_start_time = today_entries[today][-1].duration[1]
else:
new_entry_start_time = datetime.datetime.now()
description = ' '.join(description) if description else '?'
duration = (new_entry_start_time, None)
e = Entry(alias, duration, description)
t.entries[today].append(e)
t.save() | python | def start(ctx, alias, description, f):
"""
Use it when you start working on the given activity. This will add the
activity and the current time to your entries file. When you're finished,
use the stop command.
"""
today = datetime.date.today()
try:
timesheet_collection = get_timesheet_collection_for_context(ctx, f)
except ParseError as e:
ctx.obj['view'].err(e)
return
t = timesheet_collection.latest()
# If there's a previous entry on the same date, check if we can use its
# end time as a start time for the newly started entry
today_entries = t.entries.filter(date=today)
if(today in today_entries and today_entries[today]
and isinstance(today_entries[today][-1].duration, tuple)
and today_entries[today][-1].duration[1] is not None):
new_entry_start_time = today_entries[today][-1].duration[1]
else:
new_entry_start_time = datetime.datetime.now()
description = ' '.join(description) if description else '?'
duration = (new_entry_start_time, None)
e = Entry(alias, duration, description)
t.entries[today].append(e)
t.save() | ['def', 'start', '(', 'ctx', ',', 'alias', ',', 'description', ',', 'f', ')', ':', 'today', '=', 'datetime', '.', 'date', '.', 'today', '(', ')', 'try', ':', 'timesheet_collection', '=', 'get_timesheet_collection_for_context', '(', 'ctx', ',', 'f', ')', 'except', 'ParseError', 'as', 'e', ':', 'ctx', '.', 'obj', '[', "'view'", ']', '.', 'err', '(', 'e', ')', 'return', 't', '=', 'timesheet_collection', '.', 'latest', '(', ')', "# If there's a previous entry on the same date, check if we can use its", '# end time as a start time for the newly started entry', 'today_entries', '=', 't', '.', 'entries', '.', 'filter', '(', 'date', '=', 'today', ')', 'if', '(', 'today', 'in', 'today_entries', 'and', 'today_entries', '[', 'today', ']', 'and', 'isinstance', '(', 'today_entries', '[', 'today', ']', '[', '-', '1', ']', '.', 'duration', ',', 'tuple', ')', 'and', 'today_entries', '[', 'today', ']', '[', '-', '1', ']', '.', 'duration', '[', '1', ']', 'is', 'not', 'None', ')', ':', 'new_entry_start_time', '=', 'today_entries', '[', 'today', ']', '[', '-', '1', ']', '.', 'duration', '[', '1', ']', 'else', ':', 'new_entry_start_time', '=', 'datetime', '.', 'datetime', '.', 'now', '(', ')', 'description', '=', "' '", '.', 'join', '(', 'description', ')', 'if', 'description', 'else', "'?'", 'duration', '=', '(', 'new_entry_start_time', ',', 'None', ')', 'e', '=', 'Entry', '(', 'alias', ',', 'duration', ',', 'description', ')', 't', '.', 'entries', '[', 'today', ']', '.', 'append', '(', 'e', ')', 't', '.', 'save', '(', ')'] | Use it when you start working on the given activity. This will add the
activity and the current time to your entries file. When you're finished,
use the stop command. | ['Use', 'it', 'when', 'you', 'start', 'working', 'on', 'the', 'given', 'activity', '.', 'This', 'will', 'add', 'the', 'activity', 'and', 'the', 'current', 'time', 'to', 'your', 'entries', 'file', '.', 'When', 'you', 're', 'finished', 'use', 'the', 'stop', 'command', '.'] | train | https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/commands/start.py#L18-L49 |
7,475 | ga4gh/ga4gh-server | ga4gh/server/datarepo.py | SqlDataRepository.insertIndividual | def insertIndividual(self, individual):
"""
Inserts the specified individual into this repository.
"""
try:
models.Individual.create(
id=individual.getId(),
datasetId=individual.getParentContainer().getId(),
name=individual.getLocalId(),
description=individual.getDescription(),
created=individual.getCreated(),
updated=individual.getUpdated(),
species=json.dumps(individual.getSpecies()),
sex=json.dumps(individual.getSex()),
attributes=json.dumps(individual.getAttributes()))
except Exception:
raise exceptions.DuplicateNameException(
individual.getLocalId(),
individual.getParentContainer().getLocalId()) | python | def insertIndividual(self, individual):
"""
Inserts the specified individual into this repository.
"""
try:
models.Individual.create(
id=individual.getId(),
datasetId=individual.getParentContainer().getId(),
name=individual.getLocalId(),
description=individual.getDescription(),
created=individual.getCreated(),
updated=individual.getUpdated(),
species=json.dumps(individual.getSpecies()),
sex=json.dumps(individual.getSex()),
attributes=json.dumps(individual.getAttributes()))
except Exception:
raise exceptions.DuplicateNameException(
individual.getLocalId(),
individual.getParentContainer().getLocalId()) | ['def', 'insertIndividual', '(', 'self', ',', 'individual', ')', ':', 'try', ':', 'models', '.', 'Individual', '.', 'create', '(', 'id', '=', 'individual', '.', 'getId', '(', ')', ',', 'datasetId', '=', 'individual', '.', 'getParentContainer', '(', ')', '.', 'getId', '(', ')', ',', 'name', '=', 'individual', '.', 'getLocalId', '(', ')', ',', 'description', '=', 'individual', '.', 'getDescription', '(', ')', ',', 'created', '=', 'individual', '.', 'getCreated', '(', ')', ',', 'updated', '=', 'individual', '.', 'getUpdated', '(', ')', ',', 'species', '=', 'json', '.', 'dumps', '(', 'individual', '.', 'getSpecies', '(', ')', ')', ',', 'sex', '=', 'json', '.', 'dumps', '(', 'individual', '.', 'getSex', '(', ')', ')', ',', 'attributes', '=', 'json', '.', 'dumps', '(', 'individual', '.', 'getAttributes', '(', ')', ')', ')', 'except', 'Exception', ':', 'raise', 'exceptions', '.', 'DuplicateNameException', '(', 'individual', '.', 'getLocalId', '(', ')', ',', 'individual', '.', 'getParentContainer', '(', ')', '.', 'getLocalId', '(', ')', ')'] | Inserts the specified individual into this repository. | ['Inserts', 'the', 'specified', 'individual', 'into', 'this', 'repository', '.'] | train | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L1251-L1269 |
7,476 | 5monkeys/django-enumfield | django_enumfield/validators.py | validate_valid_transition | def validate_valid_transition(enum, from_value, to_value):
"""
Validate that to_value is a valid choice and that to_value is a valid transition from from_value.
"""
validate_available_choice(enum, to_value)
if hasattr(enum, '_transitions') and not enum.is_valid_transition(from_value, to_value):
message = _(six.text_type('{enum} can not go from "{from_value}" to "{to_value}"'))
raise InvalidStatusOperationError(message.format(
enum=enum.__name__,
from_value=enum.name(from_value),
to_value=enum.name(to_value) or to_value
)) | python | def validate_valid_transition(enum, from_value, to_value):
"""
Validate that to_value is a valid choice and that to_value is a valid transition from from_value.
"""
validate_available_choice(enum, to_value)
if hasattr(enum, '_transitions') and not enum.is_valid_transition(from_value, to_value):
message = _(six.text_type('{enum} can not go from "{from_value}" to "{to_value}"'))
raise InvalidStatusOperationError(message.format(
enum=enum.__name__,
from_value=enum.name(from_value),
to_value=enum.name(to_value) or to_value
)) | ['def', 'validate_valid_transition', '(', 'enum', ',', 'from_value', ',', 'to_value', ')', ':', 'validate_available_choice', '(', 'enum', ',', 'to_value', ')', 'if', 'hasattr', '(', 'enum', ',', "'_transitions'", ')', 'and', 'not', 'enum', '.', 'is_valid_transition', '(', 'from_value', ',', 'to_value', ')', ':', 'message', '=', '_', '(', 'six', '.', 'text_type', '(', '\'{enum} can not go from "{from_value}" to "{to_value}"\'', ')', ')', 'raise', 'InvalidStatusOperationError', '(', 'message', '.', 'format', '(', 'enum', '=', 'enum', '.', '__name__', ',', 'from_value', '=', 'enum', '.', 'name', '(', 'from_value', ')', ',', 'to_value', '=', 'enum', '.', 'name', '(', 'to_value', ')', 'or', 'to_value', ')', ')'] | Validate that to_value is a valid choice and that to_value is a valid transition from from_value. | ['Validate', 'that', 'to_value', 'is', 'a', 'valid', 'choice', 'and', 'that', 'to_value', 'is', 'a', 'valid', 'transition', 'from', 'from_value', '.'] | train | https://github.com/5monkeys/django-enumfield/blob/6cf20c0fba013d39960af0f4d2c9a3b399955eb3/django_enumfield/validators.py#L7-L18 |
7,477 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py | MessageSetItemEncoder | def MessageSetItemEncoder(field_number):
"""Encoder for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
start_bytes = b"".join([
TagBytes(1, wire_format.WIRETYPE_START_GROUP),
TagBytes(2, wire_format.WIRETYPE_VARINT),
_VarintBytes(field_number),
TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_EncodeVarint = _EncodeVarint
def EncodeField(write, value):
write(start_bytes)
local_EncodeVarint(write, value.ByteSize())
value._InternalSerialize(write)
return write(end_bytes)
return EncodeField | python | def MessageSetItemEncoder(field_number):
"""Encoder for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
start_bytes = b"".join([
TagBytes(1, wire_format.WIRETYPE_START_GROUP),
TagBytes(2, wire_format.WIRETYPE_VARINT),
_VarintBytes(field_number),
TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_EncodeVarint = _EncodeVarint
def EncodeField(write, value):
write(start_bytes)
local_EncodeVarint(write, value.ByteSize())
value._InternalSerialize(write)
return write(end_bytes)
return EncodeField | ['def', 'MessageSetItemEncoder', '(', 'field_number', ')', ':', 'start_bytes', '=', 'b""', '.', 'join', '(', '[', 'TagBytes', '(', '1', ',', 'wire_format', '.', 'WIRETYPE_START_GROUP', ')', ',', 'TagBytes', '(', '2', ',', 'wire_format', '.', 'WIRETYPE_VARINT', ')', ',', '_VarintBytes', '(', 'field_number', ')', ',', 'TagBytes', '(', '3', ',', 'wire_format', '.', 'WIRETYPE_LENGTH_DELIMITED', ')', ']', ')', 'end_bytes', '=', 'TagBytes', '(', '1', ',', 'wire_format', '.', 'WIRETYPE_END_GROUP', ')', 'local_EncodeVarint', '=', '_EncodeVarint', 'def', 'EncodeField', '(', 'write', ',', 'value', ')', ':', 'write', '(', 'start_bytes', ')', 'local_EncodeVarint', '(', 'write', ',', 'value', '.', 'ByteSize', '(', ')', ')', 'value', '.', '_InternalSerialize', '(', 'write', ')', 'return', 'write', '(', 'end_bytes', ')', 'return', 'EncodeField'] | Encoder for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
} | ['Encoder', 'for', 'extensions', 'of', 'MessageSet', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L774-L799 |
7,478 | limpyd/redis-limpyd-jobs | limpyd_jobs/workers.py | Worker.set_status | def set_status(self, status):
"""
Save the new status and call all defined callbacks
"""
self.status = status
for callback in self._update_status_callbacks:
callback(self) | python | def set_status(self, status):
"""
Save the new status and call all defined callbacks
"""
self.status = status
for callback in self._update_status_callbacks:
callback(self) | ['def', 'set_status', '(', 'self', ',', 'status', ')', ':', 'self', '.', 'status', '=', 'status', 'for', 'callback', 'in', 'self', '.', '_update_status_callbacks', ':', 'callback', '(', 'self', ')'] | Save the new status and call all defined callbacks | ['Save', 'the', 'new', 'status', 'and', 'call', 'all', 'defined', 'callbacks'] | train | https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L213-L219 |
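The `set_status` row above is a small observer hook: every registered callback gets the worker itself after the status changes. A self-contained sketch of the same callback pattern (class and method names here are illustrative stand-ins, not the limpyd_jobs API):

```python
# Illustrative observer/callback registry, mirroring the shape of
# Worker.set_status above.
class Worker:
    def __init__(self):
        self.status = None
        self._update_status_callbacks = []

    def add_update_status_callback(self, cb):
        self._update_status_callbacks.append(cb)

    def set_status(self, status):
        self.status = status
        for callback in self._update_status_callbacks:
            callback(self)

w = Worker()
w.add_update_status_callback(lambda worker: print("now:", worker.status))
w.set_status("waiting")   # prints "now: waiting"
```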
7,479 | 7sDream/zhihu-py3 | zhihu/post.py | Post.upvoters | def upvoters(self):
"""获取文章的点赞用户
:return: 文章的点赞用户,返回生成器。
"""
from .author import Author, ANONYMOUS
self._make_soup()
headers = dict(Default_Header)
headers['Host'] = 'zhuanlan.zhihu.com'
json = self._session.get(
Post_Get_Upvoter.format(self.slug),
headers=headers
).json()
for au in json:
try:
yield Author(
au['profileUrl'],
au['name'],
au['bio'],
photo_url=au['avatar']['template'].format(
id=au['avatar']['id'], size='r'),
session=self._session
)
except ValueError: # invalid url
yield ANONYMOUS | python | def upvoters(self):
"""获取文章的点赞用户
:return: 文章的点赞用户,返回生成器。
"""
from .author import Author, ANONYMOUS
self._make_soup()
headers = dict(Default_Header)
headers['Host'] = 'zhuanlan.zhihu.com'
json = self._session.get(
Post_Get_Upvoter.format(self.slug),
headers=headers
).json()
for au in json:
try:
yield Author(
au['profileUrl'],
au['name'],
au['bio'],
photo_url=au['avatar']['template'].format(
id=au['avatar']['id'], size='r'),
session=self._session
)
except ValueError: # invalid url
yield ANONYMOUS | ['def', 'upvoters', '(', 'self', ')', ':', 'from', '.', 'author', 'import', 'Author', ',', 'ANONYMOUS', 'self', '.', '_make_soup', '(', ')', 'headers', '=', 'dict', '(', 'Default_Header', ')', 'headers', '[', "'Host'", ']', '=', "'zhuanlan.zhihu.com'", 'json', '=', 'self', '.', '_session', '.', 'get', '(', 'Post_Get_Upvoter', '.', 'format', '(', 'self', '.', 'slug', ')', ',', 'headers', '=', 'headers', ')', '.', 'json', '(', ')', 'for', 'au', 'in', 'json', ':', 'try', ':', 'yield', 'Author', '(', 'au', '[', "'profileUrl'", ']', ',', 'au', '[', "'name'", ']', ',', 'au', '[', "'bio'", ']', ',', 'photo_url', '=', 'au', '[', "'avatar'", ']', '[', "'template'", ']', '.', 'format', '(', 'id', '=', 'au', '[', "'avatar'", ']', '[', "'id'", ']', ',', 'size', '=', "'r'", ')', ',', 'session', '=', 'self', '.', '_session', ')', 'except', 'ValueError', ':', '# invalid url', 'yield', 'ANONYMOUS'] | 获取文章的点赞用户
:return: 文章的点赞用户,返回生成器。 | ['获取文章的点赞用户'] | train | https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/post.py#L166-L190 |
7,480 | klen/muffin-admin | muffin_admin/handler.py | AdminHandler.get | async def get(self, request):
"""Get collection of resources."""
form = await self.get_form(request)
ctx = dict(active=self, form=form, request=request)
if self.resource:
return self.app.ps.jinja2.render(self.template_item, **ctx)
return self.app.ps.jinja2.render(self.template_list, **ctx) | python | async def get(self, request):
"""Get collection of resources."""
form = await self.get_form(request)
ctx = dict(active=self, form=form, request=request)
if self.resource:
return self.app.ps.jinja2.render(self.template_item, **ctx)
return self.app.ps.jinja2.render(self.template_list, **ctx) | ['async', 'def', 'get', '(', 'self', ',', 'request', ')', ':', 'form', '=', 'await', 'self', '.', 'get_form', '(', 'request', ')', 'ctx', '=', 'dict', '(', 'active', '=', 'self', ',', 'form', '=', 'form', ',', 'request', '=', 'request', ')', 'if', 'self', '.', 'resource', ':', 'return', 'self', '.', 'app', '.', 'ps', '.', 'jinja2', '.', 'render', '(', 'self', '.', 'template_item', ',', '*', '*', 'ctx', ')', 'return', 'self', '.', 'app', '.', 'ps', '.', 'jinja2', '.', 'render', '(', 'self', '.', 'template_list', ',', '*', '*', 'ctx', ')'] | Get collection of resources. | ['Get', 'collection', 'of', 'resources', '.'] | train | https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/handler.py#L184-L190 |
7,481 | manns/pyspread | pyspread/src/actions/_grid_actions.py | TableActions.replace_cells | def replace_cells(self, key, sorted_row_idxs):
"""Replaces cells in current selection so that they are sorted"""
row, col, tab = key
new_keys = {}
del_keys = []
selection = self.grid.actions.get_selection()
for __row, __col, __tab in self.grid.code_array:
if __tab == tab and \
(not selection or (__row, __col) in selection):
new_row = sorted_row_idxs.index(__row)
if __row != new_row:
new_keys[(new_row, __col, __tab)] = \
self.grid.code_array((__row, __col, __tab))
del_keys.append((__row, __col, __tab))
for key in del_keys:
self.grid.code_array.pop(key)
for key in new_keys:
CellActions.set_code(self, key, new_keys[key]) | python | def replace_cells(self, key, sorted_row_idxs):
"""Replaces cells in current selection so that they are sorted"""
row, col, tab = key
new_keys = {}
del_keys = []
selection = self.grid.actions.get_selection()
for __row, __col, __tab in self.grid.code_array:
if __tab == tab and \
(not selection or (__row, __col) in selection):
new_row = sorted_row_idxs.index(__row)
if __row != new_row:
new_keys[(new_row, __col, __tab)] = \
self.grid.code_array((__row, __col, __tab))
del_keys.append((__row, __col, __tab))
for key in del_keys:
self.grid.code_array.pop(key)
for key in new_keys:
CellActions.set_code(self, key, new_keys[key]) | ['def', 'replace_cells', '(', 'self', ',', 'key', ',', 'sorted_row_idxs', ')', ':', 'row', ',', 'col', ',', 'tab', '=', 'key', 'new_keys', '=', '{', '}', 'del_keys', '=', '[', ']', 'selection', '=', 'self', '.', 'grid', '.', 'actions', '.', 'get_selection', '(', ')', 'for', '__row', ',', '__col', ',', '__tab', 'in', 'self', '.', 'grid', '.', 'code_array', ':', 'if', '__tab', '==', 'tab', 'and', '(', 'not', 'selection', 'or', '(', '__row', ',', '__col', ')', 'in', 'selection', ')', ':', 'new_row', '=', 'sorted_row_idxs', '.', 'index', '(', '__row', ')', 'if', '__row', '!=', 'new_row', ':', 'new_keys', '[', '(', 'new_row', ',', '__col', ',', '__tab', ')', ']', '=', 'self', '.', 'grid', '.', 'code_array', '(', '(', '__row', ',', '__col', ',', '__tab', ')', ')', 'del_keys', '.', 'append', '(', '(', '__row', ',', '__col', ',', '__tab', ')', ')', 'for', 'key', 'in', 'del_keys', ':', 'self', '.', 'grid', '.', 'code_array', '.', 'pop', '(', 'key', ')', 'for', 'key', 'in', 'new_keys', ':', 'CellActions', '.', 'set_code', '(', 'self', ',', 'key', ',', 'new_keys', '[', 'key', ']', ')'] | Replaces cells in current selection so that they are sorted | ['Replaces', 'cells', 'in', 'current', 'selection', 'so', 'that', 'they', 'are', 'sorted'] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_actions.py#L1024-L1047 |
7,482 | xguse/table_enforcer | table_enforcer/utils/validate/funcs.py | unique | def unique(series: pd.Series) -> pd.Series:
"""Test that the data items do not repeat."""
return ~series.duplicated(keep=False) | python | def unique(series: pd.Series) -> pd.Series:
"""Test that the data items do not repeat."""
return ~series.duplicated(keep=False) | ['def', 'unique', '(', 'series', ':', 'pd', '.', 'Series', ')', '->', 'pd', '.', 'Series', ':', 'return', '~', 'series', '.', 'duplicated', '(', 'keep', '=', 'False', ')'] | Test that the data items do not repeat. | ['Test', 'that', 'the', 'data', 'items', 'do', 'not', 'repeat', '.'] | train | https://github.com/xguse/table_enforcer/blob/f3137839574bf8ea933a14ea16a8acba45e3e0c3/table_enforcer/utils/validate/funcs.py#L30-L32 |
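To make the `unique` validator's behaviour concrete, a short pandas sketch with hypothetical values; every occurrence of a repeated item is flagged as failing:

```python
import pandas as pd

# One value (2) appears twice, so both of its positions validate as False.
s = pd.Series([1, 2, 2, 3])
print((~s.duplicated(keep=False)).tolist())   # [True, False, False, True]
```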
7,483 | pypa/pipenv | pipenv/vendor/pipreqs/pipreqs.py | parse_requirements | def parse_requirements(file_):
"""Parse a requirements formatted file.
Traverse a string until a delimiter is detected, then split at said
delimiter, get module name by element index, create a dict consisting of
module:version, and add dict to list of parsed modules.
Args:
file_: File to parse.
Raises:
OSError: If there are any issues accessing the file.
Returns:
tuple: The contents of the file, excluding comments.
"""
modules = []
delim = ["<", ">", "=", "!", "~"] # https://www.python.org/dev/peps/pep-0508/#complete-grammar
try:
f = open_func(file_, "r")
except OSError:
logging.error("Failed on file: {}".format(file_))
raise
else:
data = [x.strip() for x in f.readlines() if x != "\n"]
finally:
f.close()
data = [x for x in data if x[0].isalpha()]
for x in data:
if not any([y in x for y in delim]): # Check for modules w/o a specifier.
modules.append({"name": x, "version": None})
for y in x:
if y in delim:
module = x.split(y)
module_name = module[0]
module_version = module[-1].replace("=", "")
module = {"name": module_name, "version": module_version}
if module not in modules:
modules.append(module)
break
return modules | python | def parse_requirements(file_):
"""Parse a requirements formatted file.
Traverse a string until a delimiter is detected, then split at said
delimiter, get module name by element index, create a dict consisting of
module:version, and add dict to list of parsed modules.
Args:
file_: File to parse.
Raises:
OSError: If there are any issues accessing the file.
Returns:
tuple: The contents of the file, excluding comments.
"""
modules = []
delim = ["<", ">", "=", "!", "~"] # https://www.python.org/dev/peps/pep-0508/#complete-grammar
try:
f = open_func(file_, "r")
except OSError:
logging.error("Failed on file: {}".format(file_))
raise
else:
data = [x.strip() for x in f.readlines() if x != "\n"]
finally:
f.close()
data = [x for x in data if x[0].isalpha()]
for x in data:
if not any([y in x for y in delim]): # Check for modules w/o a specifier.
modules.append({"name": x, "version": None})
for y in x:
if y in delim:
module = x.split(y)
module_name = module[0]
module_version = module[-1].replace("=", "")
module = {"name": module_name, "version": module_version}
if module not in modules:
modules.append(module)
break
return modules | ['def', 'parse_requirements', '(', 'file_', ')', ':', 'modules', '=', '[', ']', 'delim', '=', '[', '"<"', ',', '">"', ',', '"="', ',', '"!"', ',', '"~"', ']', '# https://www.python.org/dev/peps/pep-0508/#complete-grammar', 'try', ':', 'f', '=', 'open_func', '(', 'file_', ',', '"r"', ')', 'except', 'OSError', ':', 'logging', '.', 'error', '(', '"Failed on file: {}"', '.', 'format', '(', 'file_', ')', ')', 'raise', 'else', ':', 'data', '=', '[', 'x', '.', 'strip', '(', ')', 'for', 'x', 'in', 'f', '.', 'readlines', '(', ')', 'if', 'x', '!=', '"\\n"', ']', 'finally', ':', 'f', '.', 'close', '(', ')', 'data', '=', '[', 'x', 'for', 'x', 'in', 'data', 'if', 'x', '[', '0', ']', '.', 'isalpha', '(', ')', ']', 'for', 'x', 'in', 'data', ':', 'if', 'not', 'any', '(', '[', 'y', 'in', 'x', 'for', 'y', 'in', 'delim', ']', ')', ':', '# Check for modules w/o a specifier.', 'modules', '.', 'append', '(', '{', '"name"', ':', 'x', ',', '"version"', ':', 'None', '}', ')', 'for', 'y', 'in', 'x', ':', 'if', 'y', 'in', 'delim', ':', 'module', '=', 'x', '.', 'split', '(', 'y', ')', 'module_name', '=', 'module', '[', '0', ']', 'module_version', '=', 'module', '[', '-', '1', ']', '.', 'replace', '(', '"="', ',', '""', ')', 'module', '=', '{', '"name"', ':', 'module_name', ',', '"version"', ':', 'module_version', '}', 'if', 'module', 'not', 'in', 'modules', ':', 'modules', '.', 'append', '(', 'module', ')', 'break', 'return', 'modules'] | Parse a requirements formatted file.
Traverse a string until a delimiter is detected, then split at said
delimiter, get module name by element index, create a dict consisting of
module:version, and add dict to list of parsed modules.
Args:
file_: File to parse.
Raises:
OSError: If there are any issues accessing the file.
Returns:
tuple: The contents of the file, excluding comments. | ['Parse', 'a', 'requirements', 'formatted', 'file', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pipreqs/pipreqs.py#L231-L277 |
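A quick usage sketch for `parse_requirements` as documented above. The import path assumes the standalone pipreqs distribution rather than the vendored copy, and the file contents are made up:

```python
# Illustrative only: write a tiny requirements file and parse it.
from pipreqs.pipreqs import parse_requirements

with open("reqs.txt", "w") as fh:
    fh.write("requests==2.21.0\nflask>=1.0\nsix\n")

print(parse_requirements("reqs.txt"))
# Expected shape, per the code above:
# [{'name': 'requests', 'version': '2.21.0'},
#  {'name': 'flask', 'version': '1.0'},
#  {'name': 'six', 'version': None}]
```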
7,484 | lowandrew/OLCTools | coreGenome/annotate.py | Annotate.codingthreads | def codingthreads(self):
"""
Find CDS features in .gff files to filter out non-coding sequences from the analysis
"""
printtime('Extracting CDS features', self.start)
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.codingsequences, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.runmetadata.samples:
self.codingqueue.put(sample)
self.codingqueue.join()
# Create CDS files and determine gene presence/absence
self.corethreads() | python | def codingthreads(self):
"""
Find CDS features in .gff files to filter out non-coding sequences from the analysis
"""
printtime('Extracting CDS features', self.start)
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.codingsequences, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.runmetadata.samples:
self.codingqueue.put(sample)
self.codingqueue.join()
# Create CDS files and determine gene presence/absence
self.corethreads() | ['def', 'codingthreads', '(', 'self', ')', ':', 'printtime', '(', "'Extracting CDS features'", ',', 'self', '.', 'start', ')', '# Create and start threads', 'for', 'i', 'in', 'range', '(', 'self', '.', 'cpus', ')', ':', '# Send the threads to the appropriate destination function', 'threads', '=', 'Thread', '(', 'target', '=', 'self', '.', 'codingsequences', ',', 'args', '=', '(', ')', ')', '# Set the daemon to true - something to do with thread management', 'threads', '.', 'setDaemon', '(', 'True', ')', '# Start the threading', 'threads', '.', 'start', '(', ')', 'for', 'sample', 'in', 'self', '.', 'runmetadata', '.', 'samples', ':', 'self', '.', 'codingqueue', '.', 'put', '(', 'sample', ')', 'self', '.', 'codingqueue', '.', 'join', '(', ')', '# Create CDS files and determine gene presence/absence', 'self', '.', 'corethreads', '(', ')'] | Find CDS features in .gff files to filter out non-coding sequences from the analysis | ['Find', 'CDS', 'features', 'in', '.', 'gff', 'files', 'to', 'filter', 'out', 'non', '-', 'coding', 'sequences', 'from', 'the', 'analysis'] | train | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/annotate.py#L142-L159 |
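The `codingthreads` row above follows the classic daemon-thread and queue fan-out: start N workers, enqueue the samples, then join the queue. A generic, standard-library-only sketch of that pattern (the work function and sample names are placeholders):

```python
from queue import Queue
from threading import Thread

q = Queue()

def worker():
    while True:
        item = q.get()
        print("processing", item)   # stand-in for the per-sample work
        q.task_done()

for _ in range(4):                   # like range(self.cpus)
    t = Thread(target=worker)
    t.daemon = True                  # preferred over setDaemon(True)
    t.start()

for sample in ["a.gff", "b.gff", "c.gff"]:
    q.put(sample)
q.join()                             # block until every item is processed
```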
7,485 | BetterWorks/django-anonymizer | anonymizer/replacers.py | phonenumber | def phonenumber(anon, obj, field, val):
"""
Generates a random US-style phone number
"""
return anon.faker.phone_number(field=field) | python | def phonenumber(anon, obj, field, val):
"""
Generates a random US-style phone number
"""
return anon.faker.phone_number(field=field) | ['def', 'phonenumber', '(', 'anon', ',', 'obj', ',', 'field', ',', 'val', ')', ':', 'return', 'anon', '.', 'faker', '.', 'phone_number', '(', 'field', '=', 'field', ')'] | Generates a random US-style phone number | ['Generates', 'a', 'random', 'US', '-', 'style', 'phone', 'number'] | train | https://github.com/BetterWorks/django-anonymizer/blob/2d25bb6e8b5e4230c58031c4b6d10cc536669b3e/anonymizer/replacers.py#L138-L142 |
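The replacer above delegates to a Faker-style provider. Roughly the same kind of value can be produced directly with the faker package (a sketch, not the anonymizer's own wrapper):

```python
from faker import Faker

fake = Faker("en_US")
print(fake.phone_number())   # e.g. "(555) 123-4567" -- random each run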
7,486 | estnltk/estnltk | estnltk/text.py | Text.tag_syntax | def tag_syntax(self):
""" Parses this text with the syntactic analyzer (``self.__syntactic_parser``),
and stores the found syntactic analyses: into the layer LAYER_CONLL (if MaltParser
is used, default), or into the layer LAYER_VISLCG3 (if VISLCG3Parser is used).
"""
# Load default Syntactic tagger:
if self.__syntactic_parser is None:
self.__syntactic_parser = load_default_syntactic_parser()
if not self.is_tagged(ANALYSIS):
if isinstance(self.__syntactic_parser, MaltParser):
# By default: Use disambiguation for MaltParser's input
if 'disambiguate' not in self.__kwargs:
self.__kwargs['disambiguate'] = True
self.tag_analysis()
elif isinstance(self.__syntactic_parser, VISLCG3Parser):
# By default: Do not use disambiguation for VISLCG3Parser's input
# (VISLCG3 already does its own rule-based disambiguation)
if 'disambiguate' not in self.__kwargs:
self.__kwargs['disambiguate'] = False
self.tag_analysis()
return self.__syntactic_parser.parse_text( self, **self.__kwargs ) | python | def tag_syntax(self):
""" Parses this text with the syntactic analyzer (``self.__syntactic_parser``),
and stores the found syntactic analyses: into the layer LAYER_CONLL (if MaltParser
is used, default), or into the layer LAYER_VISLCG3 (if VISLCG3Parser is used).
"""
# Load default Syntactic tagger:
if self.__syntactic_parser is None:
self.__syntactic_parser = load_default_syntactic_parser()
if not self.is_tagged(ANALYSIS):
if isinstance(self.__syntactic_parser, MaltParser):
# By default: Use disambiguation for MaltParser's input
if 'disambiguate' not in self.__kwargs:
self.__kwargs['disambiguate'] = True
self.tag_analysis()
elif isinstance(self.__syntactic_parser, VISLCG3Parser):
# By default: Do not use disambiguation for VISLCG3Parser's input
# (VISLCG3 already does its own rule-based disambiguation)
if 'disambiguate' not in self.__kwargs:
self.__kwargs['disambiguate'] = False
self.tag_analysis()
return self.__syntactic_parser.parse_text( self, **self.__kwargs ) | ['def', 'tag_syntax', '(', 'self', ')', ':', '# Load default Syntactic tagger:', 'if', 'self', '.', '__syntactic_parser', 'is', 'None', ':', 'self', '.', '__syntactic_parser', '=', 'load_default_syntactic_parser', '(', ')', 'if', 'not', 'self', '.', 'is_tagged', '(', 'ANALYSIS', ')', ':', 'if', 'isinstance', '(', 'self', '.', '__syntactic_parser', ',', 'MaltParser', ')', ':', "# By default: Use disambiguation for MaltParser's input", 'if', "'disambiguate'", 'not', 'in', 'self', '.', '__kwargs', ':', 'self', '.', '__kwargs', '[', "'disambiguate'", ']', '=', 'True', 'self', '.', 'tag_analysis', '(', ')', 'elif', 'isinstance', '(', 'self', '.', '__syntactic_parser', ',', 'VISLCG3Parser', ')', ':', "# By default: Do not use disambiguation for VISLCG3Parser's input", '# (VISLCG3 already does its own rule-based disambiguation)', 'if', "'disambiguate'", 'not', 'in', 'self', '.', '__kwargs', ':', 'self', '.', '__kwargs', '[', "'disambiguate'", ']', '=', 'False', 'self', '.', 'tag_analysis', '(', ')', 'return', 'self', '.', '__syntactic_parser', '.', 'parse_text', '(', 'self', ',', '*', '*', 'self', '.', '__kwargs', ')'] | Parses this text with the syntactic analyzer (``self.__syntactic_parser``),
and stores the found syntactic analyses: into the layer LAYER_CONLL (if MaltParser
is used, default), or into the layer LAYER_VISLCG3 (if VISLCG3Parser is used). | ['Parses', 'this', 'text', 'with', 'the', 'syntactic', 'analyzer', '(', 'self', '.', '__syntactic_parser', ')', 'and', 'stores', 'the', 'found', 'syntactic', 'analyses', ':', 'into', 'the', 'layer', 'LAYER_CONLL', '(', 'if', 'MaltParser', 'is', 'used', 'default', ')', 'or', 'into', 'the', 'layer', 'LAYER_VISLCG3', '(', 'if', 'VISLCG3Parser', 'is', 'used', ')', '.'] | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L736-L756 |
7,487 | jaredLunde/vital-tools | vital/cache/async_decorators.py | async_lru | def async_lru(size=100):
""" An LRU cache for asyncio coroutines in Python 3.5
..
@async_lru(1024)
async def slow_coroutine(*args, **kwargs):
return await some_other_slow_coroutine()
..
"""
cache = collections.OrderedDict()
def decorator(fn):
@wraps(fn)
@asyncio.coroutine
def memoizer(*args, **kwargs):
key = str((args, kwargs))
try:
result = cache.pop(key)
cache[key] = result
except KeyError:
if len(cache) >= size:
cache.popitem(last=False)
result = cache[key] = yield from fn(*args, **kwargs)
return result
return memoizer
return decorator | python | def async_lru(size=100):
""" An LRU cache for asyncio coroutines in Python 3.5
..
@async_lru(1024)
async def slow_coroutine(*args, **kwargs):
return await some_other_slow_coroutine()
..
"""
cache = collections.OrderedDict()
def decorator(fn):
@wraps(fn)
@asyncio.coroutine
def memoizer(*args, **kwargs):
key = str((args, kwargs))
try:
result = cache.pop(key)
cache[key] = result
except KeyError:
if len(cache) >= size:
cache.popitem(last=False)
result = cache[key] = yield from fn(*args, **kwargs)
return result
return memoizer
return decorator | ['def', 'async_lru', '(', 'size', '=', '100', ')', ':', 'cache', '=', 'collections', '.', 'OrderedDict', '(', ')', 'def', 'decorator', '(', 'fn', ')', ':', '@', 'wraps', '(', 'fn', ')', '@', 'asyncio', '.', 'coroutine', 'def', 'memoizer', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'key', '=', 'str', '(', '(', 'args', ',', 'kwargs', ')', ')', 'try', ':', 'result', '=', 'cache', '.', 'pop', '(', 'key', ')', 'cache', '[', 'key', ']', '=', 'result', 'except', 'KeyError', ':', 'if', 'len', '(', 'cache', ')', '>=', 'size', ':', 'cache', '.', 'popitem', '(', 'last', '=', 'False', ')', 'result', '=', 'cache', '[', 'key', ']', '=', 'yield', 'from', 'fn', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'result', 'return', 'memoizer', 'return', 'decorator'] | An LRU cache for asyncio coroutines in Python 3.5
..
@async_lru(1024)
async def slow_coroutine(*args, **kwargs):
return await some_other_slow_coroutine()
.. | ['An', 'LRU', 'cache', 'for', 'asyncio', 'coroutines', 'in', 'Python', '3', '.', '5', '..'] | train | https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/cache/async_decorators.py#L7-L31 |
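The caching core of `async_lru` above is an OrderedDict used as an LRU: a hit is popped and re-inserted at the recent end, and the oldest entry is evicted once the size limit is reached. The same mechanics, shown synchronously for clarity (names and sizes are illustrative):

```python
import collections

cache = collections.OrderedDict()
SIZE = 3

def remember(key, compute):
    try:
        value = cache.pop(key)         # cache hit: move to the "recent" end
    except KeyError:
        if len(cache) >= SIZE:
            cache.popitem(last=False)  # evict the least-recently-used entry
        value = compute(key)
    cache[key] = value
    return value

for k in ["a", "b", "c", "a", "d"]:
    remember(k, lambda key: key.upper())
print(list(cache))   # ['c', 'a', 'd'] -- "b" was evicted
```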
7,488 | pypa/pipenv | pipenv/vendor/requests/adapters.py | HTTPAdapter.get_connection | def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL("Please check proxy URL. It is malformed"
" and could be missing the host.")
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn | python | def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL("Please check proxy URL. It is malformed"
" and could be missing the host.")
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn | ['def', 'get_connection', '(', 'self', ',', 'url', ',', 'proxies', '=', 'None', ')', ':', 'proxy', '=', 'select_proxy', '(', 'url', ',', 'proxies', ')', 'if', 'proxy', ':', 'proxy', '=', 'prepend_scheme_if_needed', '(', 'proxy', ',', "'http'", ')', 'proxy_url', '=', 'parse_url', '(', 'proxy', ')', 'if', 'not', 'proxy_url', '.', 'host', ':', 'raise', 'InvalidProxyURL', '(', '"Please check proxy URL. It is malformed"', '" and could be missing the host."', ')', 'proxy_manager', '=', 'self', '.', 'proxy_manager_for', '(', 'proxy', ')', 'conn', '=', 'proxy_manager', '.', 'connection_from_url', '(', 'url', ')', 'else', ':', '# Only scheme should be lower case', 'parsed', '=', 'urlparse', '(', 'url', ')', 'url', '=', 'parsed', '.', 'geturl', '(', ')', 'conn', '=', 'self', '.', 'poolmanager', '.', 'connection_from_url', '(', 'url', ')', 'return', 'conn'] | Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool | ['Returns', 'a', 'urllib3', 'connection', 'for', 'the', 'given', 'URL', '.', 'This', 'should', 'not', 'be', 'called', 'from', 'user', 'code', 'and', 'is', 'only', 'exposed', 'for', 'use', 'when', 'subclassing', 'the', ':', 'class', ':', 'HTTPAdapter', '<requests', '.', 'adapters', '.', 'HTTPAdapter', '>', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/adapters.py#L292-L317 |
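A small illustration of the adapter-to-pool relationship `get_connection` describes, using the public requests API of the 2.x line vendored here; no network traffic is needed just to obtain the pool object, and the host and pool sizes are arbitrary:

```python
from requests.adapters import HTTPAdapter

adapter = HTTPAdapter(pool_connections=5, pool_maxsize=5)
conn = adapter.get_connection("https://example.org/path")
print(type(conn).__name__)        # HTTPSConnectionPool
print(conn.host, conn.port)       # example.org 443
adapter.close()
```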
7,489 | aouyar/PyMunin | pysysinfo/netstat.py | NetstatInfo.getTCPportConnStatus | def getTCPportConnStatus(self, ipv4=True, ipv6=True, include_listen=False,
**kwargs):
"""Returns the number of TCP endpoints discriminated by status.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: Dictionary mapping connection status to the
number of endpoints.
"""
status_dict = {}
result = self.getStats(tcp=True, udp=False,
include_listen=include_listen,
ipv4=ipv4, ipv6=ipv6,
**kwargs)
stats = result['stats']
for stat in stats:
if stat is not None:
status = stat[8].lower()
status_dict[status] = status_dict.get(status, 0) + 1
return status_dict | python | def getTCPportConnStatus(self, ipv4=True, ipv6=True, include_listen=False,
**kwargs):
"""Returns the number of TCP endpoints discriminated by status.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: Dictionary mapping connection status to the
number of endpoints.
"""
status_dict = {}
result = self.getStats(tcp=True, udp=False,
include_listen=include_listen,
ipv4=ipv4, ipv6=ipv6,
**kwargs)
stats = result['stats']
for stat in stats:
if stat is not None:
status = stat[8].lower()
status_dict[status] = status_dict.get(status, 0) + 1
return status_dict | ['def', 'getTCPportConnStatus', '(', 'self', ',', 'ipv4', '=', 'True', ',', 'ipv6', '=', 'True', ',', 'include_listen', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'status_dict', '=', '{', '}', 'result', '=', 'self', '.', 'getStats', '(', 'tcp', '=', 'True', ',', 'udp', '=', 'False', ',', 'include_listen', '=', 'include_listen', ',', 'ipv4', '=', 'ipv4', ',', 'ipv6', '=', 'ipv6', ',', '*', '*', 'kwargs', ')', 'stats', '=', 'result', '[', "'stats'", ']', 'for', 'stat', 'in', 'stats', ':', 'if', 'stat', 'is', 'not', 'None', ':', 'status', '=', 'stat', '[', '8', ']', '.', 'lower', '(', ')', 'status_dict', '[', 'status', ']', '=', 'status_dict', '.', 'get', '(', 'status', ',', '0', ')', '+', '1', 'return', 'status_dict'] | Returns the number of TCP endpoints discriminated by status.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: Dictionary mapping connection status to the
number of endpoints. | ['Returns', 'the', 'number', 'of', 'TCP', 'endpoints', 'discriminated', 'by', 'status', '.'] | train | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/netstat.py#L172-L209 |
7,490 | gwpy/gwpy | gwpy/timeseries/io/wav.py | write | def write(series, output, scale=None):
"""Write a `TimeSeries` to a WAV file
Parameters
----------
series : `TimeSeries`
the series to write
output : `file`, `str`
the file object or filename to write to
scale : `float`, optional
the factor to apply to scale the data to (-1.0, 1.0),
pass `scale=1` to not apply any scale, otherwise
the data will be auto-scaled
See also
--------
scipy.io.wavfile.write
for details on how the WAV file is actually written
Examples
--------
>>> from gwpy.timeseries import TimeSeries
>>> t = TimeSeries([1, 2, 3, 4, 5])
>>> t = TimeSeries.write('test.wav')
"""
fsamp = int(series.sample_rate.decompose().value)
if scale is None:
scale = 1 / numpy.abs(series.value).max()
data = (series.value * scale).astype('float32')
return wavfile.write(output, fsamp, data) | python | def write(series, output, scale=None):
"""Write a `TimeSeries` to a WAV file
Parameters
----------
series : `TimeSeries`
the series to write
output : `file`, `str`
the file object or filename to write to
scale : `float`, optional
the factor to apply to scale the data to (-1.0, 1.0),
pass `scale=1` to not apply any scale, otherwise
the data will be auto-scaled
See also
--------
scipy.io.wavfile.write
for details on how the WAV file is actually written
Examples
--------
>>> from gwpy.timeseries import TimeSeries
>>> t = TimeSeries([1, 2, 3, 4, 5])
>>> t = TimeSeries.write('test.wav')
"""
fsamp = int(series.sample_rate.decompose().value)
if scale is None:
scale = 1 / numpy.abs(series.value).max()
data = (series.value * scale).astype('float32')
return wavfile.write(output, fsamp, data) | ['def', 'write', '(', 'series', ',', 'output', ',', 'scale', '=', 'None', ')', ':', 'fsamp', '=', 'int', '(', 'series', '.', 'sample_rate', '.', 'decompose', '(', ')', '.', 'value', ')', 'if', 'scale', 'is', 'None', ':', 'scale', '=', '1', '/', 'numpy', '.', 'abs', '(', 'series', '.', 'value', ')', '.', 'max', '(', ')', 'data', '=', '(', 'series', '.', 'value', '*', 'scale', ')', '.', 'astype', '(', "'float32'", ')', 'return', 'wavfile', '.', 'write', '(', 'output', ',', 'fsamp', ',', 'data', ')'] | Write a `TimeSeries` to a WAV file
Parameters
----------
series : `TimeSeries`
the series to write
output : `file`, `str`
the file object or filename to write to
scale : `float`, optional
the factor to apply to scale the data to (-1.0, 1.0),
pass `scale=1` to not apply any scale, otherwise
the data will be auto-scaled
See also
--------
scipy.io.wavfile.write
for details on how the WAV file is actually written
Examples
--------
>>> from gwpy.timeseries import TimeSeries
>>> t = TimeSeries([1, 2, 3, 4, 5])
>>> t = TimeSeries.write('test.wav') | ['Write', 'a', 'TimeSeries', 'to', 'a', 'WAV', 'file'] | train | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/wav.py#L60-L91 |
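Underneath, the writer above calls `scipy.io.wavfile.write` with a float32 array scaled into (-1.0, 1.0). A standalone sketch with a made-up tone as the signal:

```python
import numpy as np
from scipy.io import wavfile

fsamp = 4096
t = np.arange(fsamp) / fsamp
data = np.sin(2 * np.pi * 440 * t).astype("float32")

# Scale to (-1.0, 1.0) and write a one-second 32-bit float WAV file.
wavfile.write("tone.wav", fsamp, data / np.abs(data).max())
```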
7,491 | saltstack/salt | salt/modules/event.py | fire | def fire(data, tag, timeout=None):
'''
Fire an event on the local minion event bus. Data must be formed as a dict.
CLI Example:
.. code-block:: bash
salt '*' event.fire '{"data":"my event data"}' 'tag'
'''
if timeout is None:
timeout = 60000
else:
timeout = timeout * 1000
try:
event = salt.utils.event.get_event(__opts__.get('__role', 'minion'),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
opts=__opts__,
keep_loop=True,
listen=False)
return event.fire_event(data, tag, timeout=timeout)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
log.debug(lines)
return False | python | def fire(data, tag, timeout=None):
'''
Fire an event on the local minion event bus. Data must be formed as a dict.
CLI Example:
.. code-block:: bash
salt '*' event.fire '{"data":"my event data"}' 'tag'
'''
if timeout is None:
timeout = 60000
else:
timeout = timeout * 1000
try:
event = salt.utils.event.get_event(__opts__.get('__role', 'minion'),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
opts=__opts__,
keep_loop=True,
listen=False)
return event.fire_event(data, tag, timeout=timeout)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
log.debug(lines)
return False | ['def', 'fire', '(', 'data', ',', 'tag', ',', 'timeout', '=', 'None', ')', ':', 'if', 'timeout', 'is', 'None', ':', 'timeout', '=', '60000', 'else', ':', 'timeout', '=', 'timeout', '*', '1000', 'try', ':', 'event', '=', 'salt', '.', 'utils', '.', 'event', '.', 'get_event', '(', '__opts__', '.', 'get', '(', "'__role'", ',', "'minion'", ')', ',', 'sock_dir', '=', '__opts__', '[', "'sock_dir'", ']', ',', 'transport', '=', '__opts__', '[', "'transport'", ']', ',', 'opts', '=', '__opts__', ',', 'keep_loop', '=', 'True', ',', 'listen', '=', 'False', ')', 'return', 'event', '.', 'fire_event', '(', 'data', ',', 'tag', ',', 'timeout', '=', 'timeout', ')', 'except', 'Exception', ':', 'exc_type', ',', 'exc_value', ',', 'exc_traceback', '=', 'sys', '.', 'exc_info', '(', ')', 'lines', '=', 'traceback', '.', 'format_exception', '(', 'exc_type', ',', 'exc_value', ',', 'exc_traceback', ')', 'log', '.', 'debug', '(', 'lines', ')', 'return', 'False'] | Fire an event on the local minion event bus. Data must be formed as a dict.
CLI Example:
.. code-block:: bash
salt '*' event.fire '{"data":"my event data"}' 'tag' | ['Fire', 'an', 'event', 'on', 'the', 'local', 'minion', 'event', 'bus', '.', 'Data', 'must', 'be', 'formed', 'as', 'a', 'dict', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/event.py#L101-L128 |
7,492 | ryan-roemer/django-cloud-browser | cloud_browser/cloud/fs.py | FilesystemConnection._get_containers | def _get_containers(self):
"""Return available containers."""
def full_fn(path):
return os.path.join(self.abs_root, path)
return [self.cont_cls.from_path(self, d)
for d in os.listdir(self.abs_root) if is_dir(full_fn(d))] | python | def _get_containers(self):
"""Return available containers."""
def full_fn(path):
return os.path.join(self.abs_root, path)
return [self.cont_cls.from_path(self, d)
for d in os.listdir(self.abs_root) if is_dir(full_fn(d))] | ['def', '_get_containers', '(', 'self', ')', ':', 'def', 'full_fn', '(', 'path', ')', ':', 'return', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'abs_root', ',', 'path', ')', 'return', '[', 'self', '.', 'cont_cls', '.', 'from_path', '(', 'self', ',', 'd', ')', 'for', 'd', 'in', 'os', '.', 'listdir', '(', 'self', '.', 'abs_root', ')', 'if', 'is_dir', '(', 'full_fn', '(', 'd', ')', ')', ']'] | Return available containers. | ['Return', 'available', 'containers', '.'] | train | https://github.com/ryan-roemer/django-cloud-browser/blob/b06cdd24885a6309e843ed924dbf1705b67e7f48/cloud_browser/cloud/fs.py#L142-L148 |
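The directory filter in `_get_containers` reduces to a standard-library idiom: list the root, keep only entries that are directories. A minimal sketch (the root path is a placeholder):

```python
import os

root = "."
subdirs = [d for d in os.listdir(root)
           if os.path.isdir(os.path.join(root, d))]
print(subdirs)   # immediate subdirectories only; files are skipped
```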
7,493 | kata198/AdvancedHTMLParser | AdvancedHTMLParser/Formatter.py | AdvancedHTMLFormatter.handle_data | def handle_data(self, data):
'''
handle_data - Internal for parsing
'''
if data:
inTag = self._inTag
if len(inTag) > 0:
if inTag[-1].tagName not in PRESERVE_CONTENTS_TAGS:
data = data.replace('\t', ' ').strip('\r\n')
if data.startswith(' '):
data = ' ' + data.lstrip()
if data.endswith(' '):
data = data.rstrip() + ' '
inTag[-1].appendText(data)
elif data.strip():
# Must be text prior to or after root node
raise MultipleRootNodeException() | python | def handle_data(self, data):
'''
handle_data - Internal for parsing
'''
if data:
inTag = self._inTag
if len(inTag) > 0:
if inTag[-1].tagName not in PRESERVE_CONTENTS_TAGS:
data = data.replace('\t', ' ').strip('\r\n')
if data.startswith(' '):
data = ' ' + data.lstrip()
if data.endswith(' '):
data = data.rstrip() + ' '
inTag[-1].appendText(data)
elif data.strip():
# Must be text prior to or after root node
raise MultipleRootNodeException() | ['def', 'handle_data', '(', 'self', ',', 'data', ')', ':', 'if', 'data', ':', 'inTag', '=', 'self', '.', '_inTag', 'if', 'len', '(', 'inTag', ')', '>', '0', ':', 'if', 'inTag', '[', '-', '1', ']', '.', 'tagName', 'not', 'in', 'PRESERVE_CONTENTS_TAGS', ':', 'data', '=', 'data', '.', 'replace', '(', "'\\t'", ',', "' '", ')', '.', 'strip', '(', "'\\r\\n'", ')', 'if', 'data', '.', 'startswith', '(', "' '", ')', ':', 'data', '=', "' '", '+', 'data', '.', 'lstrip', '(', ')', 'if', 'data', '.', 'endswith', '(', "' '", ')', ':', 'data', '=', 'data', '.', 'rstrip', '(', ')', '+', "' '", 'inTag', '[', '-', '1', ']', '.', 'appendText', '(', 'data', ')', 'elif', 'data', '.', 'strip', '(', ')', ':', '# Must be text prior to or after root node', 'raise', 'MultipleRootNodeException', '(', ')'] | handle_data - Internal for parsing | ['handle_data', '-', 'Internal', 'for', 'parsing'] | train | https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Formatter.py#L223-L239 |
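`handle_data` above overrides the hook inherited from the standard library's HTMLParser. A bare-bones subclass shows when that hook fires (class and attribute names are illustrative):

```python
from html.parser import HTMLParser

class TextCollector(HTMLParser):
    def __init__(self):
        super().__init__()
        self.chunks = []

    def handle_data(self, data):
        # Called for every run of text between tags.
        if data.strip():
            self.chunks.append(data.strip())

p = TextCollector()
p.feed("<p>Hello <b>world</b></p>")
print(p.chunks)   # ['Hello', 'world']
```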
7,494 | proycon/pynlpl | pynlpl/evaluation.py | auc | def auc(x, y, reorder=False): #from sklearn, http://scikit-learn.org, licensed under BSD License
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the area
under the ROC-curve, see :func:`auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
auc_score : Computes the area under the ROC curve
"""
if np is None:
raise ImportError("No numpy installed")
# XXX: Consider using ``scipy.integrate`` instead, or moving to
# ``utils.extmath``
if not isinstance(x, np.ndarray): x = np.array(x)
if not isinstance(x, np.ndarray): y = np.array(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
x, y = np.array(sorted(points for points in zip(x, y))).T
h = np.diff(x)
else:
h = np.diff(x)
if np.any(h < 0):
h *= -1
assert not np.any(h < 0), ("Reordering is not turned on, and "
"The x array is not increasing: %s" % x)
area = np.sum(h * (y[1:] + y[:-1])) / 2.0
return area | python | def auc(x, y, reorder=False): #from sklearn, http://scikit-learn.org, licensed under BSD License
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the area
under the ROC-curve, see :func:`auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
auc_score : Computes the area under the ROC curve
"""
if np is None:
raise ImportError("No numpy installed")
# XXX: Consider using ``scipy.integrate`` instead, or moving to
# ``utils.extmath``
if not isinstance(x, np.ndarray): x = np.array(x)
if not isinstance(x, np.ndarray): y = np.array(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
x, y = np.array(sorted(points for points in zip(x, y))).T
h = np.diff(x)
else:
h = np.diff(x)
if np.any(h < 0):
h *= -1
assert not np.any(h < 0), ("Reordering is not turned on, and "
"The x array is not increasing: %s" % x)
area = np.sum(h * (y[1:] + y[:-1])) / 2.0
return area | ['def', 'auc', '(', 'x', ',', 'y', ',', 'reorder', '=', 'False', ')', ':', '#from sklearn, http://scikit-learn.org, licensed under BSD License', 'if', 'np', 'is', 'None', ':', 'raise', 'ImportError', '(', '"No numpy installed"', ')', '# XXX: Consider using ``scipy.integrate`` instead, or moving to', '# ``utils.extmath``', 'if', 'not', 'isinstance', '(', 'x', ',', 'np', '.', 'ndarray', ')', ':', 'x', '=', 'np', '.', 'array', '(', 'x', ')', 'if', 'not', 'isinstance', '(', 'x', ',', 'np', '.', 'ndarray', ')', ':', 'y', '=', 'np', '.', 'array', '(', 'y', ')', 'if', 'x', '.', 'shape', '[', '0', ']', '<', '2', ':', 'raise', 'ValueError', '(', "'At least 2 points are needed to compute'", "' area under curve, but x.shape = %s'", '%', 'x', '.', 'shape', ')', 'if', 'reorder', ':', '# reorder the data points according to the x axis and using y to', '# break ties', 'x', ',', 'y', '=', 'np', '.', 'array', '(', 'sorted', '(', 'points', 'for', 'points', 'in', 'zip', '(', 'x', ',', 'y', ')', ')', ')', '.', 'T', 'h', '=', 'np', '.', 'diff', '(', 'x', ')', 'else', ':', 'h', '=', 'np', '.', 'diff', '(', 'x', ')', 'if', 'np', '.', 'any', '(', 'h', '<', '0', ')', ':', 'h', '*=', '-', '1', 'assert', 'not', 'np', '.', 'any', '(', 'h', '<', '0', ')', ',', '(', '"Reordering is not turned on, and "', '"The x array is not increasing: %s"', '%', 'x', ')', 'area', '=', 'np', '.', 'sum', '(', 'h', '*', '(', 'y', '[', '1', ':', ']', '+', 'y', '[', ':', '-', '1', ']', ')', ')', '/', '2.0', 'return', 'area'] | Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the area
under the ROC-curve, see :func:`auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
auc_score : Computes the area under the ROC curve | ['Compute', 'Area', 'Under', 'the', 'Curve', '(', 'AUC', ')', 'using', 'the', 'trapezoidal', 'rule'] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/evaluation.py#L46-L107 |
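A minimal usage sketch for the auc function in the record above (illustrative only; it assumes numpy is installed and that the function is importable from pynlpl.evaluation, per the record's source URL):

from pynlpl.evaluation import auc   # import path taken from the record's source URL

# Area under a simple ramp from (0, 0) to (2, 2): trapezoids of area 0.5 and 1.5
print(auc([0.0, 1.0, 2.0], [0.0, 1.0, 2.0]))   # 2.0
# Strictly decreasing x is tolerated because negative step widths are flipped
print(auc([2.0, 1.0, 0.0], [2.0, 1.0, 0.0]))   # 2.0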
7,495 | andreagrandi/toshl-python | toshl/account.py | Account.get | def get(self, account_id):
"""
Return a specific account given its ID
"""
response = self.client._make_request('/accounts/{0}'.format(account_id))
return response.json() | python | def get(self, account_id):
"""
Return a specific account given its ID
"""
response = self.client._make_request('/accounts/{0}'.format(account_id))
return response.json() | ['def', 'get', '(', 'self', ',', 'account_id', ')', ':', 'response', '=', 'self', '.', 'client', '.', '_make_request', '(', "'/accounts/{0}'", '.', 'format', '(', 'account_id', ')', ')', 'return', 'response', '.', 'json', '(', ')'] | Return a specific account given its ID | ['Return', 'a', 'specific', 'account', 'given', 'its', 'ID'] | train | https://github.com/andreagrandi/toshl-python/blob/16a2aef8a0d389db73db3253b0bea3fcc33cc2bf/toshl/account.py#L23-L28 |
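A hedged call sketch for Account.get above; how the Toshl client object is built is not shown in this record, so that part is only a placeholder:

from toshl.account import Account        # module path taken from the record
client = make_authenticated_client()     # placeholder: any object exposing _make_request(), per the method body
account = Account(client)                # constructor assumed; the record only shows that self.client is used
print(account.get('12345'))              # decoded JSON for account id '12345'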
7,496 | MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.find_skill | def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill | python | def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill | ['def', 'find_skill', '(', 'self', ',', 'param', ',', 'author', '=', 'None', ',', 'skills', '=', 'None', ')', ':', '# type: (str, str, List[SkillEntry]) -> SkillEntry', 'if', 'param', '.', 'startswith', '(', "'https://'", ')', 'or', 'param', '.', 'startswith', '(', "'http://'", ')', ':', 'repo_id', '=', 'SkillEntry', '.', 'extract_repo_id', '(', 'param', ')', 'for', 'skill', 'in', 'self', '.', 'list', '(', ')', ':', 'if', 'skill', '.', 'id', '==', 'repo_id', ':', 'return', 'skill', 'name', '=', 'SkillEntry', '.', 'extract_repo_name', '(', 'param', ')', 'path', '=', 'SkillEntry', '.', 'create_path', '(', 'self', '.', 'skills_dir', ',', 'param', ')', 'return', 'SkillEntry', '(', 'name', ',', 'path', ',', 'param', ',', 'msm', '=', 'self', ')', 'else', ':', 'skill_confs', '=', '{', 'skill', ':', 'skill', '.', 'match', '(', 'param', ',', 'author', ')', 'for', 'skill', 'in', 'skills', 'or', 'self', '.', 'list', '(', ')', '}', 'best_skill', ',', 'score', '=', 'max', '(', 'skill_confs', '.', 'items', '(', ')', ',', 'key', '=', 'lambda', 'x', ':', 'x', '[', '1', ']', ')', 'LOG', '.', 'info', '(', "'Best match ({}): {} by {}'", '.', 'format', '(', 'round', '(', 'score', ',', '2', ')', ',', 'best_skill', '.', 'name', ',', 'best_skill', '.', 'author', ')', ')', 'if', 'score', '<', '0.3', ':', 'raise', 'SkillNotFound', '(', 'param', ')', 'low_bound', '=', '(', 'score', '*', '0.7', ')', 'if', 'score', '!=', '1.0', 'else', '1.0', 'close_skills', '=', '[', 'skill', 'for', 'skill', ',', 'conf', 'in', 'skill_confs', '.', 'items', '(', ')', 'if', 'conf', '>=', 'low_bound', 'and', 'skill', '!=', 'best_skill', ']', 'if', 'close_skills', ':', 'raise', 'MultipleSkillMatches', '(', '[', 'best_skill', ']', '+', 'close_skills', ')', 'return', 'best_skill'] | Find skill by name or url | ['Find', 'skill', 'by', 'name', 'or', 'url'] | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L332-L362 |
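The method above scores every known skill, rejects matches below 0.3, and treats anything within 70% of the best score as ambiguous. A small standalone sketch of that thresholding (skill names and scores are invented for illustration):

scores = {'weather-skill': 0.92, 'weather-forecast': 0.70, 'timer-skill': 0.10}
best, score = max(scores.items(), key=lambda x: x[1])
low_bound = (score * 0.7) if score != 1.0 else 1.0      # 0.644 here
close = [name for name, conf in scores.items() if conf >= low_bound and name != best]
# best == 'weather-skill'; close == ['weather-forecast'], so find_skill would raise MultipleSkillMatches
# with only the best match above low_bound it would simply return best_skill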
7,497 | mitsei/dlkit | dlkit/services/repository.py | Repository.use_federated_repository_view | def use_federated_repository_view(self):
"""Pass through to provider AssetLookupSession.use_federated_repository_view"""
self._repository_view = FEDERATED
# self._get_provider_session('asset_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_federated_repository_view()
except AttributeError:
pass | python | def use_federated_repository_view(self):
"""Pass through to provider AssetLookupSession.use_federated_repository_view"""
self._repository_view = FEDERATED
# self._get_provider_session('asset_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_federated_repository_view()
except AttributeError:
pass | ['def', 'use_federated_repository_view', '(', 'self', ')', ':', 'self', '.', '_repository_view', '=', 'FEDERATED', "# self._get_provider_session('asset_lookup_session') # To make sure the session is tracked", 'for', 'session', 'in', 'self', '.', '_get_provider_sessions', '(', ')', ':', 'try', ':', 'session', '.', 'use_federated_repository_view', '(', ')', 'except', 'AttributeError', ':', 'pass'] | Pass through to provider AssetLookupSession.use_federated_repository_view | ['Pass', 'through', 'to', 'provider', 'AssetLookupSession', '.', 'use_federated_repository_view'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/repository.py#L1483-L1491 |
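A minimal call sketch for the pass-through above, assuming repo is a Repository service object already obtained from a dlkit manager (not shown in this record):

repo = get_repository()                  # placeholder: however the service Repository is normally obtained
repo.use_federated_repository_view()     # sets the FEDERATED flag and forwards it to every provider session
# provider sessions that do not implement the method are silently skipped (the AttributeError is swallowed)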
7,498 | mikedh/trimesh | trimesh/grouping.py | unique_rows | def unique_rows(data, digits=None):
"""
Returns indices of unique rows. It will return the
first occurrence of a row that is duplicated:
[[1,2], [3,4], [1,2]] will return [0,1]
Parameters
---------
data: (n,m) set of floating point data
digits: how many digits to consider for the purposes of uniqueness
Returns
--------
unique: (j) array, index in data which is a unique row
inverse: (n) length array to reconstruct original
example: unique[inverse] == data
"""
hashes = hashable_rows(data, digits=digits)
garbage, unique, inverse = np.unique(hashes,
return_index=True,
return_inverse=True)
return unique, inverse | python | def unique_rows(data, digits=None):
"""
Returns indices of unique rows. It will return the
first occurrence of a row that is duplicated:
[[1,2], [3,4], [1,2]] will return [0,1]
Parameters
---------
data: (n,m) set of floating point data
digits: how many digits to consider for the purposes of uniqueness
Returns
--------
unique: (j) array, index in data which is a unique row
inverse: (n) length array to reconstruct original
example: unique[inverse] == data
"""
hashes = hashable_rows(data, digits=digits)
garbage, unique, inverse = np.unique(hashes,
return_index=True,
return_inverse=True)
return unique, inverse | ['def', 'unique_rows', '(', 'data', ',', 'digits', '=', 'None', ')', ':', 'hashes', '=', 'hashable_rows', '(', 'data', ',', 'digits', '=', 'digits', ')', 'garbage', ',', 'unique', ',', 'inverse', '=', 'np', '.', 'unique', '(', 'hashes', ',', 'return_index', '=', 'True', ',', 'return_inverse', '=', 'True', ')', 'return', 'unique', ',', 'inverse'] | Returns indices of unique rows. It will return the
first occurrence of a row that is duplicated:
[[1,2], [3,4], [1,2]] will return [0,1]
Parameters
---------
data: (n,m) set of floating point data
digits: how many digits to consider for the purposes of uniqueness
Returns
--------
unique: (j) array, index in data which is a unique row
inverse: (n) length array to reconstruct original
example: unique[inverse] == data | ['Returns', 'indices', 'of', 'unique', 'rows', '.', 'It', 'will', 'return', 'the', 'first', 'occurrence', 'of', 'a', 'row', 'that', 'is', 'duplicated', ':', '[[', '1', '2', ']', '[', '3', '4', ']', '[', '1', '2', ']]', 'will', 'return', '[', '0', '1', ']'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/grouping.py#L380-L401 |
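A quick check of the documented behaviour; this assumes numpy is installed and imports the function from trimesh.grouping, per the record's source URL:

import numpy as np
from trimesh.grouping import unique_rows   # module path taken from the record's URL

data = np.array([[1.0, 2.0], [3.0, 4.0], [1.0, 2.0]])
unique, inverse = unique_rows(data)
# unique holds one index per distinct row (two entries here, pointing at first occurrences)
# inverse maps every original row back into unique, so this reconstructs the input:
assert np.allclose(data[unique][inverse], data)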
7,499 | erdewit/ib_insync | ib_insync/util.py | barplot | def barplot(bars, title='', upColor='blue', downColor='red'):
"""
Create candlestick plot for the given bars. The bars can be given as
a DataFrame or as a list of bar objects.
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
if isinstance(bars, pd.DataFrame):
ohlcTups = [
tuple(v) for v in bars[['open', 'high', 'low', 'close']].values]
elif bars and hasattr(bars[0], 'open_'):
ohlcTups = [(b.open_, b.high, b.low, b.close) for b in bars]
else:
ohlcTups = [(b.open, b.high, b.low, b.close) for b in bars]
fig, ax = plt.subplots()
ax.set_title(title)
ax.grid(True)
fig.set_size_inches(10, 6)
for n, (open_, high, low, close) in enumerate(ohlcTups):
if close >= open_:
color = upColor
bodyHi, bodyLo = close, open_
else:
color = downColor
bodyHi, bodyLo = open_, close
line = Line2D(
xdata=(n, n),
ydata=(low, bodyLo),
color=color,
linewidth=1)
ax.add_line(line)
line = Line2D(
xdata=(n, n),
ydata=(high, bodyHi),
color=color,
linewidth=1)
ax.add_line(line)
rect = Rectangle(
xy=(n - 0.3, bodyLo),
width=0.6,
height=bodyHi - bodyLo,
edgecolor=color,
facecolor=color,
alpha=0.4,
antialiased=True
)
ax.add_patch(rect)
ax.autoscale_view()
return fig | python | def barplot(bars, title='', upColor='blue', downColor='red'):
"""
Create candlestick plot for the given bars. The bars can be given as
a DataFrame or as a list of bar objects.
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
if isinstance(bars, pd.DataFrame):
ohlcTups = [
tuple(v) for v in bars[['open', 'high', 'low', 'close']].values]
elif bars and hasattr(bars[0], 'open_'):
ohlcTups = [(b.open_, b.high, b.low, b.close) for b in bars]
else:
ohlcTups = [(b.open, b.high, b.low, b.close) for b in bars]
fig, ax = plt.subplots()
ax.set_title(title)
ax.grid(True)
fig.set_size_inches(10, 6)
for n, (open_, high, low, close) in enumerate(ohlcTups):
if close >= open_:
color = upColor
bodyHi, bodyLo = close, open_
else:
color = downColor
bodyHi, bodyLo = open_, close
line = Line2D(
xdata=(n, n),
ydata=(low, bodyLo),
color=color,
linewidth=1)
ax.add_line(line)
line = Line2D(
xdata=(n, n),
ydata=(high, bodyHi),
color=color,
linewidth=1)
ax.add_line(line)
rect = Rectangle(
xy=(n - 0.3, bodyLo),
width=0.6,
height=bodyHi - bodyLo,
edgecolor=color,
facecolor=color,
alpha=0.4,
antialiased=True
)
ax.add_patch(rect)
ax.autoscale_view()
return fig | ['def', 'barplot', '(', 'bars', ',', 'title', '=', "''", ',', 'upColor', '=', "'blue'", ',', 'downColor', '=', "'red'", ')', ':', 'import', 'pandas', 'as', 'pd', 'import', 'matplotlib', '.', 'pyplot', 'as', 'plt', 'from', 'matplotlib', '.', 'lines', 'import', 'Line2D', 'from', 'matplotlib', '.', 'patches', 'import', 'Rectangle', 'if', 'isinstance', '(', 'bars', ',', 'pd', '.', 'DataFrame', ')', ':', 'ohlcTups', '=', '[', 'tuple', '(', 'v', ')', 'for', 'v', 'in', 'bars', '[', '[', "'open'", ',', "'high'", ',', "'low'", ',', "'close'", ']', ']', '.', 'values', ']', 'elif', 'bars', 'and', 'hasattr', '(', 'bars', '[', '0', ']', ',', "'open_'", ')', ':', 'ohlcTups', '=', '[', '(', 'b', '.', 'open_', ',', 'b', '.', 'high', ',', 'b', '.', 'low', ',', 'b', '.', 'close', ')', 'for', 'b', 'in', 'bars', ']', 'else', ':', 'ohlcTups', '=', '[', '(', 'b', '.', 'open', ',', 'b', '.', 'high', ',', 'b', '.', 'low', ',', 'b', '.', 'close', ')', 'for', 'b', 'in', 'bars', ']', 'fig', ',', 'ax', '=', 'plt', '.', 'subplots', '(', ')', 'ax', '.', 'set_title', '(', 'title', ')', 'ax', '.', 'grid', '(', 'True', ')', 'fig', '.', 'set_size_inches', '(', '10', ',', '6', ')', 'for', 'n', ',', '(', 'open_', ',', 'high', ',', 'low', ',', 'close', ')', 'in', 'enumerate', '(', 'ohlcTups', ')', ':', 'if', 'close', '>=', 'open_', ':', 'color', '=', 'upColor', 'bodyHi', ',', 'bodyLo', '=', 'close', ',', 'open_', 'else', ':', 'color', '=', 'downColor', 'bodyHi', ',', 'bodyLo', '=', 'open_', ',', 'close', 'line', '=', 'Line2D', '(', 'xdata', '=', '(', 'n', ',', 'n', ')', ',', 'ydata', '=', '(', 'low', ',', 'bodyLo', ')', ',', 'color', '=', 'color', ',', 'linewidth', '=', '1', ')', 'ax', '.', 'add_line', '(', 'line', ')', 'line', '=', 'Line2D', '(', 'xdata', '=', '(', 'n', ',', 'n', ')', ',', 'ydata', '=', '(', 'high', ',', 'bodyHi', ')', ',', 'color', '=', 'color', ',', 'linewidth', '=', '1', ')', 'ax', '.', 'add_line', '(', 'line', ')', 'rect', '=', 'Rectangle', '(', 'xy', '=', '(', 'n', '-', '0.3', ',', 'bodyLo', ')', ',', 'width', '=', '0.6', ',', 'height', '=', 'bodyHi', '-', 'bodyLo', ',', 'edgecolor', '=', 'color', ',', 'facecolor', '=', 'color', ',', 'alpha', '=', '0.4', ',', 'antialiased', '=', 'True', ')', 'ax', '.', 'add_patch', '(', 'rect', ')', 'ax', '.', 'autoscale_view', '(', ')', 'return', 'fig'] | Create candlestick plot for the given bars. The bars can be given as
a DataFrame or as a list of bar objects. | ['Create', 'candlestick', 'plot', 'for', 'the', 'given', 'bars', '.', 'The', 'bars', 'can', 'be', 'given', 'as', 'a', 'DataFrame', 'or', 'as', 'a', 'list', 'of', 'bar', 'objects', '.'] | train | https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/util.py#L72-L125 |
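A hedged usage sketch for barplot above with a hand-made DataFrame; displaying or saving the figure requires a working matplotlib backend:

import pandas as pd
from ib_insync.util import barplot   # module path taken from the record's URL

bars = pd.DataFrame({
    'open':  [10.0, 10.5, 10.2],
    'high':  [10.8, 10.9, 10.6],
    'low':   [9.9, 10.1, 10.0],
    'close': [10.5, 10.2, 10.4],
})
fig = barplot(bars, title='Toy candles')
fig.savefig('candles.png')   # or plt.show() with an interactive backend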