Dataset columns (name: dtype, value range):

Unnamed: 0: int64, 0 to 10k
repository_name: string, lengths 7 to 54
func_path_in_repository: string, lengths 5 to 223
func_name: string, lengths 1 to 134
whole_func_string: string, lengths 100 to 30.3k
language: string, 1 distinct value
func_code_string: string, lengths 100 to 30.3k
func_code_tokens: string, lengths 138 to 33.2k
func_documentation_string: string, lengths 1 to 15k
func_documentation_tokens: string, lengths 5 to 5.14k
split_name: string, 1 distinct value
func_code_url: string, lengths 91 to 315
4,700
Esri/ArcREST
src/arcrest/cmp/community.py
CommunityMapsProgram.metadataURL
def metadataURL(self, value):
    """gets/sets the public metadata url"""
    if value != self._metadataURL:
        self._metadataURL = value
        self._metaFS = None
python
gets/sets the public metadata url
train
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/cmp/community.py#L101-L105
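The setter above is one half of ArcREST's gets/sets property pattern: changing the URL also drops the cached feature service (_metaFS) so it can be rebuilt lazily on next access. A minimal self-contained sketch of that invalidate-on-set pattern (class and attribute names here are illustrative, not ArcREST's API):

class MetadataHolder:
    """Minimal sketch of the invalidate-on-set caching pattern."""
    def __init__(self, url):
        self._metadataURL = url
        self._metaFS = None          # lazily built, cached object

    @property
    def metadataURL(self):
        return self._metadataURL

    @metadataURL.setter
    def metadataURL(self, value):
        if value != self._metadataURL:
            self._metadataURL = value
            self._metaFS = None      # drop the stale cache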
4,701
bitcraze/crazyflie-lib-python
cflib/crazyflie/mem.py
LocoMemory2.update_data
def update_data(self, update_data_finished_cb):
    """Request an update of the anchor data"""
    if not self._update_data_finished_cb and self.nr_of_anchors > 0:
        self._update_data_finished_cb = update_data_finished_cb
        self.anchor_data = {}
        self.data_valid = False
        self._nr_of_anchors_to_fetch = self.nr_of_anchors
        logger.debug('Updating anchor data of memory {}'.format(self.id))

        # Start reading the first anchor
        self._currently_fetching_index = 0
        self._request_page(self.anchor_ids[self._currently_fetching_index])
python
Request an update of the anchor data
train
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/mem.py#L604-L617
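A hedged usage sketch for update_data: the finished callback is stored and, judging by the field name, invoked once all anchor pages have been fetched. The exact callback signature is an assumption, and mem stands for a LocoMemory2 instance obtained from the Crazyflie memory subsystem:

def _anchor_data_updated(mem):
    # Assumption: the callback receives the memory object once anchor_data
    # is complete and data_valid has been set.
    for anchor_id, anchor in mem.anchor_data.items():
        print(anchor_id, anchor)

mem.update_data(_anchor_data_updated)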
4,702
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
xmlTextReader.SetParserProp
def SetParserProp(self, prop, value):
    """Change the parser processing behaviour by changing some of its
       internal properties. Note that some properties can only be changed
       before any read has been done. """
    ret = libxml2mod.xmlTextReaderSetParserProp(self._o, prop, value)
    return ret
python
Change the parser processing behaviour by changing some of its internal properties. Note that some properties can only be changed before any read has been done.
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L6903-L6908
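A short usage sketch, assuming the standard libxml2 Python bindings and their parser-property constants (e.g. libxml2.PARSER_VALIDATE); per the docstring, the property must be set before the first Read():

import libxml2

reader = libxml2.newTextReaderFilename('document.xml')
# Enable DTD validation before any read has been done
reader.SetParserProp(libxml2.PARSER_VALIDATE, 1)
while reader.Read() == 1:
    pass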
4,703
backbohne/docx-xslt
docxxslt/engines.py
XslEngine.render
def render(self, xml, context, raise_on_errors=True):
    """Render xml string and apply XSLT transformation with context"""
    if xml:
        self.xml = xml

        # render XSL
        self.render_xsl(self.root, context)

        # create root XSL sheet
        xsl_ns = self.namespaces['xsl']
        rootName = etree.QName(xsl_ns, 'stylesheet')
        root = etree.Element(rootName, nsmap={'xsl': xsl_ns})
        sheet = etree.ElementTree(root)
        template = etree.SubElement(root, etree.QName(xsl_ns, "template"), match='/')

        # put OpenOffice tree into XSLT sheet
        template.append(self.root)
        self.root = root

        # drop XSL styles
        self.remove_style()
        #self.debug(self.xml)

        try:
            # transform XSL
            xsl = etree.XSLT(self.root)
            self.root = xsl(context)
        except etree.Error as e:
            # log errors
            for l in e.error_log:
                self.error("XSLT error at line %s col %s:" % (l.line, l.column))
                self.error("  message: %s" % l.message)
                self.error("  domain: %s (%d)" % (l.domain_name, l.domain))
                self.error('  type: %s (%d)' % (l.type_name, l.type))
                self.error('  level: %s (%d)' % (l.level_name, l.level))
                self.error('  filename: %s' % l.filename)
            if raise_on_errors:
                raise
        return self.xml
    else:
        return xml
python
Render xml string and apply XSLT transformation with context
train
https://github.com/backbohne/docx-xslt/blob/d4cc76776a75b8213660c3c1717d42afe5189e15/docxxslt/engines.py#L105-L151
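The try/except around etree.XSLT mirrors plain lxml usage. A minimal standalone sketch of that compile-then-apply flow (generic lxml, not the XslEngine API; the stylesheet is a toy example):

from lxml import etree

xslt_root = etree.XML(
    '<xsl:stylesheet version="1.0" '
    'xmlns:xsl="http://www.w3.org/1999/XSL/Transform">'
    '<xsl:template match="/"><out><xsl:value-of select="/in"/></out>'
    '</xsl:template></xsl:stylesheet>')

try:
    transform = etree.XSLT(xslt_root)              # compile the stylesheet
    result = transform(etree.XML('<in>hi</in>'))   # apply it to a document
    print(str(result))
except etree.XSLTError as e:
    # Same error-log fields the engine prints above
    for entry in e.error_log:
        print(entry.line, entry.column, entry.message)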
4,704
saltstack/salt
salt/modules/kubernetesmod.py
services
def services(namespace='default', **kwargs):
    '''
    Return a list of kubernetes services defined in the namespace

    CLI Examples::

        salt '*' kubernetes.services
        salt '*' kubernetes.services namespace=default
    '''
    cfg = _setup_conn(**kwargs)
    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.list_namespaced_service(namespace)

        return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')]
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception(
                'Exception when calling '
                'CoreV1Api->list_namespaced_service'
            )
            raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)
python
Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L457-L482
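The docstring gives the CLI form; calling the same execution module from Python goes through Salt's client API. A hedged sketch (requires a running salt master and minions):

import salt.client

# Roughly equivalent to: salt '*' kubernetes.services namespace=default
local = salt.client.LocalClient()
ret = local.cmd('*', 'kubernetes.services', kwarg={'namespace': 'default'})
print(ret)   # {'minion-id': ['service-a', 'service-b', ...], ...}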
4,705
BlueBrain/hpcbench
hpcbench/toolbox/process.py
find_executable
def find_executable(name, names=None, required=True):
    """Utility function to find an executable in PATH
    name: program to find. Use given value if absolute path

    names: list of additional names. For instance

       >>> find_executable('sed', names=['gsed'])

    required: If True, then the function raises an Exception
    if the program is not found else the function returns name if
    the program is not found.
    """
    path_from_env = os.environ.get(name.upper())
    if path_from_env is not None:
        return path_from_env
    names = [name] + (names or [])
    for _name in names:
        if osp.isabs(_name):
            return _name
        paths = os.environ.get('PATH', '').split(os.pathsep)
        eax = find_in_paths(_name, paths)
        if eax:
            return eax
    if required:
        raise NameError('Could not find %s executable' % name)
    else:
        return name
python
Utility function to find an executable in PATH name: program to find. Use given value if absolute path names: list of additional names. For instance >>> find_executable('sed', names=['gsed']) required: If True, then the function raises an Exception if the program is not found else the function returns name if the program is not found.
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/process.py#L28-L54
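A usage sketch showing the three lookup layers in order (environment-variable override, PATH search with fallback names, and the required=False fallback), all following directly from the code above:

import os

# 1. The uppercased environment variable wins over any PATH lookup:
os.environ['SED'] = '/opt/tools/bin/sed'
assert find_executable('sed') == '/opt/tools/bin/sed'
del os.environ['SED']

# 2. Fall back to alternative names, e.g. GNU sed installed as 'gsed':
sed_path = find_executable('sed', names=['gsed'])

# 3. With required=False, a missing program returns the name unchanged:
assert find_executable('no-such-tool', required=False) == 'no-such-tool'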
4,706
vertexproject/synapse
synapse/cortex.py
CoreApi.delTrigger
async def delTrigger(self, iden):
    '''
    Deletes a trigger from the cortex
    '''
    trig = self.cell.triggers.get(iden)
    self._trig_auth_check(trig.get('useriden'))
    self.cell.triggers.delete(iden)
python
Deletes a trigger from the cortex
train
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cortex.py#L217-L223
4,707
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
segment_radial_distances
def segment_radial_distances(neurites, neurite_type=NeuriteType.all, origin=None):
    '''Radial distances of the segments in a collection of neurites'''
    def _seg_rd(sec, pos):
        '''list of radial distances of all segments of a section'''
        # TODO: remove this disable when pylint is fixed
        # pylint: disable=assignment-from-no-return
        mid_pts = np.divide(np.add(sec.points[:-1], sec.points[1:])[:, :3], 2.0)
        return np.sqrt([morphmath.point_dist2(p, pos) for p in mid_pts])

    dist = []
    for n in iter_neurites(neurites, filt=is_type(neurite_type)):
        pos = n.root_node.points[0] if origin is None else origin
        dist.extend([s for ss in n.iter_sections() for s in _seg_rd(ss, pos)])

    return dist
python
Radial distances of the segments in a collection of neurites
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L263-L277
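A worked example of the midpoint arithmetic inside _seg_rd, using plain NumPy; morphmath.point_dist2 is taken here to be the squared Euclidean distance (an assumption based on its name):

import numpy as np

# Two segments given by three consecutive points (x, y, z, radius):
points = np.array([[0., 0., 0., 1.],
                   [2., 0., 0., 1.],
                   [2., 2., 0., 1.]])
origin = np.zeros(3)

mid_pts = np.divide(np.add(points[:-1], points[1:])[:, :3], 2.0)
# -> [[1., 0., 0.], [2., 1., 0.]]
dists = np.sqrt([np.sum((p - origin) ** 2) for p in mid_pts])
print(dists)   # [1.  2.23606798]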
4,708
glormph/msstitch
src/app/writers/pycolator.py
write_percolator_xml
def write_percolator_xml(staticxml, feats, fn):
    """Given the static percolator xml root and process info nodes, and all
    psms and peptides as iterators in a dict {'peptide': pep_iterator,
    'psm': psm_iterator}, this generates percolator out data into a file."""
    # First get xml until psms opening element is found.
    etree.SubElement(staticxml, 'psms').text = '***psms***'
    root = etree.tostring(staticxml, pretty_print=True,
                          xml_declaration=True, encoding='UTF-8')
    root = root.decode('utf-8')
    root = root[:root.find('***psms***')]

    # Write opening xml
    with open(fn, 'w') as fp:
        fp.write(root)
        fp.write('\n')

    # Then write features
    with open(fn, 'a') as fp:
        psmcount = 0
        for psm in feats['psm']:
            psmcount += 1
            fp.write(psm)
            fp.write('\n')

        fp.write('</psms><peptides>\n')
        peptidecount = 0
        for pep in feats['peptide']:
            peptidecount += 1
            fp.write(pep)
            fp.write('\n')

        fp.write('</peptides></percolator_output>')
    print('Wrote {0} psms, {1} peptides to file {2}'.format(psmcount,
                                                            peptidecount, fn))
python
Given the static percolator xml root and process info nodes, and all psms and peptides as iterators in a dict {'peptide': pep_iterator, 'psm': psm_iterator}, this generates percolator out data into a file.
train
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/writers/pycolator.py#L4-L37
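A hypothetical call sketch: feats maps each element type to an iterator of already-serialized XML strings, so large result sets stream to disk instead of being held in memory (psm_elements and peptide_elements are placeholder names, not part of the module):

feats = {
    'psm': (etree.tostring(e).decode('utf-8') for e in psm_elements),
    'peptide': (etree.tostring(e).decode('utf-8') for e in peptide_elements),
}
write_percolator_xml(staticxml, feats, 'percolator_out.xml')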
4,709
KrzyHonk/bpmn-python
bpmn_python/graph/classes/events/throw_event_type.py
ThrowEvent.set_event_definition_list
def set_event_definition_list(self, value):
    """
    Setter for 'event_definition_list' field.
    :param value - a new value of 'event_definition_list' field. Must be a list of EventDefinition objects
    """
    if value is None or not isinstance(value, list):
        raise TypeError("EventDefinitionList new value must be a list")
    else:
        for element in value:
            if not isinstance(element, event_definition.EventDefinition):
                raise TypeError("EventDefinitionList elements in variable must be of EventDefinition class")
        self.__event_definition_list = value
python
Setter for 'event_definition_list' field. :param value - a new value of 'event_definition_list' field. Must be a list of EventDefinition objects
train
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/graph/classes/events/throw_event_type.py#L30-L41
4,710
project-ncl/pnc-cli
pnc_cli/tools/config_utils.py
ConfigReader.get_dependency_structure
def get_dependency_structure(self, artifact=None, include_dependencies=False):
    """Reads dependency structure. If an artifact is passed in you get only its
    dependencies otherwise the complete structure is returned.

    :param artifact: an artifact task or artifact name if only an artifact's deps are needed
    :param include_dependencies: flag to include also dependencies in returned artifacts
                                 and their dependencies in dependencies dict
    :return: tuple of artifact names list and dependencies dictionary where value is a Task list
    """
    artifacts = []
    dependencies_dict = {}
    if artifact:
        if isinstance(artifact, str):
            artifact = self.get_tasks().get_task(artifact)
        artifacts.append(artifact.name)
        dependencies_dict[artifact.name] = artifact.ordered_dependencies()
        if include_dependencies:
            for dep in dependencies_dict[artifact.name]:
                artifacts.append(dep.name)
                dependencies_dict[dep.name] = dep.ordered_dependencies()
    else:
        for key, task in self.get_tasks().tasks.iteritems():
            artifacts.append(task.name)
            dependencies_dict[task.name] = task.ordered_dependencies()
    return artifacts, dependencies_dict
python
Reads dependency structure. If an artifact is passed in you get only its dependencies otherwise the complete structure is returned. :param artifact: an artifact task or artifact name if only an artifact's deps are needed :param include_dependencies: flag to include also dependencies in returned artifacts and their dependencies in dependencies dict :return: tuple of artifact names list and dependencies dictionary where value is a Task list
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/tools/config_utils.py#L149-L176
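A hedged usage sketch of the returned tuple ('reader' stands for a ConfigReader instance; the artifact name is made up):

artifacts, deps = reader.get_dependency_structure('my-artifact',
                                                  include_dependencies=True)
for name in artifacts:
    print(name, '->', [task.name for task in deps[name]])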
4,711
RudolfCardinal/pythonlib
cardinal_pythonlib/psychiatry/treatment_resistant_depression.py
timedelta_days
def timedelta_days(days: int) -> timedelta64:
    """
    Convert a duration in days to a NumPy ``timedelta64`` object.
    """
    int_days = int(days)
    if int_days != days:
        raise ValueError("Fractional days passed to timedelta_days: "
                         "{!r}".format(days))
    try:
        # Do not pass e.g. 27.0; that will raise a ValueError.
        # Must be an actual int:
        return timedelta64(int_days, 'D')
    except ValueError as e:
        raise ValueError("Failure in timedelta_days; value was {!r}; original "
                         "error was: {}".format(days, e))
python
Convert a duration in days to a NumPy ``timedelta64`` object.
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/psychiatry/treatment_resistant_depression.py#L121-L135
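A short demonstration of the whole-number check, following directly from the code above:

from numpy import timedelta64

assert timedelta_days(27) == timedelta64(27, 'D')
assert timedelta_days(27.0) == timedelta64(27, 'D')  # whole floats pass the check
timedelta_days(27.5)  # raises ValueError: fractional days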
4,712
xhtml2pdf/xhtml2pdf
xhtml2pdf/util.py
getFrameDimensions
def getFrameDimensions(data, page_width, page_height):
    """Calculate dimensions of a frame

    Returns left, top, width and height of the frame in points.
    """
    box = data.get("-pdf-frame-box", [])
    if len(box) == 4:
        return [getSize(x) for x in box]
    top = getSize(data.get("top", 0))
    left = getSize(data.get("left", 0))
    bottom = getSize(data.get("bottom", 0))
    right = getSize(data.get("right", 0))
    if "height" in data:
        height = getSize(data["height"])
        if "top" in data:
            top = getSize(data["top"])
            bottom = page_height - (top + height)
        elif "bottom" in data:
            bottom = getSize(data["bottom"])
            top = page_height - (bottom + height)
    if "width" in data:
        width = getSize(data["width"])
        if "left" in data:
            left = getSize(data["left"])
            right = page_width - (left + width)
        elif "right" in data:
            right = getSize(data["right"])
            left = page_width - (right + width)
    top += getSize(data.get("margin-top", 0))
    left += getSize(data.get("margin-left", 0))
    bottom += getSize(data.get("margin-bottom", 0))
    right += getSize(data.get("margin-right", 0))

    width = page_width - (left + right)
    height = page_height - (top + bottom)
    return left, top, width, height
python
Calculate dimensions of a frame Returns left, top, width and height of the frame in points.
train
https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/util.py#L372-L407
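A worked example on an A4 page (595 x 842 pt), using bare numeric values so the arithmetic is easy to follow; in xhtml2pdf, getSize also parses CSS unit strings such as '2cm':

data = {"left": 50, "top": 40, "width": 400, "height": 200}
left, top, width, height = getFrameDimensions(data, 595, 842)
# right  = 595 - (50 + 400) = 145, so width  = 595 - (50 + 145) = 400
# bottom = 842 - (40 + 200) = 602, so height = 842 - (40 + 602) = 200
print(left, top, width, height)   # 50 40 400 200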
4,713
DarkEnergySurvey/ugali
ugali/utils/healpix.py
index_pix_in_pixels
def index_pix_in_pixels(pix, pixels, sort=False, outside=-1):
    """
    Find the indices of a set of pixels into another set of pixels.
    !!! ASSUMES SORTED PIXELS !!!

    Parameters:
    -----------
    pix    : set of search pixels
    pixels : set of reference pixels

    Returns:
    --------
    index : index into the reference pixels
    """
    # ADW: Not really safe to set index = -1 (accesses last entry);
    # -np.inf would be better, but breaks other code...
    # ADW: Are the pixels always sorted? Is there a quick way to check?
    if sort:
        pixels = np.sort(pixels)

    # Assumes that 'pixels' is pre-sorted, otherwise...???
    index = np.searchsorted(pixels, pix)
    if np.isscalar(index):
        if not np.in1d(pix, pixels).any():
            index = outside
    else:
        # Find objects that are outside the pixels
        index[~np.in1d(pix, pixels)] = outside
    return index
python
Find the indices of a set of pixels into another set of pixels. !!! ASSUMES SORTED PIXELS !!! Parameters: ----------- pix : set of search pixels pixels : set of reference pixels Returns: -------- index : index into the reference pixels
train
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L217-L244
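A small worked example of the searchsorted lookup and the outside sentinel:

import numpy as np

pixels = np.array([3, 7, 11, 42])   # reference pixels, already sorted
pix = np.array([7, 42, 5])

index = index_pix_in_pixels(pix, pixels)
print(index)   # [ 1  3 -1]  (5 is not a reference pixel, so it gets 'outside')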
4,714
joshleeb/creditcard
creditcard/formatter.py
is_visa
def is_visa(n):
    """Checks if credit card number fits the visa format."""
    n, length = str(n), len(str(n))

    if length >= 13 and length <= 16:
        if n[0] == '4':
            return True
    return False
python
Checks if credit card number fits the visa format.
train
https://github.com/joshleeb/creditcard/blob/8cff49ba80029026c7e221764eb2387eb2e04a4c/creditcard/formatter.py#L1-L8
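Quick checks using well-known card test numbers (a 4xxx number of 13 to 16 digits passes; anything else fails):

assert is_visa(4111111111111111) is True   # 16 digits, leading 4
assert is_visa('4222222222222') is True    # 13-digit Visa test number
assert is_visa(5555555555554444) is False  # MasterCard prefix
assert is_visa(411) is False               # too short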
4,715
abakan-zz/napi
napi/transformers.py
NapiTransformer.visit_UnaryOp
def visit_UnaryOp(self, node):
    """Interfere with ``not`` operation to :func:`numpy.logical_not`."""
    if isinstance(node.op, Not):
        self._debug('UnaryOp', node.op, incr=1)
        operand = self[node.operand]
        self._debug('|-', operand, incr=2)
        tn = self._tn()
        result = numpy.logical_not(operand)
        self._debug('|_', result, incr=2)
        self[tn] = result
        return ast_name(tn)
    else:
        return self.generic_visit(node)
python
Interfere with ``not`` operation to :func:`numpy.logical_not`.
train
https://github.com/abakan-zz/napi/blob/314da65bd78e2c716b7efb6deaf3816d8f38f7fd/napi/transformers.py#L422-L435
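The point of the rewrite: plain not on a NumPy array is ambiguous and raises, so the transformer substitutes the elementwise version. A two-line illustration:

import numpy as np

a = np.array([True, False, True])
# `not a` raises ValueError for multi-element arrays; the transformer
# rewrites it to the elementwise equivalent:
print(np.logical_not(a))   # [False  True False]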
4,716
NuGrid/NuGridPy
nugridpy/mesa.py
history_data.kippenhahn
def kippenhahn(self, num_frame, xax, t0_model=0,
               title='Kippenhahn diagram', tp_agb=0., t_eps=5.e2,
               plot_star_mass=True, symbol_size=8, c12_bm=False,
               print_legend=True):
    """Kippenhahn plot as a function of time or model.

    Parameters
    ----------
    num_frame : integer
        Number of frame to plot this plot into, if <0 open no new figure.
    xax : string
        Either 'model', 'time' or 'logtimerev' to indicate what is to be
        used on the x-axis.
    t0_model : integer, optional
        If xax = 'time' then model for the zero point in time, for AGB
        plots this would be usually the model of the 1st TP, which can be
        found with the Kippenhahn plot.  The default is 0.
    title : string, optional
        The figure title.  The default is "Kippenhahn diagram".
    tp_agb : float, optional
        If > 0. then, ylim=[h1_min*1.-tp_agb/100 : h1_max*1.+tp_agb/100]
        with h1_min, h1_max the min and max H-free core mass coordinate.
        The default is 0.
    t_eps : float, optional
        Final time for logtimerev.  The default is '5.e2'.
    plot_star_mass : boolean, optional
        If True, then plot the stellar mass as a line as well.  The
        default is True.
    symbol_size : integer, optional
        Size of convection boundary marker.  The default is 8.
    c12_bm : boolean, optional
        If we plot c12_boundary_mass or not.  The default is False.
    print_legend : boolean, optional
        Show or do not show legend.  The default is True.

    """
    if num_frame >= 0:
        pyl.figure(num_frame)

    t0_mod = []

    if xax == 'time':
        xaxisarray = self.get('star_age')
        if t0_model > 0:
            ind = self.get('model_number')
            t0_model = where(ind > t0_model)[0][0]
            t0_mod = xaxisarray[t0_model]
        else:
            t0_mod = 0.
        print('zero time is ' + str(t0_mod))
    elif xax == 'model':
        xaxisarray = self.get('model_number')
        #t0_mod=xaxisarray[t0_model]
        t0_mod = 0.
    elif xax == 'logtimerev':
        xaxi = self.get('star_age')
        xaxisarray = np.log10(np.max(xaxi) + t_eps - xaxi)
        t0_mod = 0.
    else:
        print('kippenhahn_error: invalid string for x-axis selection.' +
              ' needs to be "time" or "model"')

    plot_bounds = True
    try:
        h1_boundary_mass = self.get('h1_boundary_mass')
        he4_boundary_mass = self.get('he4_boundary_mass')
        if c12_bm:
            c12_boundary_mass = self.get('c12_boundary_mass')
    except:
        try:
            h1_boundary_mass = self.get('he_core_mass')
            he4_boundary_mass = self.get('c_core_mass')
            if c12_bm:
                c12_boundary_mass = self.get('o_core_mass')
        except:
            plot_bounds = False

    star_mass = self.get('star_mass')
    mx1_bot = self.get('mx1_bot') * star_mass
    mx1_top = self.get('mx1_top') * star_mass
    mx2_bot = self.get('mx2_bot') * star_mass
    mx2_top = self.get('mx2_top') * star_mass

    if xax == 'time':
        if t0_model > 0:
            pyl.xlabel('$t - t_0$ $\mathrm{[yr]}$')
        else:
            pyl.xlabel('t / yrs')
    elif xax == 'model':
        pyl.xlabel('model number')
    elif xax == 'logtimerev':
        pyl.xlabel('$\log(t_{final} - t)$ $\mathrm{[yr]}$')

    pyl.plot(xaxisarray[t0_model:] - t0_mod, mx1_bot[t0_model:],
             linestyle='None', color='blue', alpha=0.3, marker='o',
             markersize=symbol_size, label='convection zones')
    pyl.plot(xaxisarray[t0_model:] - t0_mod, mx1_top[t0_model:],
             linestyle='None', color='blue', alpha=0.3, marker='o',
             markersize=symbol_size)
    pyl.plot(xaxisarray[t0_model:] - t0_mod, mx2_bot[t0_model:],
             linestyle='None', color='blue', alpha=0.3, marker='o',
             markersize=symbol_size)
    pyl.plot(xaxisarray[t0_model:] - t0_mod, mx2_top[t0_model:],
             linestyle='None', color='blue', alpha=0.3, marker='o',
             markersize=symbol_size)
    if plot_bounds:
        pyl.plot(xaxisarray[t0_model:] - t0_mod, h1_boundary_mass[t0_model:],
                 color='red', linewidth=2, label='H-free core')
        pyl.plot(xaxisarray[t0_model:] - t0_mod, he4_boundary_mass[t0_model:],
                 color='green', linewidth=2, linestyle='dashed',
                 label='He-free core')
        if c12_bm:
            pyl.plot(xaxisarray[t0_model:] - t0_mod,
                     c12_boundary_mass[t0_model:], color='purple',
                     linewidth=2, linestyle='dotted', label='C-free core')
    if plot_star_mass is True:
        pyl.plot(xaxisarray[t0_model:] - t0_mod, star_mass[t0_model:],
                 label='$M_\star$')
    pyl.ylabel('$m_\mathrm{r}/\mathrm{M}_\odot$')
    if print_legend:
        pyl.legend(loc=2)
    if tp_agb > 0.:
        h1_min = min(h1_boundary_mass[t0_model:])
        h1_max = max(h1_boundary_mass[t0_model:])
        h1_min = h1_min * (1. - old_div(tp_agb, 100.))
        h1_max = h1_max * (1. + old_div(tp_agb, 100.))
        print('setting ylim to zoom in on H-burning:', h1_min, h1_max)
        pyl.ylim(h1_min, h1_max)
python
Kippenhahn plot as a function of time or model. Parameters ---------- num_frame : integer Number of frame to plot this plot into, if <0 open no new figure. xax : string Either 'model', 'time' or 'logtimerev' to indicate what is to be used on the x-axis. t0_model : integer, optional If xax = 'time' then model for the zero point in time, for AGB plots this would be usually the model of the 1st TP, which can be found with the Kippenhahn plot. The default is 0. title : string, optional The figure title. The default is "Kippenhahn diagram". tp_agb : float, optional If > 0. then, ylim=[h1_min*1.-tp_agb/100 : h1_max*1.+tp_agb/100] with h1_min, h1_max the min and max H-free core mass coordinate. The default is 0. t_eps : float, optional Final time for logtimerev. The default is '5.e2'. plot_star_mass : boolean, optional If True, then plot the stellar mass as a line as well. The default is True. symbol_size : integer, optional Size of convection boundary marker. The default is 8. c12_bm : boolean, optional If we plot c12_boundary_mass or not. The default is False. print_legend : boolean, optional Show or do not show legend. The default is True.
['Kippenhahn', 'plot', 'as', 'a', 'function', 'of', 'time', 'or', 'model', '.']
train
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1538-L1658
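A minimal usage sketch for the kippenhahn record above (not part of the dataset): it assumes NuGridPy is installed, 'LOGS' is a hypothetical MESA output directory, and history_data is the mesa.py class this method is defined on.

from nugridpy import mesa

h = mesa.history_data('LOGS')  # 'LOGS' is a placeholder path, not from the record
h.kippenhahn(1, 'model')       # Kippenhahn diagram vs. model number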
4,717
python-security/pyt
pyt/cfg/alias_helper.py
handle_aliases_in_init_files
def handle_aliases_in_init_files(name, import_alias_mapping): """Returns either None or the handled alias. Used in add_module. """ for key, val in import_alias_mapping.items(): # e.g. Foo == Foo # e.g. Foo.Bar startswith Foo. if name == val or \ name.startswith(val + '.'): # Replace val with key in name # e.g. StarbucksVisitor.Tea -> Eataly.Tea because # "from .nested_folder import StarbucksVisitor as Eataly" return name.replace(val, key) return None
python
def handle_aliases_in_init_files(name, import_alias_mapping): """Returns either None or the handled alias. Used in add_module. """ for key, val in import_alias_mapping.items(): # e.g. Foo == Foo # e.g. Foo.Bar startswith Foo. if name == val or \ name.startswith(val + '.'): # Replace val with key in name # e.g. StarbucksVisitor.Tea -> Eataly.Tea because # "from .nested_folder import StarbucksVisitor as Eataly" return name.replace(val, key) return None
['def', 'handle_aliases_in_init_files', '(', 'name', ',', 'import_alias_mapping', ')', ':', 'for', 'key', ',', 'val', 'in', 'import_alias_mapping', '.', 'items', '(', ')', ':', '# e.g. Foo == Foo', '# e.g. Foo.Bar startswith Foo.', 'if', 'name', '==', 'val', 'or', 'name', '.', 'startswith', '(', 'val', '+', "'.'", ')', ':', '# Replace val with key in name', '# e.g. StarbucksVisitor.Tea -> Eataly.Tea because', '# "from .nested_folder import StarbucksVisitor as Eataly"', 'return', 'name', '.', 'replace', '(', 'val', ',', 'key', ')', 'return', 'None']
Returns either None or the handled alias. Used in add_module.
['Returns', 'either', 'None', 'or', 'the', 'handled', 'alias', '.', 'Used', 'in', 'add_module', '.']
train
https://github.com/python-security/pyt/blob/efc0cfb716e40e0c8df4098f1cc8cf43723cd31f/pyt/cfg/alias_helper.py#L32-L46
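A small, self-contained sketch of the lookup above (not part of the record); the alias mapping is invented for illustration and mirrors the comment in the function body. It assumes the pyt package is importable.

from pyt.cfg.alias_helper import handle_aliases_in_init_files

mapping = {'Eataly': 'StarbucksVisitor'}
print(handle_aliases_in_init_files('StarbucksVisitor.Tea', mapping))  # 'Eataly.Tea'
print(handle_aliases_in_init_files('Unrelated.Name', mapping))        # None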
4,718
atztogo/phonopy
phonopy/structure/tetrahedron_method.py
TetrahedronMethod._g_3
def _g_3(self): """omega3 < omega < omega4""" # return 3 * (1.0 - self._n_3()) / (self._vertices_omegas[3] - self._omega) return (3 * self._f(1, 3) * self._f(2, 3) / (self._vertices_omegas[3] - self._vertices_omegas[0]))
python
def _g_3(self): """omega3 < omega < omega4""" # return 3 * (1.0 - self._n_3()) / (self._vertices_omegas[3] - self._omega) return (3 * self._f(1, 3) * self._f(2, 3) / (self._vertices_omegas[3] - self._vertices_omegas[0]))
['def', '_g_3', '(', 'self', ')', ':', '# return 3 * (1.0 - self._n_3()) / (self._vertices_omegas[3] - self._omega)', 'return', '(', '3', '*', 'self', '.', '_f', '(', '1', ',', '3', ')', '*', 'self', '.', '_f', '(', '2', ',', '3', ')', '/', '(', 'self', '.', '_vertices_omegas', '[', '3', ']', '-', 'self', '.', '_vertices_omegas', '[', '0', ']', ')', ')']
omega3 < omega < omega4
['omega3', '<', 'omega', '<', 'omega4']
train
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/structure/tetrahedron_method.py#L450-L454
4,719
programa-stic/barf-project
barf/core/smt/smttranslator.py
SmtTranslator.get_name_init
def get_name_init(self, name): """Get initial name of symbol. """ self._register_name(name) return self._var_name_mappers[name].get_init()
python
def get_name_init(self, name): """Get initial name of symbol. """ self._register_name(name) return self._var_name_mappers[name].get_init()
['def', 'get_name_init', '(', 'self', ',', 'name', ')', ':', 'self', '.', '_register_name', '(', 'name', ')', 'return', 'self', '.', '_var_name_mappers', '[', 'name', ']', '.', 'get_init', '(', ')']
Get initial name of symbol.
['Get', 'initial', 'name', 'of', 'symbol', '.']
train
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/smt/smttranslator.py#L138-L143
4,720
nicolargo/glances
glances/amps/glances_amp.py
GlancesAmp.load_config
def load_config(self, config): """Load AMP parameters from the configuration file.""" # Read AMP configuration. # For ex, the AMP foo should have the following section: # # [foo] # enable=true # regex=\/usr\/bin\/nginx # refresh=60 # # and optionally: # # one_line=false # option1=opt1 # ... # amp_section = 'amp_' + self.amp_name if (hasattr(config, 'has_section') and config.has_section(amp_section)): logger.debug("AMP - {}: Load configuration".format(self.NAME)) for param, _ in config.items(amp_section): try: self.configs[param] = config.get_float_value(amp_section, param) except ValueError: self.configs[param] = config.get_value(amp_section, param).split(',') if len(self.configs[param]) == 1: self.configs[param] = self.configs[param][0] logger.debug("AMP - {}: Load parameter: {} = {}".format(self.NAME, param, self.configs[param])) else: logger.debug("AMP - {}: Can not find section {} in the configuration file".format(self.NAME, self.amp_name)) return False # enable, regex and refresh are mandatory # if not configured then AMP is disabled if self.enable(): for k in ['regex', 'refresh']: if k not in self.configs: logger.warning("AMP - {}: Can not find configuration key {} in section {}".format(self.NAME, k, self.amp_name)) self.configs['enable'] = 'false' else: logger.debug("AMP - {} is disabled".format(self.NAME)) # Init the count to 0 self.configs['count'] = 0 return self.enable()
python
def load_config(self, config): """Load AMP parameters from the configuration file.""" # Read AMP configuration. # For ex, the AMP foo should have the following section: # # [foo] # enable=true # regex=\/usr\/bin\/nginx # refresh=60 # # and optionally: # # one_line=false # option1=opt1 # ... # amp_section = 'amp_' + self.amp_name if (hasattr(config, 'has_section') and config.has_section(amp_section)): logger.debug("AMP - {}: Load configuration".format(self.NAME)) for param, _ in config.items(amp_section): try: self.configs[param] = config.get_float_value(amp_section, param) except ValueError: self.configs[param] = config.get_value(amp_section, param).split(',') if len(self.configs[param]) == 1: self.configs[param] = self.configs[param][0] logger.debug("AMP - {}: Load parameter: {} = {}".format(self.NAME, param, self.configs[param])) else: logger.debug("AMP - {}: Can not find section {} in the configuration file".format(self.NAME, self.amp_name)) return False # enable, regex and refresh are mandatory # if not configured then AMP is disabled if self.enable(): for k in ['regex', 'refresh']: if k not in self.configs: logger.warning("AMP - {}: Can not find configuration key {} in section {}".format(self.NAME, k, self.amp_name)) self.configs['enable'] = 'false' else: logger.debug("AMP - {} is disabled".format(self.NAME)) # Init the count to 0 self.configs['count'] = 0 return self.enable()
['def', 'load_config', '(', 'self', ',', 'config', ')', ':', '# Read AMP configuration.', '# For ex, the AMP foo should have the following section:', '#', '# [foo]', '# enable=true', '# regex=\\/usr\\/bin\\/nginx', '# refresh=60', '#', '# and optionally:', '#', '# one_line=false', '# option1=opt1', '# ...', '#', 'amp_section', '=', "'amp_'", '+', 'self', '.', 'amp_name', 'if', '(', 'hasattr', '(', 'config', ',', "'has_section'", ')', 'and', 'config', '.', 'has_section', '(', 'amp_section', ')', ')', ':', 'logger', '.', 'debug', '(', '"AMP - {}: Load configuration"', '.', 'format', '(', 'self', '.', 'NAME', ')', ')', 'for', 'param', ',', '_', 'in', 'config', '.', 'items', '(', 'amp_section', ')', ':', 'try', ':', 'self', '.', 'configs', '[', 'param', ']', '=', 'config', '.', 'get_float_value', '(', 'amp_section', ',', 'param', ')', 'except', 'ValueError', ':', 'self', '.', 'configs', '[', 'param', ']', '=', 'config', '.', 'get_value', '(', 'amp_section', ',', 'param', ')', '.', 'split', '(', "','", ')', 'if', 'len', '(', 'self', '.', 'configs', '[', 'param', ']', ')', '==', '1', ':', 'self', '.', 'configs', '[', 'param', ']', '=', 'self', '.', 'configs', '[', 'param', ']', '[', '0', ']', 'logger', '.', 'debug', '(', '"AMP - {}: Load parameter: {} = {}"', '.', 'format', '(', 'self', '.', 'NAME', ',', 'param', ',', 'self', '.', 'configs', '[', 'param', ']', ')', ')', 'else', ':', 'logger', '.', 'debug', '(', '"AMP - {}: Can not find section {} in the configuration file"', '.', 'format', '(', 'self', '.', 'NAME', ',', 'self', '.', 'amp_name', ')', ')', 'return', 'False', '# enable, regex and refresh are mandatory', '# if not configured then AMP is disabled', 'if', 'self', '.', 'enable', '(', ')', ':', 'for', 'k', 'in', '[', "'regex'", ',', "'refresh'", ']', ':', 'if', 'k', 'not', 'in', 'self', '.', 'configs', ':', 'logger', '.', 'warning', '(', '"AMP - {}: Can not find configuration key {} in section {}"', '.', 'format', '(', 'self', '.', 'NAME', ',', 'k', ',', 'self', '.', 'amp_name', ')', ')', 'self', '.', 'configs', '[', "'enable'", ']', '=', "'false'", 'else', ':', 'logger', '.', 'debug', '(', '"AMP - {} is disabled"', '.', 'format', '(', 'self', '.', 'NAME', ')', ')', '# Init the count to 0', 'self', '.', 'configs', '[', "'count'", ']', '=', '0', 'return', 'self', '.', 'enable', '(', ')']
Load AMP parameters from the configuration file.
['Load', 'AMP', 'parameters', 'from', 'the', 'configuration', 'file', '.']
train
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/amps/glances_amp.py#L69-L115
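A hedged sketch (not part of the record) of the configuration section load_config expects. The section name 'amp_nginx' and the values are assumptions; enable, regex and refresh are the keys the function treats as mandatory, and any extra keys are loaded verbatim.

# Hypothetical glances.conf excerpt; section name is 'amp_' + self.amp_name.
amp_section_example = r"""
[amp_nginx]
enable=true
regex=\/usr\/bin\/nginx
refresh=60
one_line=false
"""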
4,721
minhhoit/yacms
yacms/core/templatetags/yacms_tags.py
ifinstalled
def ifinstalled(parser, token): """ Old-style ``if`` tag that renders contents if the given app is installed. The main use case is: {% ifinstalled app_name %} {% include "app_name/template.html" %} {% endifinstalled %} so we need to manually pull out all tokens if the app isn't installed, since if we used a normal ``if`` tag with a False arg, the include tag will still try and find the template to include. """ try: tag, app = token.split_contents() except ValueError: raise TemplateSyntaxError("ifinstalled should be in the form: " "{% ifinstalled app_name %}" "{% endifinstalled %}") end_tag = "end" + tag unmatched_end_tag = 1 if app.strip("\"'") not in settings.INSTALLED_APPS: while unmatched_end_tag: token = parser.tokens.pop(0) if token.token_type == TOKEN_BLOCK: block_name = token.contents.split()[0] if block_name == tag: unmatched_end_tag += 1 if block_name == end_tag: unmatched_end_tag -= 1 parser.tokens.insert(0, token) nodelist = parser.parse((end_tag,)) parser.delete_first_token() class IfInstalledNode(Node): def render(self, context): return nodelist.render(context) return IfInstalledNode()
python
def ifinstalled(parser, token): """ Old-style ``if`` tag that renders contents if the given app is installed. The main use case is: {% ifinstalled app_name %} {% include "app_name/template.html" %} {% endifinstalled %} so we need to manually pull out all tokens if the app isn't installed, since if we used a normal ``if`` tag with a False arg, the include tag will still try and find the template to include. """ try: tag, app = token.split_contents() except ValueError: raise TemplateSyntaxError("ifinstalled should be in the form: " "{% ifinstalled app_name %}" "{% endifinstalled %}") end_tag = "end" + tag unmatched_end_tag = 1 if app.strip("\"'") not in settings.INSTALLED_APPS: while unmatched_end_tag: token = parser.tokens.pop(0) if token.token_type == TOKEN_BLOCK: block_name = token.contents.split()[0] if block_name == tag: unmatched_end_tag += 1 if block_name == end_tag: unmatched_end_tag -= 1 parser.tokens.insert(0, token) nodelist = parser.parse((end_tag,)) parser.delete_first_token() class IfInstalledNode(Node): def render(self, context): return nodelist.render(context) return IfInstalledNode()
['def', 'ifinstalled', '(', 'parser', ',', 'token', ')', ':', 'try', ':', 'tag', ',', 'app', '=', 'token', '.', 'split_contents', '(', ')', 'except', 'ValueError', ':', 'raise', 'TemplateSyntaxError', '(', '"ifinstalled should be in the form: "', '"{% ifinstalled app_name %}"', '"{% endifinstalled %}"', ')', 'end_tag', '=', '"end"', '+', 'tag', 'unmatched_end_tag', '=', '1', 'if', 'app', '.', 'strip', '(', '"\\"\'"', ')', 'not', 'in', 'settings', '.', 'INSTALLED_APPS', ':', 'while', 'unmatched_end_tag', ':', 'token', '=', 'parser', '.', 'tokens', '.', 'pop', '(', '0', ')', 'if', 'token', '.', 'token_type', '==', 'TOKEN_BLOCK', ':', 'block_name', '=', 'token', '.', 'contents', '.', 'split', '(', ')', '[', '0', ']', 'if', 'block_name', '==', 'tag', ':', 'unmatched_end_tag', '+=', '1', 'if', 'block_name', '==', 'end_tag', ':', 'unmatched_end_tag', '-=', '1', 'parser', '.', 'tokens', '.', 'insert', '(', '0', ',', 'token', ')', 'nodelist', '=', 'parser', '.', 'parse', '(', '(', 'end_tag', ',', ')', ')', 'parser', '.', 'delete_first_token', '(', ')', 'class', 'IfInstalledNode', '(', 'Node', ')', ':', 'def', 'render', '(', 'self', ',', 'context', ')', ':', 'return', 'nodelist', '.', 'render', '(', 'context', ')', 'return', 'IfInstalledNode', '(', ')']
Old-style ``if`` tag that renders contents if the given app is installed. The main use case is: {% ifinstalled app_name %} {% include "app_name/template.html" %} {% endifinstalled %} so we need to manually pull out all tokens if the app isn't installed, since if we used a normal ``if`` tag with a False arg, the include tag will still try and find the template to include.
['Old', '-', 'style', 'if', 'tag', 'that', 'renders', 'contents', 'if', 'the', 'given', 'app', 'is', 'installed', '.', 'The', 'main', 'use', 'case', 'is', ':']
train
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/templatetags/yacms_tags.py#L149-L188
4,722
pip-services3-python/pip-services3-commons-python
pip_services3_commons/data/AnyValueMap.py
AnyValueMap.get_as_integer
def get_as_integer(self, key): """ Converts map element into an integer or returns 0 if conversion is not possible. :param key: an index of the element to get. :return: integer value of the element or 0 if conversion is not supported. """ value = self.get(key) return IntegerConverter.to_integer(value)
python
def get_as_integer(self, key): """ Converts map element into an integer or returns 0 if conversion is not possible. :param key: an index of the element to get. :return: integer value of the element or 0 if conversion is not supported. """ value = self.get(key) return IntegerConverter.to_integer(value)
['def', 'get_as_integer', '(', 'self', ',', 'key', ')', ':', 'value', '=', 'self', '.', 'get', '(', 'key', ')', 'return', 'IntegerConverter', '.', 'to_integer', '(', 'value', ')']
Converts map element into an integer or returns 0 if conversion is not possible. :param key: an index of the element to get. :return: integer value of the element or 0 if conversion is not supported.
['Converts', 'map', 'element', 'into', 'an', 'integer', 'or', 'returns', '0', 'if', 'conversion', 'is', 'not', 'possible', '.']
train
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/AnyValueMap.py#L245-L254
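A brief usage sketch (not part of the record); the import path follows the record's package layout, and passing a plain dict to the AnyValueMap constructor is an assumption.

from pip_services3_commons.data import AnyValueMap

m = AnyValueMap({'id': '42', 'name': 'abc'})
print(m.get_as_integer('id'))    # 42
print(m.get_as_integer('name'))  # 0 -- conversion not possible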
4,723
openvax/varcode
varcode/nucleotides.py
is_purine
def is_purine(nucleotide, allow_extended_nucleotides=False): """Is the nucleotide a purine""" if not allow_extended_nucleotides and nucleotide not in STANDARD_NUCLEOTIDES: raise ValueError( "{} is a non-standard nucleotide, neither purine nor pyrimidine".format(nucleotide)) return nucleotide in PURINE_NUCLEOTIDES
python
def is_purine(nucleotide, allow_extended_nucleotides=False): """Is the nucleotide a purine""" if not allow_extended_nucleotides and nucleotide not in STANDARD_NUCLEOTIDES: raise ValueError( "{} is a non-standard nucleotide, neither purine nor pyrimidine".format(nucleotide)) return nucleotide in PURINE_NUCLEOTIDES
['def', 'is_purine', '(', 'nucleotide', ',', 'allow_extended_nucleotides', '=', 'False', ')', ':', 'if', 'not', 'allow_extended_nucleotides', 'and', 'nucleotide', 'not', 'in', 'STANDARD_NUCLEOTIDES', ':', 'raise', 'ValueError', '(', '"{} is a non-standard nucleotide, neither purine nor pyrimidine"', '.', 'format', '(', 'nucleotide', ')', ')', 'return', 'nucleotide', 'in', 'PURINE_NUCLEOTIDES']
Is the nucleotide a purine
['Is', 'the', 'nucleotide', 'a', 'purine']
train
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/nucleotides.py#L55-L60
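A short usage sketch for is_purine (not part of the record); the behavior shown follows directly from the record's code.

from varcode.nucleotides import is_purine

print(is_purine('A'))  # True  -- adenine is a purine
print(is_purine('C'))  # False -- cytosine is a pyrimidine
# Extended IUPAC codes raise ValueError unless explicitly allowed:
is_purine('R', allow_extended_nucleotides=True)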
4,724
limodou/uliweb
uliweb/utils/generic.py
make_view_field
def make_view_field(field, obj=None, types_convert_map=None, fields_convert_map=None, value=__default_value__, auto_convert=True): """ If auto_convert, then all values will be converted to string format, otherwise remain the orignal value """ from uliweb.utils.textconvert import text2html from uliweb.core.html import Tag old_value = value types_convert_map = types_convert_map or {} fields_convert_map = fields_convert_map or {} default_convert_map = {orm.TextProperty:lambda v,o:text2html(v)} if isinstance(field, dict): if 'prop' in field and field.get('prop'): prop = field['prop'] else: prop = field name = field.get('name') else: prop = field name = prop.property_name #not real Property instance, then return itself, so if should return #just like {'label':xxx, 'value':xxx, 'display':xxx} if not isinstance(prop, orm.Property): if old_value is __default_value__: value = prop.get('value', '') display = prop.get('display', value) label = prop.get('label', '') or prop.get('verbose_name', '') convert = prop.get('convert', None) else: if old_value is __default_value__: if isinstance(obj, Model): value = prop.get_value_for_datastore(obj) if value is Lazy: getattr(obj, prop.property_name) value = prop.get_value_for_datastore(obj) else: value = obj[name] if auto_convert or prop.choices: display = prop.get_display_value(value) else: display = value if isinstance(field, dict): initial = field.get('verbose_name', None) else: initial = '' label = initial or prop.verbose_name or name if name in fields_convert_map: convert = fields_convert_map.get(name, None) else: if isinstance(prop, orm.Property): convert = types_convert_map.get(prop.__class__, None) if not convert: convert = default_convert_map.get(prop.__class__, None) convert_result = None if convert: convert_result = convert(value, obj) if convert_result is None: if value is not None: if isinstance(prop, orm.ManyToMany): s = [] #support value parameter, the old value is already stored in "old_value" variable if old_value is not __default_value__: if prop.reference_fieldname == 'id': query = [] for _id in old_value: _v = functions.get_cached_object(prop.reference_class, _id) query.append(_v) else: query = prop.reference_class.filter(prop.reference_class.c[prop.reversed_fieldname].in_(old_value)) else: if prop.reference_fieldname == 'id': query = [] _ids = prop.get_value_for_datastore(obj, cached=True) for _id in _ids: _v = functions.get_cached_object(prop.reference_class, _id) if not _v: log.debug("Can't find object %s:%d" % (prop.reference_class.__name__, _id)) _v = _id query.append(_v) else: query = getattr(obj, prop.property_name).all() for x in query: if isinstance(x, orm.Model): s.append(get_obj_url(x)) else: s.append(str(x)) display = ' '.join(s) elif isinstance(prop, orm.ReferenceProperty) or isinstance(prop, orm.OneToOne): try: if old_value is not __default_value__: d = prop.reference_class.c[prop.reference_fieldname] if prop.reference_fieldname == 'id': v = functions.get_cached_object(prop.reference_class, old_value) else: v = prop.reference_class.get(d==old_value) if not isinstance(obj, Model): d = prop.reference_class.c[prop.reference_fieldname] if prop.reference_fieldname == 'id': v = functions.get_cached_object(prop.reference_class, value) else: v = prop.reference_class.get(d==value) else: if prop.reference_fieldname == 'id': v = functions.get_cached_object(prop.reference_class, obj.get_datastore_value(prop.property_name)) else: v = functions.get_cached_object(prop.reference_class, 
condition=prop.reference_class.c[prop.reference_fieldname]==obj.get_datastore_value(prop.property_name)) except orm.Error: display = prop.get_datastore_value(obj) or '' v = None if isinstance(v, Model): display = get_obj_url(v) else: display = str(v if v is not None else '') elif isinstance(prop, orm.FileProperty): url = functions.get_href(value) if url: display = str(Tag('a', value, href=url)) else: display = '' # if isinstance(prop, orm.Property) and prop.choices is not None: # display = prop.get_display_value(value) if prop.__class__ is orm.TextProperty: display = text2html(value) else: display = convert_result if isinstance(display, unicode): display = display.encode('utf-8') if display is None: display = '' return Storage({'label':label, 'value':value, 'display':display, 'name':name})
python
def make_view_field(field, obj=None, types_convert_map=None, fields_convert_map=None, value=__default_value__, auto_convert=True): """ If auto_convert, then all values will be converted to string format, otherwise remain the orignal value """ from uliweb.utils.textconvert import text2html from uliweb.core.html import Tag old_value = value types_convert_map = types_convert_map or {} fields_convert_map = fields_convert_map or {} default_convert_map = {orm.TextProperty:lambda v,o:text2html(v)} if isinstance(field, dict): if 'prop' in field and field.get('prop'): prop = field['prop'] else: prop = field name = field.get('name') else: prop = field name = prop.property_name #not real Property instance, then return itself, so if should return #just like {'label':xxx, 'value':xxx, 'display':xxx} if not isinstance(prop, orm.Property): if old_value is __default_value__: value = prop.get('value', '') display = prop.get('display', value) label = prop.get('label', '') or prop.get('verbose_name', '') convert = prop.get('convert', None) else: if old_value is __default_value__: if isinstance(obj, Model): value = prop.get_value_for_datastore(obj) if value is Lazy: getattr(obj, prop.property_name) value = prop.get_value_for_datastore(obj) else: value = obj[name] if auto_convert or prop.choices: display = prop.get_display_value(value) else: display = value if isinstance(field, dict): initial = field.get('verbose_name', None) else: initial = '' label = initial or prop.verbose_name or name if name in fields_convert_map: convert = fields_convert_map.get(name, None) else: if isinstance(prop, orm.Property): convert = types_convert_map.get(prop.__class__, None) if not convert: convert = default_convert_map.get(prop.__class__, None) convert_result = None if convert: convert_result = convert(value, obj) if convert_result is None: if value is not None: if isinstance(prop, orm.ManyToMany): s = [] #support value parameter, the old value is already stored in "old_value" variable if old_value is not __default_value__: if prop.reference_fieldname == 'id': query = [] for _id in old_value: _v = functions.get_cached_object(prop.reference_class, _id) query.append(_v) else: query = prop.reference_class.filter(prop.reference_class.c[prop.reversed_fieldname].in_(old_value)) else: if prop.reference_fieldname == 'id': query = [] _ids = prop.get_value_for_datastore(obj, cached=True) for _id in _ids: _v = functions.get_cached_object(prop.reference_class, _id) if not _v: log.debug("Can't find object %s:%d" % (prop.reference_class.__name__, _id)) _v = _id query.append(_v) else: query = getattr(obj, prop.property_name).all() for x in query: if isinstance(x, orm.Model): s.append(get_obj_url(x)) else: s.append(str(x)) display = ' '.join(s) elif isinstance(prop, orm.ReferenceProperty) or isinstance(prop, orm.OneToOne): try: if old_value is not __default_value__: d = prop.reference_class.c[prop.reference_fieldname] if prop.reference_fieldname == 'id': v = functions.get_cached_object(prop.reference_class, old_value) else: v = prop.reference_class.get(d==old_value) if not isinstance(obj, Model): d = prop.reference_class.c[prop.reference_fieldname] if prop.reference_fieldname == 'id': v = functions.get_cached_object(prop.reference_class, value) else: v = prop.reference_class.get(d==value) else: if prop.reference_fieldname == 'id': v = functions.get_cached_object(prop.reference_class, obj.get_datastore_value(prop.property_name)) else: v = functions.get_cached_object(prop.reference_class, 
condition=prop.reference_class.c[prop.reference_fieldname]==obj.get_datastore_value(prop.property_name)) except orm.Error: display = prop.get_datastore_value(obj) or '' v = None if isinstance(v, Model): display = get_obj_url(v) else: display = str(v if v is not None else '') elif isinstance(prop, orm.FileProperty): url = functions.get_href(value) if url: display = str(Tag('a', value, href=url)) else: display = '' # if isinstance(prop, orm.Property) and prop.choices is not None: # display = prop.get_display_value(value) if prop.__class__ is orm.TextProperty: display = text2html(value) else: display = convert_result if isinstance(display, unicode): display = display.encode('utf-8') if display is None: display = '' return Storage({'label':label, 'value':value, 'display':display, 'name':name})
['def', 'make_view_field', '(', 'field', ',', 'obj', '=', 'None', ',', 'types_convert_map', '=', 'None', ',', 'fields_convert_map', '=', 'None', ',', 'value', '=', '__default_value__', ',', 'auto_convert', '=', 'True', ')', ':', 'from', 'uliweb', '.', 'utils', '.', 'textconvert', 'import', 'text2html', 'from', 'uliweb', '.', 'core', '.', 'html', 'import', 'Tag', 'old_value', '=', 'value', 'types_convert_map', '=', 'types_convert_map', 'or', '{', '}', 'fields_convert_map', '=', 'fields_convert_map', 'or', '{', '}', 'default_convert_map', '=', '{', 'orm', '.', 'TextProperty', ':', 'lambda', 'v', ',', 'o', ':', 'text2html', '(', 'v', ')', '}', 'if', 'isinstance', '(', 'field', ',', 'dict', ')', ':', 'if', "'prop'", 'in', 'field', 'and', 'field', '.', 'get', '(', "'prop'", ')', ':', 'prop', '=', 'field', '[', "'prop'", ']', 'else', ':', 'prop', '=', 'field', 'name', '=', 'field', '.', 'get', '(', "'name'", ')', 'else', ':', 'prop', '=', 'field', 'name', '=', 'prop', '.', 'property_name', '#not real Property instance, then return itself, so if should return\r', "#just like {'label':xxx, 'value':xxx, 'display':xxx}\r", 'if', 'not', 'isinstance', '(', 'prop', ',', 'orm', '.', 'Property', ')', ':', 'if', 'old_value', 'is', '__default_value__', ':', 'value', '=', 'prop', '.', 'get', '(', "'value'", ',', "''", ')', 'display', '=', 'prop', '.', 'get', '(', "'display'", ',', 'value', ')', 'label', '=', 'prop', '.', 'get', '(', "'label'", ',', "''", ')', 'or', 'prop', '.', 'get', '(', "'verbose_name'", ',', "''", ')', 'convert', '=', 'prop', '.', 'get', '(', "'convert'", ',', 'None', ')', 'else', ':', 'if', 'old_value', 'is', '__default_value__', ':', 'if', 'isinstance', '(', 'obj', ',', 'Model', ')', ':', 'value', '=', 'prop', '.', 'get_value_for_datastore', '(', 'obj', ')', 'if', 'value', 'is', 'Lazy', ':', 'getattr', '(', 'obj', ',', 'prop', '.', 'property_name', ')', 'value', '=', 'prop', '.', 'get_value_for_datastore', '(', 'obj', ')', 'else', ':', 'value', '=', 'obj', '[', 'name', ']', 'if', 'auto_convert', 'or', 'prop', '.', 'choices', ':', 'display', '=', 'prop', '.', 'get_display_value', '(', 'value', ')', 'else', ':', 'display', '=', 'value', 'if', 'isinstance', '(', 'field', ',', 'dict', ')', ':', 'initial', '=', 'field', '.', 'get', '(', "'verbose_name'", ',', 'None', ')', 'else', ':', 'initial', '=', "''", 'label', '=', 'initial', 'or', 'prop', '.', 'verbose_name', 'or', 'name', 'if', 'name', 'in', 'fields_convert_map', ':', 'convert', '=', 'fields_convert_map', '.', 'get', '(', 'name', ',', 'None', ')', 'else', ':', 'if', 'isinstance', '(', 'prop', ',', 'orm', '.', 'Property', ')', ':', 'convert', '=', 'types_convert_map', '.', 'get', '(', 'prop', '.', '__class__', ',', 'None', ')', 'if', 'not', 'convert', ':', 'convert', '=', 'default_convert_map', '.', 'get', '(', 'prop', '.', '__class__', ',', 'None', ')', 'convert_result', '=', 'None', 'if', 'convert', ':', 'convert_result', '=', 'convert', '(', 'value', ',', 'obj', ')', 'if', 'convert_result', 'is', 'None', ':', 'if', 'value', 'is', 'not', 'None', ':', 'if', 'isinstance', '(', 'prop', ',', 'orm', '.', 'ManyToMany', ')', ':', 's', '=', '[', ']', '#support value parameter, the old value is already stored in "old_value" variable\r', 'if', 'old_value', 'is', 'not', '__default_value__', ':', 'if', 'prop', '.', 'reference_fieldname', '==', "'id'", ':', 'query', '=', '[', ']', 'for', '_id', 'in', 'old_value', ':', '_v', '=', 'functions', '.', 'get_cached_object', '(', 'prop', '.', 'reference_class', ',', '_id', ')', 'query', '.', 'append', 
'(', '_v', ')', 'else', ':', 'query', '=', 'prop', '.', 'reference_class', '.', 'filter', '(', 'prop', '.', 'reference_class', '.', 'c', '[', 'prop', '.', 'reversed_fieldname', ']', '.', 'in_', '(', 'old_value', ')', ')', 'else', ':', 'if', 'prop', '.', 'reference_fieldname', '==', "'id'", ':', 'query', '=', '[', ']', '_ids', '=', 'prop', '.', 'get_value_for_datastore', '(', 'obj', ',', 'cached', '=', 'True', ')', 'for', '_id', 'in', '_ids', ':', '_v', '=', 'functions', '.', 'get_cached_object', '(', 'prop', '.', 'reference_class', ',', '_id', ')', 'if', 'not', '_v', ':', 'log', '.', 'debug', '(', '"Can\'t find object %s:%d"', '%', '(', 'prop', '.', 'reference_class', '.', '__name__', ',', '_id', ')', ')', '_v', '=', '_id', 'query', '.', 'append', '(', '_v', ')', 'else', ':', 'query', '=', 'getattr', '(', 'obj', ',', 'prop', '.', 'property_name', ')', '.', 'all', '(', ')', 'for', 'x', 'in', 'query', ':', 'if', 'isinstance', '(', 'x', ',', 'orm', '.', 'Model', ')', ':', 's', '.', 'append', '(', 'get_obj_url', '(', 'x', ')', ')', 'else', ':', 's', '.', 'append', '(', 'str', '(', 'x', ')', ')', 'display', '=', "' '", '.', 'join', '(', 's', ')', 'elif', 'isinstance', '(', 'prop', ',', 'orm', '.', 'ReferenceProperty', ')', 'or', 'isinstance', '(', 'prop', ',', 'orm', '.', 'OneToOne', ')', ':', 'try', ':', 'if', 'old_value', 'is', 'not', '__default_value__', ':', 'd', '=', 'prop', '.', 'reference_class', '.', 'c', '[', 'prop', '.', 'reference_fieldname', ']', 'if', 'prop', '.', 'reference_fieldname', '==', "'id'", ':', 'v', '=', 'functions', '.', 'get_cached_object', '(', 'prop', '.', 'reference_class', ',', 'old_value', ')', 'else', ':', 'v', '=', 'prop', '.', 'reference_class', '.', 'get', '(', 'd', '==', 'old_value', ')', 'if', 'not', 'isinstance', '(', 'obj', ',', 'Model', ')', ':', 'd', '=', 'prop', '.', 'reference_class', '.', 'c', '[', 'prop', '.', 'reference_fieldname', ']', 'if', 'prop', '.', 'reference_fieldname', '==', "'id'", ':', 'v', '=', 'functions', '.', 'get_cached_object', '(', 'prop', '.', 'reference_class', ',', 'value', ')', 'else', ':', 'v', '=', 'prop', '.', 'reference_class', '.', 'get', '(', 'd', '==', 'value', ')', 'else', ':', 'if', 'prop', '.', 'reference_fieldname', '==', "'id'", ':', 'v', '=', 'functions', '.', 'get_cached_object', '(', 'prop', '.', 'reference_class', ',', 'obj', '.', 'get_datastore_value', '(', 'prop', '.', 'property_name', ')', ')', 'else', ':', 'v', '=', 'functions', '.', 'get_cached_object', '(', 'prop', '.', 'reference_class', ',', 'condition', '=', 'prop', '.', 'reference_class', '.', 'c', '[', 'prop', '.', 'reference_fieldname', ']', '==', 'obj', '.', 'get_datastore_value', '(', 'prop', '.', 'property_name', ')', ')', 'except', 'orm', '.', 'Error', ':', 'display', '=', 'prop', '.', 'get_datastore_value', '(', 'obj', ')', 'or', "''", 'v', '=', 'None', 'if', 'isinstance', '(', 'v', ',', 'Model', ')', ':', 'display', '=', 'get_obj_url', '(', 'v', ')', 'else', ':', 'display', '=', 'str', '(', 'v', 'if', 'v', 'is', 'not', 'None', 'else', "''", ')', 'elif', 'isinstance', '(', 'prop', ',', 'orm', '.', 'FileProperty', ')', ':', 'url', '=', 'functions', '.', 'get_href', '(', 'value', ')', 'if', 'url', ':', 'display', '=', 'str', '(', 'Tag', '(', "'a'", ',', 'value', ',', 'href', '=', 'url', ')', ')', 'else', ':', 'display', '=', "''", '# if isinstance(prop, orm.Property) and prop.choices is not None:\r', '# display = prop.get_display_value(value)\r', 'if', 'prop', '.', '__class__', 'is', 'orm', '.', 'TextProperty', ':', 'display', '=', 'text2html', 
'(', 'value', ')', 'else', ':', 'display', '=', 'convert_result', 'if', 'isinstance', '(', 'display', ',', 'unicode', ')', ':', 'display', '=', 'display', '.', 'encode', '(', "'utf-8'", ')', 'if', 'display', 'is', 'None', ':', 'display', '=', "''", 'return', 'Storage', '(', '{', "'label'", ':', 'label', ',', "'value'", ':', 'value', ',', "'display'", ':', 'display', ',', "'name'", ':', 'name', '}', ')']
If auto_convert, then all values will be converted to string format, otherwise remain the original value
['If', 'auto_convert', 'then', 'all', 'values', 'will', 'be', 'converted', 'to', 'string', 'format', 'otherwise', 'remain', 'the', 'original', 'value']
train
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L717-L860
4,725
ericjang/tdb
tdb/debug_session.py
DebugSession.s
def s(self): """ step to the next node in the execution order """ next_node=self._exe_order[self.step] self._eval(next_node) self.step+=1 if self.step==len(self._exe_order): return self._finish() else: # if stepping, return the value of the node we just # evaled return self._break(value=self._cache.get(next_node.name))
python
def s(self): """ step to the next node in the execution order """ next_node=self._exe_order[self.step] self._eval(next_node) self.step+=1 if self.step==len(self._exe_order): return self._finish() else: # if stepping, return the value of the node we just # evaled return self._break(value=self._cache.get(next_node.name))
['def', 's', '(', 'self', ')', ':', 'next_node', '=', 'self', '.', '_exe_order', '[', 'self', '.', 'step', ']', 'self', '.', '_eval', '(', 'next_node', ')', 'self', '.', 'step', '+=', '1', 'if', 'self', '.', 'step', '==', 'len', '(', 'self', '.', '_exe_order', ')', ':', 'return', 'self', '.', '_finish', '(', ')', 'else', ':', '# if stepping, return the value of the node we just', '# evaled', 'return', 'self', '.', '_break', '(', 'value', '=', 'self', '.', '_cache', '.', 'get', '(', 'next_node', '.', 'name', ')', ')']
step to the next node in the execution order
['step', 'to', 'the', 'next', 'node', 'in', 'the', 'execution', 'order']
train
https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L63-L75
4,726
fabioz/PyDev.Debugger
_pydevd_bundle/pydevd_comm.py
build_exception_info_response
def build_exception_info_response(dbg, thread_id, request_seq, set_additional_thread_info, iter_visible_frames_info, max_frames): ''' :return ExceptionInfoResponse ''' thread = pydevd_find_thread_by_id(thread_id) additional_info = set_additional_thread_info(thread) topmost_frame = additional_info.get_topmost_frame(thread) frames = [] exc_type = None exc_desc = None if topmost_frame is not None: frame_id_to_lineno = {} try: trace_obj = None frame = topmost_frame while frame is not None: if frame.f_code.co_name == 'do_wait_suspend' and frame.f_code.co_filename.endswith('pydevd.py'): arg = frame.f_locals.get('arg', None) if arg is not None: exc_type, exc_desc, trace_obj = arg break frame = frame.f_back while trace_obj.tb_next is not None: trace_obj = trace_obj.tb_next info = dbg.suspended_frames_manager.get_topmost_frame_and_frame_id_to_line(thread_id) if info is not None: topmost_frame, frame_id_to_lineno = info if trace_obj is not None: for frame_id, frame, method_name, original_filename, filename_in_utf8, lineno in iter_visible_frames_info( dbg, trace_obj.tb_frame, frame_id_to_lineno): line_text = linecache.getline(original_filename, lineno) # Never filter out plugin frames! if not getattr(frame, 'IS_PLUGIN_FRAME', False): if dbg.is_files_filter_enabled and dbg.apply_files_filter(frame, original_filename, False): continue frames.append((filename_in_utf8, lineno, method_name, line_text)) finally: topmost_frame = None name = 'exception: type unknown' if exc_type is not None: try: name = exc_type.__qualname__ except: try: name = exc_type.__name__ except: try: name = str(exc_type) except: pass description = 'exception: no description' if exc_desc is not None: try: description = str(exc_desc) except: pass stack_str = ''.join(traceback.format_list(frames[-max_frames:])) # This is an extra bit of data used by Visual Studio source_path = frames[0][0] if frames else '' if thread.stop_reason == CMD_STEP_CAUGHT_EXCEPTION: break_mode = pydevd_schema.ExceptionBreakMode.ALWAYS else: break_mode = pydevd_schema.ExceptionBreakMode.UNHANDLED response = pydevd_schema.ExceptionInfoResponse( request_seq=request_seq, success=True, command='exceptionInfo', body=pydevd_schema.ExceptionInfoResponseBody( exceptionId=name, description=description, breakMode=break_mode, details=pydevd_schema.ExceptionDetails( message=description, typeName=name, stackTrace=stack_str, source=source_path ) ) ) return response
python
def build_exception_info_response(dbg, thread_id, request_seq, set_additional_thread_info, iter_visible_frames_info, max_frames): ''' :return ExceptionInfoResponse ''' thread = pydevd_find_thread_by_id(thread_id) additional_info = set_additional_thread_info(thread) topmost_frame = additional_info.get_topmost_frame(thread) frames = [] exc_type = None exc_desc = None if topmost_frame is not None: frame_id_to_lineno = {} try: trace_obj = None frame = topmost_frame while frame is not None: if frame.f_code.co_name == 'do_wait_suspend' and frame.f_code.co_filename.endswith('pydevd.py'): arg = frame.f_locals.get('arg', None) if arg is not None: exc_type, exc_desc, trace_obj = arg break frame = frame.f_back while trace_obj.tb_next is not None: trace_obj = trace_obj.tb_next info = dbg.suspended_frames_manager.get_topmost_frame_and_frame_id_to_line(thread_id) if info is not None: topmost_frame, frame_id_to_lineno = info if trace_obj is not None: for frame_id, frame, method_name, original_filename, filename_in_utf8, lineno in iter_visible_frames_info( dbg, trace_obj.tb_frame, frame_id_to_lineno): line_text = linecache.getline(original_filename, lineno) # Never filter out plugin frames! if not getattr(frame, 'IS_PLUGIN_FRAME', False): if dbg.is_files_filter_enabled and dbg.apply_files_filter(frame, original_filename, False): continue frames.append((filename_in_utf8, lineno, method_name, line_text)) finally: topmost_frame = None name = 'exception: type unknown' if exc_type is not None: try: name = exc_type.__qualname__ except: try: name = exc_type.__name__ except: try: name = str(exc_type) except: pass description = 'exception: no description' if exc_desc is not None: try: description = str(exc_desc) except: pass stack_str = ''.join(traceback.format_list(frames[-max_frames:])) # This is an extra bit of data used by Visual Studio source_path = frames[0][0] if frames else '' if thread.stop_reason == CMD_STEP_CAUGHT_EXCEPTION: break_mode = pydevd_schema.ExceptionBreakMode.ALWAYS else: break_mode = pydevd_schema.ExceptionBreakMode.UNHANDLED response = pydevd_schema.ExceptionInfoResponse( request_seq=request_seq, success=True, command='exceptionInfo', body=pydevd_schema.ExceptionInfoResponseBody( exceptionId=name, description=description, breakMode=break_mode, details=pydevd_schema.ExceptionDetails( message=description, typeName=name, stackTrace=stack_str, source=source_path ) ) ) return response
['def', 'build_exception_info_response', '(', 'dbg', ',', 'thread_id', ',', 'request_seq', ',', 'set_additional_thread_info', ',', 'iter_visible_frames_info', ',', 'max_frames', ')', ':', 'thread', '=', 'pydevd_find_thread_by_id', '(', 'thread_id', ')', 'additional_info', '=', 'set_additional_thread_info', '(', 'thread', ')', 'topmost_frame', '=', 'additional_info', '.', 'get_topmost_frame', '(', 'thread', ')', 'frames', '=', '[', ']', 'exc_type', '=', 'None', 'exc_desc', '=', 'None', 'if', 'topmost_frame', 'is', 'not', 'None', ':', 'frame_id_to_lineno', '=', '{', '}', 'try', ':', 'trace_obj', '=', 'None', 'frame', '=', 'topmost_frame', 'while', 'frame', 'is', 'not', 'None', ':', 'if', 'frame', '.', 'f_code', '.', 'co_name', '==', "'do_wait_suspend'", 'and', 'frame', '.', 'f_code', '.', 'co_filename', '.', 'endswith', '(', "'pydevd.py'", ')', ':', 'arg', '=', 'frame', '.', 'f_locals', '.', 'get', '(', "'arg'", ',', 'None', ')', 'if', 'arg', 'is', 'not', 'None', ':', 'exc_type', ',', 'exc_desc', ',', 'trace_obj', '=', 'arg', 'break', 'frame', '=', 'frame', '.', 'f_back', 'while', 'trace_obj', '.', 'tb_next', 'is', 'not', 'None', ':', 'trace_obj', '=', 'trace_obj', '.', 'tb_next', 'info', '=', 'dbg', '.', 'suspended_frames_manager', '.', 'get_topmost_frame_and_frame_id_to_line', '(', 'thread_id', ')', 'if', 'info', 'is', 'not', 'None', ':', 'topmost_frame', ',', 'frame_id_to_lineno', '=', 'info', 'if', 'trace_obj', 'is', 'not', 'None', ':', 'for', 'frame_id', ',', 'frame', ',', 'method_name', ',', 'original_filename', ',', 'filename_in_utf8', ',', 'lineno', 'in', 'iter_visible_frames_info', '(', 'dbg', ',', 'trace_obj', '.', 'tb_frame', ',', 'frame_id_to_lineno', ')', ':', 'line_text', '=', 'linecache', '.', 'getline', '(', 'original_filename', ',', 'lineno', ')', '# Never filter out plugin frames!', 'if', 'not', 'getattr', '(', 'frame', ',', "'IS_PLUGIN_FRAME'", ',', 'False', ')', ':', 'if', 'dbg', '.', 'is_files_filter_enabled', 'and', 'dbg', '.', 'apply_files_filter', '(', 'frame', ',', 'original_filename', ',', 'False', ')', ':', 'continue', 'frames', '.', 'append', '(', '(', 'filename_in_utf8', ',', 'lineno', ',', 'method_name', ',', 'line_text', ')', ')', 'finally', ':', 'topmost_frame', '=', 'None', 'name', '=', "'exception: type unknown'", 'if', 'exc_type', 'is', 'not', 'None', ':', 'try', ':', 'name', '=', 'exc_type', '.', '__qualname__', 'except', ':', 'try', ':', 'name', '=', 'exc_type', '.', '__name__', 'except', ':', 'try', ':', 'name', '=', 'str', '(', 'exc_type', ')', 'except', ':', 'pass', 'description', '=', "'exception: no description'", 'if', 'exc_desc', 'is', 'not', 'None', ':', 'try', ':', 'description', '=', 'str', '(', 'exc_desc', ')', 'except', ':', 'pass', 'stack_str', '=', "''", '.', 'join', '(', 'traceback', '.', 'format_list', '(', 'frames', '[', '-', 'max_frames', ':', ']', ')', ')', '# This is an extra bit of data used by Visual Studio', 'source_path', '=', 'frames', '[', '0', ']', '[', '0', ']', 'if', 'frames', 'else', "''", 'if', 'thread', '.', 'stop_reason', '==', 'CMD_STEP_CAUGHT_EXCEPTION', ':', 'break_mode', '=', 'pydevd_schema', '.', 'ExceptionBreakMode', '.', 'ALWAYS', 'else', ':', 'break_mode', '=', 'pydevd_schema', '.', 'ExceptionBreakMode', '.', 'UNHANDLED', 'response', '=', 'pydevd_schema', '.', 'ExceptionInfoResponse', '(', 'request_seq', '=', 'request_seq', ',', 'success', '=', 'True', ',', 'command', '=', "'exceptionInfo'", ',', 'body', '=', 'pydevd_schema', '.', 'ExceptionInfoResponseBody', '(', 'exceptionId', '=', 'name', ',', 'description', 
'=', 'description', ',', 'breakMode', '=', 'break_mode', ',', 'details', '=', 'pydevd_schema', '.', 'ExceptionDetails', '(', 'message', '=', 'description', ',', 'typeName', '=', 'name', ',', 'stackTrace', '=', 'stack_str', ',', 'source', '=', 'source_path', ')', ')', ')', 'return', 'response']
:return ExceptionInfoResponse
[':', 'return', 'ExceptionInfoResponse']
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_comm.py#L1063-L1154
4,727
urbn/Caesium
caesium/handler.py
BaseHandler.get_mongo_query_from_arguments
def get_mongo_query_from_arguments(self, reserved_attributes=[]): """Generate a mongo query from the given URL query parameters, handles OR query via multiples :param list reserved_attributes: A list of attributes you want to exclude from this particular query :return: dict """ query = {} for arg in self.request.arguments: if arg not in reserved_attributes: if len(self.request.arguments.get(arg)) > 1: query["$or"] = [] for val in self.request.arguments.get(arg): query["$or"].append({arg: self.get_arg_value_as_type(val)}) else: query[arg] = self.get_arg_value_as_type(self.request.arguments.get(arg)[0]) return query
python
def get_mongo_query_from_arguments(self, reserved_attributes=[]): """Generate a mongo query from the given URL query parameters, handles OR query via multiples :param list reserved_attributes: A list of attributes you want to exclude from this particular query :return: dict """ query = {} for arg in self.request.arguments: if arg not in reserved_attributes: if len(self.request.arguments.get(arg)) > 1: query["$or"] = [] for val in self.request.arguments.get(arg): query["$or"].append({arg: self.get_arg_value_as_type(val)}) else: query[arg] = self.get_arg_value_as_type(self.request.arguments.get(arg)[0]) return query
['def', 'get_mongo_query_from_arguments', '(', 'self', ',', 'reserved_attributes', '=', '[', ']', ')', ':', 'query', '=', '{', '}', 'for', 'arg', 'in', 'self', '.', 'request', '.', 'arguments', ':', 'if', 'arg', 'not', 'in', 'reserved_attributes', ':', 'if', 'len', '(', 'self', '.', 'request', '.', 'arguments', '.', 'get', '(', 'arg', ')', ')', '>', '1', ':', 'query', '[', '"$or"', ']', '=', '[', ']', 'for', 'val', 'in', 'self', '.', 'request', '.', 'arguments', '.', 'get', '(', 'arg', ')', ':', 'query', '[', '"$or"', ']', '.', 'append', '(', '{', 'arg', ':', 'self', '.', 'get_arg_value_as_type', '(', 'val', ')', '}', ')', 'else', ':', 'query', '[', 'arg', ']', '=', 'self', '.', 'get_arg_value_as_type', '(', 'self', '.', 'request', '.', 'arguments', '.', 'get', '(', 'arg', ')', '[', '0', ']', ')', 'return', 'query']
Generate a mongo query from the given URL query parameters, handles OR query via multiples :param list reserved_attributes: A list of attributes you want to exclude from this particular query :return: dict
['Generate', 'a', 'mongo', 'query', 'from', 'the', 'given', 'URL', 'query', 'parameters', 'handles', 'OR', 'query', 'via', 'multiples']
train
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L108-L125
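A standalone re-creation (not part of the record) of the query-building logic above, with a stubbed arguments dict in Tornado's list-per-key shape and plain strings standing in for get_arg_value_as_type; the names are illustrative only.

arguments = {'status': ['active'], 'tag': ['a', 'b']}  # stand-in for self.request.arguments

query = {}
for arg, vals in arguments.items():
    if len(vals) > 1:
        query['$or'] = [{arg: v} for v in vals]  # multiple values become an OR query
    else:
        query[arg] = vals[0]

print(query)  # {'status': 'active', '$or': [{'tag': 'a'}, {'tag': 'b'}]}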
4,728
biocore/burrito-fillings
bfillings/mothur.py
Mothur.__get_method_abbrev
def __get_method_abbrev(self): """Abbreviated form of clustering method parameter. Used to guess output filenames for MOTHUR. """ abbrevs = { 'furthest': 'fn', 'nearest': 'nn', 'average': 'an', } if self.Parameters['method'].isOn(): method = self.Parameters['method'].Value else: method = self.Parameters['method'].Default return abbrevs[method]
python
def __get_method_abbrev(self): """Abbreviated form of clustering method parameter. Used to guess output filenames for MOTHUR. """ abbrevs = { 'furthest': 'fn', 'nearest': 'nn', 'average': 'an', } if self.Parameters['method'].isOn(): method = self.Parameters['method'].Value else: method = self.Parameters['method'].Default return abbrevs[method]
['def', '__get_method_abbrev', '(', 'self', ')', ':', 'abbrevs', '=', '{', "'furthest'", ':', "'fn'", ',', "'nearest'", ':', "'nn'", ',', "'average'", ':', "'an'", ',', '}', 'if', 'self', '.', 'Parameters', '[', "'method'", ']', '.', 'isOn', '(', ')', ':', 'method', '=', 'self', '.', 'Parameters', '[', "'method'", ']', '.', 'Value', 'else', ':', 'method', '=', 'self', '.', 'Parameters', '[', "'method'", ']', '.', 'Default', 'return', 'abbrevs', '[', 'method', ']']
Abbreviated form of clustering method parameter. Used to guess output filenames for MOTHUR.
['Abbreviated', 'form', 'of', 'clustering', 'method', 'parameter', '.']
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/mothur.py#L292-L306
4,729
materialsproject/pymatgen
pymatgen/command_line/aconvasp_caller.py
run_aconvasp_command
def run_aconvasp_command(command, structure): """ Helper function for calling aconvasp with different arguments """ poscar = Poscar(structure) p = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE) output = p.communicate(input=poscar.get_string()) return output
python
def run_aconvasp_command(command, structure): """ Helper function for calling aconvasp with different arguments """ poscar = Poscar(structure) p = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE) output = p.communicate(input=poscar.get_string()) return output
['def', 'run_aconvasp_command', '(', 'command', ',', 'structure', ')', ':', 'poscar', '=', 'Poscar', '(', 'structure', ')', 'p', '=', 'subprocess', '.', 'Popen', '(', 'command', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stdin', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ')', 'output', '=', 'p', '.', 'communicate', '(', 'input', '=', 'poscar', '.', 'get_string', '(', ')', ')', 'return', 'output']
Helper function for calling aconvasp with different arguments
['Helper', 'function', 'for', 'calling', 'aconvasp', 'with', 'different', 'arguments']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/command_line/aconvasp_caller.py#L25-L34
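A hedged call sketch (not part of the record); the structure is a minimal made-up cubic Fe cell and the aconvasp flag is hypothetical -- the point is the list-of-args command plus the POSCAR piped on stdin.

from pymatgen.core import Structure, Lattice
from pymatgen.command_line.aconvasp_caller import run_aconvasp_command

struct = Structure(Lattice.cubic(2.87), ['Fe'], [[0, 0, 0]])  # illustrative structure
out, err = run_aconvasp_command(['aconvasp', '--some-flag'], struct)  # flag is hypothetical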
4,730
openbermuda/ripl
ripl/slidelayout.py
SlideLayout.layout
def layout(self, slide): """ Return layout information for slide """ image = Image.new('RGB', (WIDTH, HEIGHT), 'black') draw = ImageDraw.Draw(image) draw.font = self.font self.vertical_layout(draw, slide) self.horizontal_layout(draw, slide) return slide
python
def layout(self, slide): """ Return layout information for slide """ image = Image.new('RGB', (WIDTH, HEIGHT), 'black') draw = ImageDraw.Draw(image) draw.font = self.font self.vertical_layout(draw, slide) self.horizontal_layout(draw, slide) return slide
['def', 'layout', '(', 'self', ',', 'slide', ')', ':', 'image', '=', 'Image', '.', 'new', '(', "'RGB'", ',', '(', 'WIDTH', ',', 'HEIGHT', ')', ',', "'black'", ')', 'draw', '=', 'ImageDraw', '.', 'Draw', '(', 'image', ')', 'draw', '.', 'font', '=', 'self', '.', 'font', 'self', '.', 'vertical_layout', '(', 'draw', ',', 'slide', ')', 'self', '.', 'horizontal_layout', '(', 'draw', ',', 'slide', ')', 'return', 'slide']
Return layout information for slide
['Return', 'layout', 'information', 'for', 'slide']
train
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/slidelayout.py#L45-L56
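A minimal sketch (not part of the record) of the Pillow pattern the method is built on; WIDTH and HEIGHT in the record are module-level constants, so the 1024x768 canvas and text here are placeholders.

from PIL import Image, ImageDraw

image = Image.new('RGB', (1024, 768), 'black')  # blank canvas, as in layout()
draw = ImageDraw.Draw(image)
draw.text((10, 10), 'slide title', fill='white')  # default font; no layout logic here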
4,731
apache/spark
python/pyspark/rdd.py
RDD.map
def map(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each element of this RDD. >>> rdd = sc.parallelize(["b", "a", "c"]) >>> sorted(rdd.map(lambda x: (x, 1)).collect()) [('a', 1), ('b', 1), ('c', 1)] """ def func(_, iterator): return map(fail_on_stopiteration(f), iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning)
python
def map(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each element of this RDD. >>> rdd = sc.parallelize(["b", "a", "c"]) >>> sorted(rdd.map(lambda x: (x, 1)).collect()) [('a', 1), ('b', 1), ('c', 1)] """ def func(_, iterator): return map(fail_on_stopiteration(f), iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning)
['def', 'map', '(', 'self', ',', 'f', ',', 'preservesPartitioning', '=', 'False', ')', ':', 'def', 'func', '(', '_', ',', 'iterator', ')', ':', 'return', 'map', '(', 'fail_on_stopiteration', '(', 'f', ')', ',', 'iterator', ')', 'return', 'self', '.', 'mapPartitionsWithIndex', '(', 'func', ',', 'preservesPartitioning', ')']
Return a new RDD by applying a function to each element of this RDD. >>> rdd = sc.parallelize(["b", "a", "c"]) >>> sorted(rdd.map(lambda x: (x, 1)).collect()) [('a', 1), ('b', 1), ('c', 1)]
['Return', 'a', 'new', 'RDD', 'by', 'applying', 'a', 'function', 'to', 'each', 'element', 'of', 'this', 'RDD', '.']
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L317-L327
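A self-contained PySpark run (not part of the record) of the doctest shown above; the local master string is an assumption about the environment.

from pyspark import SparkContext

sc = SparkContext('local[1]', 'map-demo')
rdd = sc.parallelize(['b', 'a', 'c'])
print(sorted(rdd.map(lambda x: (x, 1)).collect()))  # [('a', 1), ('b', 1), ('c', 1)]
sc.stop()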
4,732
pypa/pipenv
pipenv/vendor/attr/_make.py
fields_dict
def fields_dict(cls): """ Return an ordered dictionary of ``attrs`` attributes for a class, whose keys are the attribute names. :param type cls: Class to introspect. :raise TypeError: If *cls* is not a class. :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` class. :rtype: an ordered dict where keys are attribute names and values are :class:`attr.Attribute`\\ s. This will be a :class:`dict` if it's naturally ordered like on Python 3.6+ or an :class:`~collections.OrderedDict` otherwise. .. versionadded:: 18.1.0 """ if not isclass(cls): raise TypeError("Passed object must be a class.") attrs = getattr(cls, "__attrs_attrs__", None) if attrs is None: raise NotAnAttrsClassError( "{cls!r} is not an attrs-decorated class.".format(cls=cls) ) return ordered_dict(((a.name, a) for a in attrs))
python
def fields_dict(cls): """ Return an ordered dictionary of ``attrs`` attributes for a class, whose keys are the attribute names. :param type cls: Class to introspect. :raise TypeError: If *cls* is not a class. :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` class. :rtype: an ordered dict where keys are attribute names and values are :class:`attr.Attribute`\\ s. This will be a :class:`dict` if it's naturally ordered like on Python 3.6+ or an :class:`~collections.OrderedDict` otherwise. .. versionadded:: 18.1.0 """ if not isclass(cls): raise TypeError("Passed object must be a class.") attrs = getattr(cls, "__attrs_attrs__", None) if attrs is None: raise NotAnAttrsClassError( "{cls!r} is not an attrs-decorated class.".format(cls=cls) ) return ordered_dict(((a.name, a) for a in attrs))
['def', 'fields_dict', '(', 'cls', ')', ':', 'if', 'not', 'isclass', '(', 'cls', ')', ':', 'raise', 'TypeError', '(', '"Passed object must be a class."', ')', 'attrs', '=', 'getattr', '(', 'cls', ',', '"__attrs_attrs__"', ',', 'None', ')', 'if', 'attrs', 'is', 'None', ':', 'raise', 'NotAnAttrsClassError', '(', '"{cls!r} is not an attrs-decorated class."', '.', 'format', '(', 'cls', '=', 'cls', ')', ')', 'return', 'ordered_dict', '(', '(', '(', 'a', '.', 'name', ',', 'a', ')', 'for', 'a', 'in', 'attrs', ')', ')']
Return an ordered dictionary of ``attrs`` attributes for a class, whose keys are the attribute names. :param type cls: Class to introspect. :raise TypeError: If *cls* is not a class. :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` class. :rtype: an ordered dict where keys are attribute names and values are :class:`attr.Attribute`\\ s. This will be a :class:`dict` if it's naturally ordered like on Python 3.6+ or an :class:`~collections.OrderedDict` otherwise. .. versionadded:: 18.1.0
['Return', 'an', 'ordered', 'dictionary', 'of', 'attrs', 'attributes', 'for', 'a', 'class', 'whose', 'keys', 'are', 'the', 'attribute', 'names', '.']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/attr/_make.py#L1339-L1364
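A usage sketch (not part of the record) against the public attr API this helper implements; per the docstring it is available from attrs 18.1.0.

import attr

@attr.s
class Point(object):
    x = attr.ib()
    y = attr.ib()

d = attr.fields_dict(Point)
print(list(d))      # ['x', 'y'] -- keys are attribute names
print(d['x'].name)  # 'x'       -- values are attr.Attribute instances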
4,733
SBRG/ssbio
ssbio/protein/sequence/properties/kinetic_folding_rate.py
get_foldrate_at_temp
def get_foldrate_at_temp(ref_rate, new_temp, ref_temp=37.0): """Scale the predicted kinetic folding rate of a protein to temperature T, based on the relationship ln(k_f)∝1/T Args: ref_rate (float): Kinetic folding rate calculated from the function :func:`~ssbio.protein.sequence.properties.kinetic_folding_rate.get_foldrate` new_temp (float): Temperature in degrees C ref_temp (float): Reference temperature, default to 37 C Returns: float: Kinetic folding rate k_f at temperature T """ # Not much data available on this slope value, however its effect on growth rate in a model is very small slope = 22000 # Get folding rate for the reference temperature preFactor = float(ref_rate) + slope / (float(ref_temp) + 273.15) # Calculate folding rate at desired temperature rate = math.exp(preFactor - slope / (float(new_temp) + 273.15)) return rate
python
def get_foldrate_at_temp(ref_rate, new_temp, ref_temp=37.0): """Scale the predicted kinetic folding rate of a protein to temperature T, based on the relationship ln(k_f)∝1/T Args: ref_rate (float): Kinetic folding rate calculated from the function :func:`~ssbio.protein.sequence.properties.kinetic_folding_rate.get_foldrate` new_temp (float): Temperature in degrees C ref_temp (float): Reference temperature, default to 37 C Returns: float: Kinetic folding rate k_f at temperature T """ # Not much data available on this slope value, however its effect on growth rate in a model is very small slope = 22000 # Get folding rate for the reference temperature preFactor = float(ref_rate) + slope / (float(ref_temp) + 273.15) # Calculate folding rate at desired temperature rate = math.exp(preFactor - slope / (float(new_temp) + 273.15)) return rate
['def', 'get_foldrate_at_temp', '(', 'ref_rate', ',', 'new_temp', ',', 'ref_temp', '=', '37.0', ')', ':', '# Not much data available on this slope value, however its effect on growth rate in a model is very small', 'slope', '=', '22000', '# Get folding rate for the reference temperature', 'preFactor', '=', 'float', '(', 'ref_rate', ')', '+', 'slope', '/', '(', 'float', '(', 'ref_temp', ')', '+', '273.15', ')', '# Calculate folding rate at desired temperature', 'rate', '=', 'math', '.', 'exp', '(', 'preFactor', '-', 'slope', '/', '(', 'float', '(', 'new_temp', ')', '+', '273.15', ')', ')', 'return', 'rate']
Scale the predicted kinetic folding rate of a protein to temperature T, based on the relationship ln(k_f)∝1/T Args: ref_rate (float): Kinetic folding rate calculated from the function :func:`~ssbio.protein.sequence.properties.kinetic_folding_rate.get_foldrate` new_temp (float): Temperature in degrees C ref_temp (float): Reference temperature, defaults to 37 C Returns: float: Kinetic folding rate k_f at temperature T
['Scale', 'the', 'predicted', 'kinetic', 'folding', 'rate', 'of', 'a', 'protein', 'to', 'temperature', 'T', 'based', 'on', 'the', 'relationship', 'ln', '(', 'k_f', ')', '∝1', '/', 'T']
train
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/kinetic_folding_rate.py#L55-L77
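A quick worked example of the temperature-scaling relation in record 4,733 above: a minimal sketch that inlines the function so it runs standalone (the record itself omits the needed import math); the reference rate of 5.0 is a made-up ln-scale value, not from the source.

    import math

    def get_foldrate_at_temp(ref_rate, new_temp, ref_temp=37.0):
        # Same constants and arithmetic as the record above.
        slope = 22000
        pre_factor = float(ref_rate) + slope / (float(ref_temp) + 273.15)
        return math.exp(pre_factor - slope / (float(new_temp) + 273.15))

    print(get_foldrate_at_temp(5.0, 37.0))  # exp(5.0) ~ 148.4, since T == ref_temp
    print(get_foldrate_at_temp(5.0, 42.0))  # ~457, i.e. faster folding at the higher T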
4,734
fastai/fastai
fastai/text/models/awd_lstm.py
AWD_LSTM.reset
def reset(self): "Reset the hidden states." [r.reset() for r in self.rnns if hasattr(r, 'reset')] if self.qrnn: self.hidden = [self._one_hidden(l) for l in range(self.n_layers)] else: self.hidden = [(self._one_hidden(l), self._one_hidden(l)) for l in range(self.n_layers)]
python
def reset(self): "Reset the hidden states." [r.reset() for r in self.rnns if hasattr(r, 'reset')] if self.qrnn: self.hidden = [self._one_hidden(l) for l in range(self.n_layers)] else: self.hidden = [(self._one_hidden(l), self._one_hidden(l)) for l in range(self.n_layers)]
['def', 'reset', '(', 'self', ')', ':', '[', 'r', '.', 'reset', '(', ')', 'for', 'r', 'in', 'self', '.', 'rnns', 'if', 'hasattr', '(', 'r', ',', "'reset'", ')', ']', 'if', 'self', '.', 'qrnn', ':', 'self', '.', 'hidden', '=', '[', 'self', '.', '_one_hidden', '(', 'l', ')', 'for', 'l', 'in', 'range', '(', 'self', '.', 'n_layers', ')', ']', 'else', ':', 'self', '.', 'hidden', '=', '[', '(', 'self', '.', '_one_hidden', '(', 'l', ')', ',', 'self', '.', '_one_hidden', '(', 'l', ')', ')', 'for', 'l', 'in', 'range', '(', 'self', '.', 'n_layers', ')', ']']
Reset the hidden states.
['Reset', 'the', 'hidden', 'states', '.']
train
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/text/models/awd_lstm.py#L135-L139
4,735
pysathq/pysat
pysat/formula.py
CNF.from_file
def from_file(self, fname, comment_lead=['c'], compressed_with='use_ext'): """ Read a CNF formula from a file in the DIMACS format. A file name is expected as an argument. A default argument is ``comment_lead`` for parsing comment lines. A given file can be compressed by either gzip, bzip2, or lzma. :param fname: name of a file to parse. :param comment_lead: a list of characters leading comment lines :param compressed_with: file compression algorithm :type fname: str :type comment_lead: list(str) :type compressed_with: str Note that the ``compressed_with`` parameter can be ``None`` (i.e. the file is uncompressed), ``'gzip'``, ``'bzip2'``, ``'lzma'``, or ``'use_ext'``. The latter value indicates that compression type should be automatically determined based on the file extension. Using ``'lzma'`` in Python 2 requires the ``backports.lzma`` package to be additionally installed. Usage example: .. code-block:: python >>> from pysat.formula import CNF >>> cnf1 = CNF() >>> cnf1.from_file('some-file.cnf.gz', compressed_with='gzip') >>> >>> cnf2 = CNF(from_file='another-file.cnf') """ with FileObject(fname, mode='r', compression=compressed_with) as fobj: self.from_fp(fobj.fp, comment_lead)
python
def from_file(self, fname, comment_lead=['c'], compressed_with='use_ext'): """ Read a CNF formula from a file in the DIMACS format. A file name is expected as an argument. A default argument is ``comment_lead`` for parsing comment lines. A given file can be compressed by either gzip, bzip2, or lzma. :param fname: name of a file to parse. :param comment_lead: a list of characters leading comment lines :param compressed_with: file compression algorithm :type fname: str :type comment_lead: list(str) :type compressed_with: str Note that the ``compressed_with`` parameter can be ``None`` (i.e. the file is uncompressed), ``'gzip'``, ``'bzip2'``, ``'lzma'``, or ``'use_ext'``. The latter value indicates that compression type should be automatically determined based on the file extension. Using ``'lzma'`` in Python 2 requires the ``backports.lzma`` package to be additionally installed. Usage example: .. code-block:: python >>> from pysat.formula import CNF >>> cnf1 = CNF() >>> cnf1.from_file('some-file.cnf.gz', compressed_with='gzip') >>> >>> cnf2 = CNF(from_file='another-file.cnf') """ with FileObject(fname, mode='r', compression=compressed_with) as fobj: self.from_fp(fobj.fp, comment_lead)
['def', 'from_file', '(', 'self', ',', 'fname', ',', 'comment_lead', '=', '[', "'c'", ']', ',', 'compressed_with', '=', "'use_ext'", ')', ':', 'with', 'FileObject', '(', 'fname', ',', 'mode', '=', "'r'", ',', 'compression', '=', 'compressed_with', ')', 'as', 'fobj', ':', 'self', '.', 'from_fp', '(', 'fobj', '.', 'fp', ',', 'comment_lead', ')']
Read a CNF formula from a file in the DIMACS format. A file name is expected as an argument. A default argument is ``comment_lead`` for parsing comment lines. A given file can be compressed by either gzip, bzip2, or lzma. :param fname: name of a file to parse. :param comment_lead: a list of characters leading comment lines :param compressed_with: file compression algorithm :type fname: str :type comment_lead: list(str) :type compressed_with: str Note that the ``compressed_with`` parameter can be ``None`` (i.e. the file is uncompressed), ``'gzip'``, ``'bzip2'``, ``'lzma'``, or ``'use_ext'``. The latter value indicates that compression type should be automatically determined based on the file extension. Using ``'lzma'`` in Python 2 requires the ``backports.lzma`` package to be additionally installed. Usage example: .. code-block:: python >>> from pysat.formula import CNF >>> cnf1 = CNF() >>> cnf1.from_file('some-file.cnf.gz', compressed_with='gzip') >>> >>> cnf2 = CNF(from_file='another-file.cnf')
['Read', 'a', 'CNF', 'formula', 'from', 'a', 'file', 'in', 'the', 'DIMACS', 'format', '.', 'A', 'file', 'name', 'is', 'expected', 'as', 'an', 'argument', '.', 'A', 'default', 'argument', 'is', 'comment_lead', 'for', 'parsing', 'comment', 'lines', '.', 'A', 'given', 'file', 'can', 'be', 'compressed', 'by', 'either', 'gzip', 'bzip2', 'or', 'lzma', '.']
train
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/formula.py#L409-L443
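Since the docstring above shows only the call, a hedged round-trip sketch may help: it writes a two-clause DIMACS file and reads it back, assuming the python-sat package is installed; the file name is arbitrary.

    from pysat.formula import CNF

    with open('tiny.cnf', 'w') as fp:
        fp.write('c a 3-variable, 2-clause example\n')
        fp.write('p cnf 3 2\n')
        fp.write('1 -3 0\n')
        fp.write('2 3 -1 0\n')

    cnf = CNF(from_file='tiny.cnf')
    print(cnf.nv, cnf.clauses)  # 3 [[1, -3], [2, 3, -1]]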
4,736
openid/python-openid
openid/extensions/draft/pape5.py
PAPEExtension._getAlias
def _getAlias(self, auth_level_uri): """Return the alias for the specified auth level URI. @raises KeyError: if no alias is defined """ for (alias, existing_uri) in self.auth_level_aliases.iteritems(): if auth_level_uri == existing_uri: return alias raise KeyError(auth_level_uri)
python
def _getAlias(self, auth_level_uri): """Return the alias for the specified auth level URI. @raises KeyError: if no alias is defined """ for (alias, existing_uri) in self.auth_level_aliases.iteritems(): if auth_level_uri == existing_uri: return alias raise KeyError(auth_level_uri)
['def', '_getAlias', '(', 'self', ',', 'auth_level_uri', ')', ':', 'for', '(', 'alias', ',', 'existing_uri', ')', 'in', 'self', '.', 'auth_level_aliases', '.', 'iteritems', '(', ')', ':', 'if', 'auth_level_uri', '==', 'existing_uri', ':', 'return', 'alias', 'raise', 'KeyError', '(', 'auth_level_uri', ')']
Return the alias for the specified auth level URI. @raises KeyError: if no alias is defined
['Return', 'the', 'alias', 'for', 'the', 'specified', 'auth', 'level', 'URI', '.']
train
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/draft/pape5.py#L81-L90
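The method above is a plain reverse lookup over the alias mapping; here is a standalone Python 3 sketch of the same pattern (dict.items() instead of the record's Python 2 iteritems; the alias and URI are made up).

    auth_level_aliases = {'strong': 'http://example.com/auth/strong'}

    def get_alias(aliases, uri):
        # Linear scan: return the first alias mapped to the given URI.
        for alias, existing_uri in aliases.items():
            if uri == existing_uri:
                return alias
        raise KeyError(uri)

    print(get_alias(auth_level_aliases, 'http://example.com/auth/strong'))  # strong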
4,737
romana/vpc-router
vpcrouter/vpc/__init__.py
find_instance_and_eni_by_ip
def find_instance_and_eni_by_ip(vpc_info, ip): """ Given a specific IP address, find the EC2 instance and ENI. We need this information for setting the route. Returns instance and ENI in a tuple. """ for instance in vpc_info['instances']: for eni in instance.interfaces: for pa in eni.private_ip_addresses: if pa.private_ip_address == ip: return instance, eni raise VpcRouteSetError("Could not find instance/eni for '%s' " "in VPC '%s'." % (ip, vpc_info['vpc'].id))
python
def find_instance_and_eni_by_ip(vpc_info, ip): """ Given a specific IP address, find the EC2 instance and ENI. We need this information for setting the route. Returns instance and ENI in a tuple. """ for instance in vpc_info['instances']: for eni in instance.interfaces: for pa in eni.private_ip_addresses: if pa.private_ip_address == ip: return instance, eni raise VpcRouteSetError("Could not find instance/eni for '%s' " "in VPC '%s'." % (ip, vpc_info['vpc'].id))
['def', 'find_instance_and_eni_by_ip', '(', 'vpc_info', ',', 'ip', ')', ':', 'for', 'instance', 'in', 'vpc_info', '[', "'instances'", ']', ':', 'for', 'eni', 'in', 'instance', '.', 'interfaces', ':', 'for', 'pa', 'in', 'eni', '.', 'private_ip_addresses', ':', 'if', 'pa', '.', 'private_ip_address', '==', 'ip', ':', 'return', 'instance', ',', 'eni', 'raise', 'VpcRouteSetError', '(', '"Could not find instance/eni for \'%s\' "', '"in VPC \'%s\'."', '%', '(', 'ip', ',', 'vpc_info', '[', "'vpc'", ']', '.', 'id', ')', ')']
Given a specific IP address, find the EC2 instance and ENI. We need this information for setting the route. Returns instance and ENI in a tuple.
['Given', 'a', 'specific', 'IP', 'address', 'find', 'the', 'EC2', 'instance', 'and', 'ENI', '.']
train
https://github.com/romana/vpc-router/blob/d696c2e023f1111ceb61f9c6fbabfafed8e14040/vpcrouter/vpc/__init__.py#L168-L183
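A minimal sketch of the instance, ENI, private-IP traversal above, with SimpleNamespace stubs standing in for boto's objects; every name and address is made up.

    from types import SimpleNamespace

    addr = SimpleNamespace(private_ip_address='10.0.0.5')
    eni = SimpleNamespace(private_ip_addresses=[addr])
    instance = SimpleNamespace(interfaces=[eni])
    vpc_info = {'instances': [instance]}

    # Same three nested loops as the record above.
    for inst in vpc_info['instances']:
        for interface in inst.interfaces:
            for pa in interface.private_ip_addresses:
                if pa.private_ip_address == '10.0.0.5':
                    print('match:', inst, interface)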
4,738
iotile/coretools
iotilebuild/iotile/build/config/site_scons/autobuild.py
autobuild_arm_program
def autobuild_arm_program(elfname, test_dir=os.path.join('firmware', 'test'), patch=True): """ Build an ARM module for all targets and build all unit tests. If pcb files are given, also build those. """ try: #Build for all targets family = utilities.get_family('module_settings.json') family.for_all_targets(family.tile.short_name, lambda x: arm.build_program(family.tile, elfname, x, patch=patch)) #Build all unit tests unit_test.build_units(os.path.join('firmware','test'), family.targets(family.tile.short_name)) Alias('release', os.path.join('build', 'output')) Alias('test', os.path.join('build', 'test', 'output')) Default(['release', 'test']) autobuild_release(family) if os.path.exists('doc'): autobuild_documentation(family.tile) except IOTileException as e: print(e.format()) sys.exit(1)
python
def autobuild_arm_program(elfname, test_dir=os.path.join('firmware', 'test'), patch=True): """ Build an ARM module for all targets and build all unit tests. If pcb files are given, also build those. """ try: #Build for all targets family = utilities.get_family('module_settings.json') family.for_all_targets(family.tile.short_name, lambda x: arm.build_program(family.tile, elfname, x, patch=patch)) #Build all unit tests unit_test.build_units(os.path.join('firmware','test'), family.targets(family.tile.short_name)) Alias('release', os.path.join('build', 'output')) Alias('test', os.path.join('build', 'test', 'output')) Default(['release', 'test']) autobuild_release(family) if os.path.exists('doc'): autobuild_documentation(family.tile) except IOTileException as e: print(e.format()) sys.exit(1)
['def', 'autobuild_arm_program', '(', 'elfname', ',', 'test_dir', '=', 'os', '.', 'path', '.', 'join', '(', "'firmware'", ',', "'test'", ')', ',', 'patch', '=', 'True', ')', ':', 'try', ':', '#Build for all targets', 'family', '=', 'utilities', '.', 'get_family', '(', "'module_settings.json'", ')', 'family', '.', 'for_all_targets', '(', 'family', '.', 'tile', '.', 'short_name', ',', 'lambda', 'x', ':', 'arm', '.', 'build_program', '(', 'family', '.', 'tile', ',', 'elfname', ',', 'x', ',', 'patch', '=', 'patch', ')', ')', '#Build all unit tests', 'unit_test', '.', 'build_units', '(', 'os', '.', 'path', '.', 'join', '(', "'firmware'", ',', "'test'", ')', ',', 'family', '.', 'targets', '(', 'family', '.', 'tile', '.', 'short_name', ')', ')', 'Alias', '(', "'release'", ',', 'os', '.', 'path', '.', 'join', '(', "'build'", ',', "'output'", ')', ')', 'Alias', '(', "'test'", ',', 'os', '.', 'path', '.', 'join', '(', "'build'", ',', "'test'", ',', "'output'", ')', ')', 'Default', '(', '[', "'release'", ',', "'test'", ']', ')', 'autobuild_release', '(', 'family', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', "'doc'", ')', ':', 'autobuild_documentation', '(', 'family', '.', 'tile', ')', 'except', 'IOTileException', 'as', 'e', ':', 'print', '(', 'e', '.', 'format', '(', ')', ')', 'sys', '.', 'exit', '(', '1', ')']
Build an ARM module for all targets and build all unit tests. If pcb files are given, also build those.
['Build', 'an', 'ARM', 'module', 'for', 'all', 'targets', 'and', 'build', 'all', 'unit', 'tests', '.', 'If', 'pcb', 'files', 'are', 'given', 'also', 'build', 'those', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/site_scons/autobuild.py#L152-L176
4,739
fastai/fastai
fastai/widgets/image_cleaner.py
ImageCleaner.get_widgets
def get_widgets(self, duplicates): "Create and format widget set." widgets = [] for (img,fp,human_readable_label) in self._all_images[:self._batch_size]: img_widget = self.make_img_widget(img, layout=Layout(height='250px', width='300px')) dropdown = self.make_dropdown_widget(description='', options=self._labels, value=human_readable_label, file_path=fp, handler=self.relabel, layout=Layout(width='auto')) delete_btn = self.make_button_widget('Delete', file_path=fp, handler=self.on_delete) widgets.append(self.make_vertical_box([img_widget, dropdown, delete_btn], layout=Layout(width='auto', height='300px', overflow_x="hidden"), duplicates=duplicates)) self._batch.append((img_widget, delete_btn, fp)) return widgets
python
def get_widgets(self, duplicates): "Create and format widget set." widgets = [] for (img,fp,human_readable_label) in self._all_images[:self._batch_size]: img_widget = self.make_img_widget(img, layout=Layout(height='250px', width='300px')) dropdown = self.make_dropdown_widget(description='', options=self._labels, value=human_readable_label, file_path=fp, handler=self.relabel, layout=Layout(width='auto')) delete_btn = self.make_button_widget('Delete', file_path=fp, handler=self.on_delete) widgets.append(self.make_vertical_box([img_widget, dropdown, delete_btn], layout=Layout(width='auto', height='300px', overflow_x="hidden"), duplicates=duplicates)) self._batch.append((img_widget, delete_btn, fp)) return widgets
['def', 'get_widgets', '(', 'self', ',', 'duplicates', ')', ':', 'widgets', '=', '[', ']', 'for', '(', 'img', ',', 'fp', ',', 'human_readable_label', ')', 'in', 'self', '.', '_all_images', '[', ':', 'self', '.', '_batch_size', ']', ':', 'img_widget', '=', 'self', '.', 'make_img_widget', '(', 'img', ',', 'layout', '=', 'Layout', '(', 'height', '=', "'250px'", ',', 'width', '=', "'300px'", ')', ')', 'dropdown', '=', 'self', '.', 'make_dropdown_widget', '(', 'description', '=', "''", ',', 'options', '=', 'self', '.', '_labels', ',', 'value', '=', 'human_readable_label', ',', 'file_path', '=', 'fp', ',', 'handler', '=', 'self', '.', 'relabel', ',', 'layout', '=', 'Layout', '(', 'width', '=', "'auto'", ')', ')', 'delete_btn', '=', 'self', '.', 'make_button_widget', '(', "'Delete'", ',', 'file_path', '=', 'fp', ',', 'handler', '=', 'self', '.', 'on_delete', ')', 'widgets', '.', 'append', '(', 'self', '.', 'make_vertical_box', '(', '[', 'img_widget', ',', 'dropdown', ',', 'delete_btn', ']', ',', 'layout', '=', 'Layout', '(', 'width', '=', "'auto'", ',', 'height', '=', "'300px'", ',', 'overflow_x', '=', '"hidden"', ')', ',', 'duplicates', '=', 'duplicates', ')', ')', 'self', '.', '_batch', '.', 'append', '(', '(', 'img_widget', ',', 'delete_btn', ',', 'fp', ')', ')', 'return', 'widgets']
Create and format widget set.
['Create', 'and', 'format', 'widget', 'set', '.']
train
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/widgets/image_cleaner.py#L189-L201
4,740
pantsbuild/pants
contrib/node/src/python/pants/contrib/node/tasks/node_task.py
NodeTask.add_package
def add_package( self, target=None, package_manager=None, package=None, type_option=None, version_option=None, node_paths=None, workunit_name=None, workunit_labels=None): """Add an additional package using requested package_manager.""" package_manager = package_manager or self.get_package_manager(target=target) command = package_manager.add_package( package, type_option=type_option, version_option=version_option, node_paths=node_paths, ) return self._execute_command( command, workunit_name=workunit_name, workunit_labels=workunit_labels)
python
def add_package( self, target=None, package_manager=None, package=None, type_option=None, version_option=None, node_paths=None, workunit_name=None, workunit_labels=None): """Add an additional package using requested package_manager.""" package_manager = package_manager or self.get_package_manager(target=target) command = package_manager.add_package( package, type_option=type_option, version_option=version_option, node_paths=node_paths, ) return self._execute_command( command, workunit_name=workunit_name, workunit_labels=workunit_labels)
['def', 'add_package', '(', 'self', ',', 'target', '=', 'None', ',', 'package_manager', '=', 'None', ',', 'package', '=', 'None', ',', 'type_option', '=', 'None', ',', 'version_option', '=', 'None', ',', 'node_paths', '=', 'None', ',', 'workunit_name', '=', 'None', ',', 'workunit_labels', '=', 'None', ')', ':', 'package_manager', '=', 'package_manager', 'or', 'self', '.', 'get_package_manager', '(', 'target', '=', 'target', ')', 'command', '=', 'package_manager', '.', 'add_package', '(', 'package', ',', 'type_option', '=', 'type_option', ',', 'version_option', '=', 'version_option', ',', 'node_paths', '=', 'node_paths', ',', ')', 'return', 'self', '.', '_execute_command', '(', 'command', ',', 'workunit_name', '=', 'workunit_name', ',', 'workunit_labels', '=', 'workunit_labels', ')']
Add an additional package using requested package_manager.
['Add', 'an', 'additional', 'package', 'using', 'requested', 'package_manager', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/node/src/python/pants/contrib/node/tasks/node_task.py#L83-L96
4,741
kblin/ncbi-genome-download
ncbi_genome_download/core.py
get_strain_label
def get_strain_label(entry, viral=False): """Try to extract a strain from an assembly summary entry. First this checks 'infraspecific_name', then 'isolate', then it tries to get it from 'organism_name'. If all of these fail, it falls back to just returning the assembly accession number. """ def get_strain(entry): strain = entry['infraspecific_name'] if strain != '': strain = strain.split('=')[-1] return strain strain = entry['isolate'] if strain != '': return strain if len(entry['organism_name'].split(' ')) > 2 and not viral: strain = ' '.join(entry['organism_name'].split(' ')[2:]) return strain return entry['assembly_accession'] def cleanup(strain): strain = strain.strip() strain = strain.replace(' ', '_') strain = strain.replace(';', '_') strain = strain.replace('/', '_') strain = strain.replace('\\', '_') return strain return cleanup(get_strain(entry))
python
def get_strain_label(entry, viral=False): """Try to extract a strain from an assembly summary entry. First this checks 'infraspecific_name', then 'isolate', then it tries to get it from 'organism_name'. If all of these fail, it falls back to just returning the assembly accession number. """ def get_strain(entry): strain = entry['infraspecific_name'] if strain != '': strain = strain.split('=')[-1] return strain strain = entry['isolate'] if strain != '': return strain if len(entry['organism_name'].split(' ')) > 2 and not viral: strain = ' '.join(entry['organism_name'].split(' ')[2:]) return strain return entry['assembly_accession'] def cleanup(strain): strain = strain.strip() strain = strain.replace(' ', '_') strain = strain.replace(';', '_') strain = strain.replace('/', '_') strain = strain.replace('\\', '_') return strain return cleanup(get_strain(entry))
['def', 'get_strain_label', '(', 'entry', ',', 'viral', '=', 'False', ')', ':', 'def', 'get_strain', '(', 'entry', ')', ':', 'strain', '=', 'entry', '[', "'infraspecific_name'", ']', 'if', 'strain', '!=', "''", ':', 'strain', '=', 'strain', '.', 'split', '(', "'='", ')', '[', '-', '1', ']', 'return', 'strain', 'strain', '=', 'entry', '[', "'isolate'", ']', 'if', 'strain', '!=', "''", ':', 'return', 'strain', 'if', 'len', '(', 'entry', '[', "'organism_name'", ']', '.', 'split', '(', "' '", ')', ')', '>', '2', 'and', 'not', 'viral', ':', 'strain', '=', "' '", '.', 'join', '(', 'entry', '[', "'organism_name'", ']', '.', 'split', '(', "' '", ')', '[', '2', ':', ']', ')', 'return', 'strain', 'return', 'entry', '[', "'assembly_accession'", ']', 'def', 'cleanup', '(', 'strain', ')', ':', 'strain', '=', 'strain', '.', 'strip', '(', ')', 'strain', '=', 'strain', '.', 'replace', '(', "' '", ',', "'_'", ')', 'strain', '=', 'strain', '.', 'replace', '(', "';'", ',', "'_'", ')', 'strain', '=', 'strain', '.', 'replace', '(', "'/'", ',', "'_'", ')', 'strain', '=', 'strain', '.', 'replace', '(', "'\\\\'", ',', "'_'", ')', 'return', 'strain', 'return', 'cleanup', '(', 'get_strain', '(', 'entry', ')', ')']
Try to extract a strain from an assembly summary entry. First this checks 'infraspecific_name', then 'isolate', then it tries to get it from 'organism_name'. If all of these fail, it falls back to just returning the assembly accession number.
['Try', 'to', 'extract', 'a', 'strain', 'from', 'an', 'assembly', 'summary', 'entry', '.']
train
https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L582-L613
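A worked example of the fallback chain above; the entry dict mimics the columns of an NCBI assembly summary and the values are made up.

    entry = {
        'infraspecific_name': 'strain=K-12',
        'isolate': '',
        'organism_name': 'Escherichia coli K-12',
        'assembly_accession': 'GCF_000005845.2',
    }
    # 'infraspecific_name' is non-empty, so it wins over the other fields:
    strain = entry['infraspecific_name'].split('=')[-1]  # 'K-12'
    print(strain.strip().replace(' ', '_'))  # K-12 (cleanup is a no-op here)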
4,742
angr/angr
angr/sim_manager.py
SimulationManager.move
def move(self, from_stash, to_stash, filter_func=None): """ Move states from one stash to another. :param from_stash: Take matching states from this stash. :param to_stash: Put matching states into this stash. :param filter_func: Stash states that match this filter. Should be a function that takes a state and returns True or False. (default: stash all states) :returns: The simulation manager, for chaining. :rtype: SimulationManager """ filter_func = filter_func or (lambda s: True) stash_splitter = lambda states: reversed(self._filter_states(filter_func, states)) return self.split(stash_splitter, from_stash=from_stash, to_stash=to_stash)
python
def move(self, from_stash, to_stash, filter_func=None): """ Move states from one stash to another. :param from_stash: Take matching states from this stash. :param to_stash: Put matching states into this stash. :param filter_func: Stash states that match this filter. Should be a function that takes a state and returns True or False. (default: stash all states) :returns: The simulation manager, for chaining. :rtype: SimulationManager """ filter_func = filter_func or (lambda s: True) stash_splitter = lambda states: reversed(self._filter_states(filter_func, states)) return self.split(stash_splitter, from_stash=from_stash, to_stash=to_stash)
['def', 'move', '(', 'self', ',', 'from_stash', ',', 'to_stash', ',', 'filter_func', '=', 'None', ')', ':', 'filter_func', '=', 'filter_func', 'or', '(', 'lambda', 's', ':', 'True', ')', 'stash_splitter', '=', 'lambda', 'states', ':', 'reversed', '(', 'self', '.', '_filter_states', '(', 'filter_func', ',', 'states', ')', ')', 'return', 'self', '.', 'split', '(', 'stash_splitter', ',', 'from_stash', '=', 'from_stash', ',', 'to_stash', '=', 'to_stash', ')']
Move states from one stash to another. :param from_stash: Take matching states from this stash. :param to_stash: Put matching states into this stash. :param filter_func: Stash states that match this filter. Should be a function that takes a state and returns True or False. (default: stash all states) :returns: The simulation manager, for chaining. :rtype: SimulationManager
['Move', 'states', 'from', 'one', 'stash', 'to', 'another', '.']
train
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_manager.py#L461-L475
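A hedged usage sketch for move(): it assumes angr is installed and that exploring /bin/true produces some active states; the depth threshold is arbitrary.

    import angr

    proj = angr.Project('/bin/true', auto_load_libs=False)
    simgr = proj.factory.simulation_manager()
    simgr.run(n=10)
    # Move every state whose history is deeper than 5 blocks to 'pruned'.
    simgr.move(from_stash='active', to_stash='pruned',
               filter_func=lambda s: s.history.block_count > 5)
    print(simgr)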
4,743
awslabs/sockeye
sockeye/training.py
DecoderProcessManager.collect_results
def collect_results(self) -> Optional[Tuple[int, Dict[str, float]]]: """ Returns the decoded checkpoint and the decoder metrics or None if the queue is empty. """ self.wait_to_finish() if self.decoder_metric_queue.empty(): if self._results_pending: self._any_process_died = True self._results_pending = False return None decoded_checkpoint, decoder_metrics = self.decoder_metric_queue.get() assert self.decoder_metric_queue.empty() self._results_pending = False logger.info("Decoder-%d finished: %s", decoded_checkpoint, decoder_metrics) return decoded_checkpoint, decoder_metrics
python
def collect_results(self) -> Optional[Tuple[int, Dict[str, float]]]: """ Returns the decoded checkpoint and the decoder metrics or None if the queue is empty. """ self.wait_to_finish() if self.decoder_metric_queue.empty(): if self._results_pending: self._any_process_died = True self._results_pending = False return None decoded_checkpoint, decoder_metrics = self.decoder_metric_queue.get() assert self.decoder_metric_queue.empty() self._results_pending = False logger.info("Decoder-%d finished: %s", decoded_checkpoint, decoder_metrics) return decoded_checkpoint, decoder_metrics
['def', 'collect_results', '(', 'self', ')', '->', 'Optional', '[', 'Tuple', '[', 'int', ',', 'Dict', '[', 'str', ',', 'float', ']', ']', ']', ':', 'self', '.', 'wait_to_finish', '(', ')', 'if', 'self', '.', 'decoder_metric_queue', '.', 'empty', '(', ')', ':', 'if', 'self', '.', '_results_pending', ':', 'self', '.', '_any_process_died', '=', 'True', 'self', '.', '_results_pending', '=', 'False', 'return', 'None', 'decoded_checkpoint', ',', 'decoder_metrics', '=', 'self', '.', 'decoder_metric_queue', '.', 'get', '(', ')', 'assert', 'self', '.', 'decoder_metric_queue', '.', 'empty', '(', ')', 'self', '.', '_results_pending', '=', 'False', 'logger', '.', 'info', '(', '"Decoder-%d finished: %s"', ',', 'decoded_checkpoint', ',', 'decoder_metrics', ')', 'return', 'decoded_checkpoint', ',', 'decoder_metrics']
Returns the decoded checkpoint and the decoder metrics or None if the queue is empty.
['Returns', 'the', 'decoded', 'checkpoint', 'and', 'the', 'decoder', 'metrics', 'or', 'None', 'if', 'the', 'queue', 'is', 'empty', '.']
train
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/training.py#L1319-L1333
4,744
gc3-uzh-ch/elasticluster
elasticluster/providers/openstack.py
OpenStackCloudProvider._check_security_groups
def _check_security_groups(self, names): """ Raise an exception if any of the named security groups does not exist. :param List[str] names: List of security group names :raises: `SecurityGroupError` if group does not exist """ self._init_os_api() log.debug("Checking existence of security group(s) %s ...", names) try: # python-novaclient < 8.0.0 security_groups = self.nova_client.security_groups.list() existing = set(sg.name for sg in security_groups) except AttributeError: security_groups = self.neutron_client.list_security_groups()['security_groups'] existing = set(sg[u'name'] for sg in security_groups) # TODO: We should be able to create the security group if it # doesn't exist and at least add a rule to accept ssh access. # Also, we should be able to add new rules to a security group # if needed. nonexisting = set(names) - existing if nonexisting: raise SecurityGroupError( "Security group(s) `{0}` do not exist" .format(', '.join(nonexisting))) # if we get to this point, all sec groups exist return True
python
def _check_security_groups(self, names): """ Raise an exception if any of the named security groups does not exist. :param List[str] names: List of security group names :raises: `SecurityGroupError` if group does not exist """ self._init_os_api() log.debug("Checking existence of security group(s) %s ...", names) try: # python-novaclient < 8.0.0 security_groups = self.nova_client.security_groups.list() existing = set(sg.name for sg in security_groups) except AttributeError: security_groups = self.neutron_client.list_security_groups()['security_groups'] existing = set(sg[u'name'] for sg in security_groups) # TODO: We should be able to create the security group if it # doesn't exist and at least add a rule to accept ssh access. # Also, we should be able to add new rules to a security group # if needed. nonexisting = set(names) - existing if nonexisting: raise SecurityGroupError( "Security group(s) `{0}` do not exist" .format(', '.join(nonexisting))) # if we get to this point, all sec groups exist return True
['def', '_check_security_groups', '(', 'self', ',', 'names', ')', ':', 'self', '.', '_init_os_api', '(', ')', 'log', '.', 'debug', '(', '"Checking existence of security group(s) %s ..."', ',', 'names', ')', 'try', ':', '# python-novaclient < 8.0.0', 'security_groups', '=', 'self', '.', 'nova_client', '.', 'security_groups', '.', 'list', '(', ')', 'existing', '=', 'set', '(', 'sg', '.', 'name', 'for', 'sg', 'in', 'security_groups', ')', 'except', 'AttributeError', ':', 'security_groups', '=', 'self', '.', 'neutron_client', '.', 'list_security_groups', '(', ')', '[', "'security_groups'", ']', 'existing', '=', 'set', '(', 'sg', '[', "u'name'", ']', 'for', 'sg', 'in', 'security_groups', ')', '# TODO: We should be able to create the security group if it', "# doesn't exist and at least add a rule to accept ssh access.", '# Also, we should be able to add new rules to a security group', '# if needed.', 'nonexisting', '=', 'set', '(', 'names', ')', '-', 'existing', 'if', 'nonexisting', ':', 'raise', 'SecurityGroupError', '(', '"Security group(s) `{0}` do not exist"', '.', 'format', '(', "', '", '.', 'join', '(', 'nonexisting', ')', ')', ')', '# if we get to this point, all sec groups exist', 'return', 'True']
Raise an exception if any of the named security groups does not exist. :param List[str] names: List of security group names :raises: `SecurityGroupError` if group does not exist
['Raise', 'an', 'exception', 'if', 'any', 'of', 'the', 'named', 'security', 'groups', 'does', 'not', 'exist', '.']
train
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/openstack.py#L720-L748
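The core of the check above is a set difference between requested and existing names; a standalone illustration follows (group names made up).

    existing = {'default', 'ssh-only'}
    requested = ['default', 'web']
    missing = set(requested) - existing
    if missing:
        print("Security group(s) `{0}` do not exist".format(', '.join(missing)))
    # -> Security group(s) `web` do not exist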
4,745
awslabs/serverless-application-model
samtranslator/model/intrinsics.py
is_instrinsic
def is_instrinsic(input): """ Checks if the given input is an intrinsic function dictionary. An intrinsic function is a dictionary with a single key that is the name of the intrinsic. :param input: Input value to check if it is an intrinsic :return: True if the input is an intrinsic function dictionary, False otherwise """ if input is not None \ and isinstance(input, dict) \ and len(input) == 1: key = list(input.keys())[0] return key == "Ref" or key == "Condition" or key.startswith("Fn::") return False
python
def is_instrinsic(input): """ Checks if the given input is an intrinsic function dictionary. An intrinsic function is a dictionary with a single key that is the name of the intrinsic. :param input: Input value to check if it is an intrinsic :return: True if the input is an intrinsic function dictionary, False otherwise """ if input is not None \ and isinstance(input, dict) \ and len(input) == 1: key = list(input.keys())[0] return key == "Ref" or key == "Condition" or key.startswith("Fn::") return False
['def', 'is_instrinsic', '(', 'input', ')', ':', 'if', 'input', 'is', 'not', 'None', 'and', 'isinstance', '(', 'input', ',', 'dict', ')', 'and', 'len', '(', 'input', ')', '==', '1', ':', 'key', '=', 'list', '(', 'input', '.', 'keys', '(', ')', ')', '[', '0', ']', 'return', 'key', '==', '"Ref"', 'or', 'key', '==', '"Condition"', 'or', 'key', '.', 'startswith', '(', '"Fn::"', ')', 'return', 'False']
Checks if the given input is an intrinsic function dictionary. An intrinsic function is a dictionary with a single key that is the name of the intrinsic. :param input: Input value to check if it is an intrinsic :return: True if the input is an intrinsic function dictionary, False otherwise
['Checks', 'if', 'the', 'given', 'input', 'is', 'an', 'intrinsic', 'function', 'dictionary', '.', 'An', 'intrinsic', 'function', 'is', 'a', 'dictionary', 'with', 'a', 'single', 'key', 'that', 'is', 'the', 'name', 'of', 'the', 'intrinsic', '.']
train
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/intrinsics.py#L124-L140
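A self-contained demo of the predicate above (function body copied from the record; the inputs are made-up CloudFormation snippets).

    def is_instrinsic(input):
        if input is not None and isinstance(input, dict) and len(input) == 1:
            key = list(input.keys())[0]
            return key == "Ref" or key == "Condition" or key.startswith("Fn::")
        return False

    print(is_instrinsic({"Ref": "MyBucket"}))              # True
    print(is_instrinsic({"Fn::GetAtt": ["Fn", "Arn"]}))    # True
    print(is_instrinsic({"Ref": "MyBucket", "Extra": 1}))  # False (two keys)
    print(is_instrinsic("Ref"))                            # False (not a dict)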
4,746
DataBiosphere/toil
src/toil/leader.py
Leader._handledFailedSuccessor
def _handledFailedSuccessor(self, jobNode, jobGraph, successorJobStoreID): """Deal with the successor having failed. Return True if there are still active successors. Return False if all successors have failed and the job is queued to run to handle the failed successors.""" logger.debug("Successor job: %s of job: %s has failed """ "predecessors", jobNode, jobGraph) # Add the job to the set having failed successors self.toilState.hasFailedSuccessors.add(jobGraph.jobStoreID) # Reduce active successor count and remove the successor as an active successor of the job self.toilState.successorCounts[jobGraph.jobStoreID] -= 1 assert self.toilState.successorCounts[jobGraph.jobStoreID] >= 0 self.toilState.successorJobStoreIDToPredecessorJobs[successorJobStoreID].remove(jobGraph) if len(self.toilState.successorJobStoreIDToPredecessorJobs[successorJobStoreID]) == 0: self.toilState.successorJobStoreIDToPredecessorJobs.pop(successorJobStoreID) # If the job now has no active successors add to active jobs # so it can be processed as a job with failed successors if self.toilState.successorCounts[jobGraph.jobStoreID] == 0: logger.debug("Job: %s has no successors to run " "and some are failed, adding to list of jobs " "with failed successors", jobGraph) self.toilState.successorCounts.pop(jobGraph.jobStoreID) self.toilState.updatedJobs.add((jobGraph, 0)) return False
python
def _handledFailedSuccessor(self, jobNode, jobGraph, successorJobStoreID): """Deal with the successor having failed. Return True if there are still active successors. Return False if all successors have failed and the job is queued to run to handle the failed successors.""" logger.debug("Successor job: %s of job: %s has failed """ "predecessors", jobNode, jobGraph) # Add the job to the set having failed successors self.toilState.hasFailedSuccessors.add(jobGraph.jobStoreID) # Reduce active successor count and remove the successor as an active successor of the job self.toilState.successorCounts[jobGraph.jobStoreID] -= 1 assert self.toilState.successorCounts[jobGraph.jobStoreID] >= 0 self.toilState.successorJobStoreIDToPredecessorJobs[successorJobStoreID].remove(jobGraph) if len(self.toilState.successorJobStoreIDToPredecessorJobs[successorJobStoreID]) == 0: self.toilState.successorJobStoreIDToPredecessorJobs.pop(successorJobStoreID) # If the job now has no active successors add to active jobs # so it can be processed as a job with failed successors if self.toilState.successorCounts[jobGraph.jobStoreID] == 0: logger.debug("Job: %s has no successors to run " "and some are failed, adding to list of jobs " "with failed successors", jobGraph) self.toilState.successorCounts.pop(jobGraph.jobStoreID) self.toilState.updatedJobs.add((jobGraph, 0)) return False
['def', '_handledFailedSuccessor', '(', 'self', ',', 'jobNode', ',', 'jobGraph', ',', 'successorJobStoreID', ')', ':', 'logger', '.', 'debug', '(', '"Successor job: %s of job: %s has failed "', '""', '"predecessors"', ',', 'jobNode', ',', 'jobGraph', ')', '# Add the job to the set having failed successors', 'self', '.', 'toilState', '.', 'hasFailedSuccessors', '.', 'add', '(', 'jobGraph', '.', 'jobStoreID', ')', '# Reduce active successor count and remove the successor as an active successor of the job', 'self', '.', 'toilState', '.', 'successorCounts', '[', 'jobGraph', '.', 'jobStoreID', ']', '-=', '1', 'assert', 'self', '.', 'toilState', '.', 'successorCounts', '[', 'jobGraph', '.', 'jobStoreID', ']', '>=', '0', 'self', '.', 'toilState', '.', 'successorJobStoreIDToPredecessorJobs', '[', 'successorJobStoreID', ']', '.', 'remove', '(', 'jobGraph', ')', 'if', 'len', '(', 'self', '.', 'toilState', '.', 'successorJobStoreIDToPredecessorJobs', '[', 'successorJobStoreID', ']', ')', '==', '0', ':', 'self', '.', 'toilState', '.', 'successorJobStoreIDToPredecessorJobs', '.', 'pop', '(', 'successorJobStoreID', ')', '# If the job now has no active successors add to active jobs', '# so it can be processed as a job with failed successors', 'if', 'self', '.', 'toilState', '.', 'successorCounts', '[', 'jobGraph', '.', 'jobStoreID', ']', '==', '0', ':', 'logger', '.', 'debug', '(', '"Job: %s has no successors to run "', '"and some are failed, adding to list of jobs "', '"with failed successors"', ',', 'jobGraph', ')', 'self', '.', 'toilState', '.', 'successorCounts', '.', 'pop', '(', 'jobGraph', '.', 'jobStoreID', ')', 'self', '.', 'toilState', '.', 'updatedJobs', '.', 'add', '(', '(', 'jobGraph', ',', '0', ')', ')', 'return', 'False']
Deal with the successor having failed. Return True if there are still active successors. Return False if all successors have failed and the job is queued to run to handle the failed successors.
['Deal', 'with', 'the', 'successor', 'having', 'failed', '.', 'Return', 'True', 'if', 'there', 'are', 'still', 'active', 'successors', '.', 'Return', 'False', 'if', 'all', 'successors', 'have', 'failed', 'and', 'the', 'job', 'is', 'queued', 'to', 'run', 'to', 'handle', 'the', 'failed', 'successors', '.']
train
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L259-L284
4,747
IBMStreams/pypi.streamsx
streamsx/rest_primitives.py
_StreamsV4Delegator._cancel_job
def _cancel_job(self, job, force): """Cancel job using streamtool.""" import streamsx.st as st if st._has_local_install: return st._cancel_job(job.id, force, domain_id=job.get_instance().get_domain().id, instance_id=job.get_instance().id) return False
python
def _cancel_job(self, job, force): """Cancel job using streamtool.""" import streamsx.st as st if st._has_local_install: return st._cancel_job(job.id, force, domain_id=job.get_instance().get_domain().id, instance_id=job.get_instance().id) return False
['def', '_cancel_job', '(', 'self', ',', 'job', ',', 'force', ')', ':', 'import', 'streamsx', '.', 'st', 'as', 'st', 'if', 'st', '.', '_has_local_install', ':', 'return', 'st', '.', '_cancel_job', '(', 'job', '.', 'id', ',', 'force', ',', 'domain_id', '=', 'job', '.', 'get_instance', '(', ')', '.', 'get_domain', '(', ')', '.', 'id', ',', 'instance_id', '=', 'job', '.', 'get_instance', '(', ')', '.', 'id', ')', 'return', 'False']
Cancel job using streamtool.
['Cancel', 'job', 'using', 'streamtool', '.']
train
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest_primitives.py#L2419-L2425
4,748
littlemo/moear-spider-zhihudaily
moear_spider_zhihudaily/spiders/zhihu_daily.py
ZhihuDailySpider.parse_post
def parse_post(self, response): ''' Fetch the article content from the concrete article URL generated in :meth:`.ZhihuDailySpider.parse`, format it, and fill the result into the object attribute ``item_list`` :param Response response: the request response object passed in by ``Scrapy`` ''' content = json.loads(response.body.decode(), encoding='UTF-8') post = response.meta['post'] post['origin_url'] = content.get('share_url', '') if not all([post['origin_url']]): raise ValueError('The original article URL is empty') post['title'] = html.escape(content.get('title', '')) if not all([post['title']]): raise ValueError('The article title is empty - {}'.format(post.get('origin_url'))) # Handle the special case where the type field is 1, i.e. the article is an off-site repost if content.get('type') == 1: self.logger.warn('Encountered an off-site article, handling it separately - {}'.format(post['title'])) return post soup = BeautifulSoup(content.get('body', ''), 'lxml') author_obj = soup.select('span.author') self.logger.debug(author_obj) if author_obj: author_list = [] for author in author_obj: author_list.append( author.string.rstrip(',, ').replace(',', ',')) author_list = list(set(author_list)) post['author'] = html.escape(','.join(author_list)) post['content'] = str(soup.div) # Continue filling in the post data image_back = content.get('images', [None])[0] if image_back: post['meta']['moear.cover_image_slug'] = \ content.get('image', image_back) self.logger.debug(post)
python
def parse_post(self, response): ''' Fetch the article content from the concrete article URL generated in :meth:`.ZhihuDailySpider.parse`, format it, and fill the result into the object attribute ``item_list`` :param Response response: the request response object passed in by ``Scrapy`` ''' content = json.loads(response.body.decode(), encoding='UTF-8') post = response.meta['post'] post['origin_url'] = content.get('share_url', '') if not all([post['origin_url']]): raise ValueError('The original article URL is empty') post['title'] = html.escape(content.get('title', '')) if not all([post['title']]): raise ValueError('The article title is empty - {}'.format(post.get('origin_url'))) # Handle the special case where the type field is 1, i.e. the article is an off-site repost if content.get('type') == 1: self.logger.warn('Encountered an off-site article, handling it separately - {}'.format(post['title'])) return post soup = BeautifulSoup(content.get('body', ''), 'lxml') author_obj = soup.select('span.author') self.logger.debug(author_obj) if author_obj: author_list = [] for author in author_obj: author_list.append( author.string.rstrip(',, ').replace(',', ',')) author_list = list(set(author_list)) post['author'] = html.escape(','.join(author_list)) post['content'] = str(soup.div) # Continue filling in the post data image_back = content.get('images', [None])[0] if image_back: post['meta']['moear.cover_image_slug'] = \ content.get('image', image_back) self.logger.debug(post)
['def', 'parse_post', '(', 'self', ',', 'response', ')', ':', 'content', '=', 'json', '.', 'loads', '(', 'response', '.', 'body', '.', 'decode', '(', ')', ',', 'encoding', '=', "'UTF-8'", ')', 'post', '=', 'response', '.', 'meta', '[', "'post'", ']', 'post', '[', "'origin_url'", ']', '=', 'content', '.', 'get', '(', "'share_url'", ',', "''", ')', 'if', 'not', 'all', '(', '[', 'post', '[', "'origin_url'", ']', ']', ')', ':', 'raise', 'ValueError', '(', "'原文地址为空')", '', 'post', '[', "'title'", ']', '=', 'html', '.', 'escape', '(', 'content', '.', 'get', '(', "'title'", ',', "''", ')', ')', 'if', 'not', 'all', '(', '[', 'post', '[', "'title'", ']', ']', ')', ':', 'raise', 'ValueError', '(', "'文章标题为空 - {}'.format(post", '.', "get('o", 'r', 'igin', '_', 'url', "'", ')))', '', '', '', '# 单独处理type字段为1的情况,即该文章为站外转发文章', 'if', 'content', '.', 'get', '(', "'type'", ')', '==', '1', ':', 'self', '.', 'logger', '.', 'warn', '(', "'遇到站外文章,单独处理 - {}'.format(post['title'])", ')', '', '', '', '', '', '', '', '', 'return', 'post', 'soup', '=', 'BeautifulSoup', '(', 'content', '.', 'get', '(', "'body'", ',', "''", ')', ',', "'lxml'", ')', 'author_obj', '=', 'soup', '.', 'select', '(', "'span.author'", ')', 'self', '.', 'logger', '.', 'debug', '(', 'author_obj', ')', 'if', 'author_obj', ':', 'author_list', '=', '[', ']', 'for', 'author', 'in', 'author_obj', ':', 'author_list', '.', 'append', '(', 'author', '.', 'string', '.', 'rstrip', '(', "',, ').", 'r', 'e', "place('", ',', "', ',", "'", ')', '', '', 'author_list', '=', 'list', '(', 'set', '(', 'author_list', ')', ')', 'post', '[', "'author'", ']', '=', 'html', '.', 'escape', '(', "','.j", 'o', 'in(a', 'u', 'thor_list))', '', '', 'post', '[', "'content'", ']', '=', 'str', '(', 'soup', '.', 'div', ')', '# 继续填充post数据', 'image_back', '=', 'content', '.', 'get', '(', "'images'", ',', '[', 'None', ']', ')', '[', '0', ']', 'if', 'image_back', ':', 'post', '[', "'meta'", ']', '[', "'moear.cover_image_slug'", ']', '=', 'content', '.', 'get', '(', "'image'", ',', 'image_back', ')', 'self', '.', 'logger', '.', 'debug', '(', 'post', ')']
Fetch the article content from the concrete article URL generated in :meth:`.ZhihuDailySpider.parse`, format it, and fill the result into the object attribute ``item_list`` :param Response response: the request response object passed in by ``Scrapy``
['Fetch', 'the', 'article', 'content', 'from', 'the', 'concrete', 'article', 'URL', 'generated', 'in', ':', 'meth', ':', '.', 'ZhihuDailySpider', '.', 'parse', 'format', 'it', 'and', 'fill', 'the', 'result', 'into', 'the', 'object', 'attribute', 'item_list']
train
https://github.com/littlemo/moear-spider-zhihudaily/blob/1e4e60b547afe3e2fbb3bbcb7d07a75dca608149/moear_spider_zhihudaily/spiders/zhihu_daily.py#L126-L166
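A standalone sketch of the author extraction inside parse_post above, assuming beautifulsoup4 and lxml are installed; the HTML snippet is made up.

    from bs4 import BeautifulSoup

    body = ('<div><span class="author">Alice,</span>'
            '<span class="author">Bob, </span>text</div>')
    soup = BeautifulSoup(body, 'lxml')
    authors = []
    for author in soup.select('span.author'):
        # Strip trailing full- and half-width commas, as the record does.
        authors.append(author.string.rstrip(',, ').replace(',', ','))
    print(','.join(sorted(set(authors))))  # Alice,Bob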
4,749
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
xmlNs.newNodeEatName
def newNodeEatName(self, name): """Creation of a new node element. @ns is optional (None). """ ret = libxml2mod.xmlNewNodeEatName(self._o, name) if ret is None:raise treeError('xmlNewNodeEatName() failed') __tmp = xmlNode(_obj=ret) return __tmp
python
def newNodeEatName(self, name): """Creation of a new node element. @ns is optional (None). """ ret = libxml2mod.xmlNewNodeEatName(self._o, name) if ret is None:raise treeError('xmlNewNodeEatName() failed') __tmp = xmlNode(_obj=ret) return __tmp
['def', 'newNodeEatName', '(', 'self', ',', 'name', ')', ':', 'ret', '=', 'libxml2mod', '.', 'xmlNewNodeEatName', '(', 'self', '.', '_o', ',', 'name', ')', 'if', 'ret', 'is', 'None', ':', 'raise', 'treeError', '(', "'xmlNewNodeEatName() failed'", ')', '__tmp', '=', 'xmlNode', '(', '_obj', '=', 'ret', ')', 'return', '__tmp']
Creation of a new node element. @ns is optional (None).
['Creation', 'of', 'a', 'new', 'node', 'element', '.']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L5946-L5951
4,750
bitesofcode/projexui
projexui/xsettings.py
XSettings.fileName
def fileName(self): """ Returns the filename. :return <str> """ if self._filename: return self._filename filename = nativestring(super(XSettings, self).fileName()) if self._customFormat: filename, ext = os.path.splitext(filename) filename += '.' + self._customFormat.extension() return filename
python
def fileName(self): """ Returns the filename. :return <str> """ if self._filename: return self._filename filename = nativestring(super(XSettings, self).fileName()) if self._customFormat: filename, ext = os.path.splitext(filename) filename += '.' + self._customFormat.extension() return filename
['def', 'fileName', '(', 'self', ')', ':', 'if', 'self', '.', '_filename', ':', 'return', 'self', '.', '_filename', 'filename', '=', 'nativestring', '(', 'super', '(', 'XSettings', ',', 'self', ')', '.', 'fileName', '(', ')', ')', 'if', 'self', '.', '_customFormat', ':', 'filename', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'filename', ')', 'filename', '+=', "'.'", '+', 'self', '.', '_customFormat', '.', 'extension', '(', ')', 'return', 'filename']
Returns the filename. :return <str>
['Returns', 'the', 'filename', '.', ':', 'return', '<str', '>']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xsettings.py#L565-L578
4,751
florianpaquet/mease
mease/registry.py
Mease.sender
def sender(self, func, routing=None, routing_re=None): """ Registers a sender function """ if routing and not isinstance(routing, list): routing = [routing] if routing_re: if not isinstance(routing_re, list): routing_re = [routing_re] routing_re[:] = [re.compile(r) for r in routing_re] self.senders.append((func, routing, routing_re))
python
def sender(self, func, routing=None, routing_re=None): """ Registers a sender function """ if routing and not isinstance(routing, list): routing = [routing] if routing_re: if not isinstance(routing_re, list): routing_re = [routing_re] routing_re[:] = [re.compile(r) for r in routing_re] self.senders.append((func, routing, routing_re))
['def', 'sender', '(', 'self', ',', 'func', ',', 'routing', '=', 'None', ',', 'routing_re', '=', 'None', ')', ':', 'if', 'routing', 'and', 'not', 'isinstance', '(', 'routing', ',', 'list', ')', ':', 'routing', '=', '[', 'routing', ']', 'if', 'routing_re', ':', 'if', 'not', 'isinstance', '(', 'routing_re', ',', 'list', ')', ':', 'routing_re', '=', '[', 'routing_re', ']', 'routing_re', '[', ':', ']', '=', '[', 're', '.', 'compile', '(', 'r', ')', 'for', 'r', 'in', 'routing_re', ']', 'self', '.', 'senders', '.', 'append', '(', '(', 'func', ',', 'routing', ',', 'routing_re', ')', ')']
Registers a sender function
['Registers', 'a', 'sender', 'function']
train
https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/registry.py#L65-L77
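A standalone illustration of the argument normalization sender() performs above: a bare string becomes a one-element list, and regex strings are compiled (routing keys made up).

    import re

    def normalize(routing=None, routing_re=None):
        if routing and not isinstance(routing, list):
            routing = [routing]
        if routing_re:
            if not isinstance(routing_re, list):
                routing_re = [routing_re]
            routing_re = [re.compile(r) for r in routing_re]
        return routing, routing_re

    print(normalize('chat'))                 # (['chat'], None)
    print(normalize(routing_re=r'^user\.'))  # (None, [re.compile('^user\\.')])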
4,752
dschep/ntfy
ntfy/backends/pushjet.py
notify
def notify(title, message, secret, endpoint=None, level=3, link=None, retcode=None): """ Required parameter: * ``secret`` - The Pushjet service secret token, created with http://docs.pushjet.io/docs/creating-a-new-service Optional parameters: * ``endpoint`` - custom Pushjet API endpoint (defaults to https://api.pushjet.io) * ``level`` - The importance level from 1(low) to 5(high) * ``link`` """ data = { 'title': title, 'message': message, 'level': level, 'secret': secret, } if link: data['link'] = link headers = {'User-Agent': USER_AGENT} if endpoint is None: endpoint = 'https://api.pushjet.io' resp = requests.post(endpoint + '/message', data=data, headers=headers) resp.raise_for_status()
python
def notify(title, message, secret, endpoint=None, level=3, link=None, retcode=None): """ Required parameter: * ``secret`` - The Pushjet service secret token, created with http://docs.pushjet.io/docs/creating-a-new-service Optional parameters: * ``endpoint`` - custom Pushjet API endpoint (defaults to https://api.pushjet.io) * ``level`` - The importance level from 1(low) to 5(high) * ``link`` """ data = { 'title': title, 'message': message, 'level': level, 'secret': secret, } if link: data['link'] = link headers = {'User-Agent': USER_AGENT} if endpoint is None: endpoint = 'https://api.pushjet.io' resp = requests.post(endpoint + '/message', data=data, headers=headers) resp.raise_for_status()
['def', 'notify', '(', 'title', ',', 'message', ',', 'secret', ',', 'endpoint', '=', 'None', ',', 'level', '=', '3', ',', 'link', '=', 'None', ',', 'retcode', '=', 'None', ')', ':', 'data', '=', '{', "'title'", ':', 'title', ',', "'message'", ':', 'message', ',', "'level'", ':', 'level', ',', "'secret'", ':', 'secret', ',', '}', 'if', 'link', ':', 'data', '[', "'link'", ']', '=', 'link', 'headers', '=', '{', "'User-Agent'", ':', 'USER_AGENT', '}', 'if', 'endpoint', 'is', 'None', ':', 'endpoint', '=', "'https://api.pushjet.io'", 'resp', '=', 'requests', '.', 'post', '(', 'endpoint', '+', "'/message'", ',', 'data', '=', 'data', ',', 'headers', '=', 'headers', ')', 'resp', '.', 'raise_for_status', '(', ')']
Required parameter: * ``secret`` - The Pushjet service secret token, created with http://docs.pushjet.io/docs/creating-a-new-service Optional parameters: * ``endpoint`` - custom Pushjet API endpoint (defaults to https://api.pushjet.io) * ``level`` - The importance level from 1(low) to 5(high) * ``link``
['Required', 'parameter', ':', '*', 'secret', '-', 'The', 'Pushjet', 'service', 'secret', 'token', 'created', 'with', 'http', ':', '//', 'docs', '.', 'pushjet', '.', 'io', '/', 'docs', '/', 'creating', '-', 'a', '-', 'new', '-', 'service']
train
https://github.com/dschep/ntfy/blob/ecfeee960af406a27ebb123495e0ec2733286889/ntfy/backends/pushjet.py#L6-L42
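A usage sketch for the backend above; the secret is a placeholder, not a real Pushjet service token, and the link is optional (it is only sent when given, as the code shows).

    from ntfy.backends.pushjet import notify

    notify(
        title='Build finished',
        message='All tests passed.',
        secret='0123456789abcdef',           # hypothetical service secret
        link='https://example.com/build/1',  # optional clickable link
    )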
4,753
yvesalexandre/bandicoot
bandicoot/weekmatrix.py
_extract_list_from_generator
def _extract_list_from_generator(generator): """ Iterates over a generator to extract all the objects and add them to a list. Useful when the objects have to be used multiple times. """ extracted = [] for i in generator: extracted.append(list(i)) return extracted
python
def _extract_list_from_generator(generator): """ Iterates over a generator to extract all the objects and add them to a list. Useful when the objects have to be used multiple times. """ extracted = [] for i in generator: extracted.append(list(i)) return extracted
['def', '_extract_list_from_generator', '(', 'generator', ')', ':', 'extracted', '=', '[', ']', 'for', 'i', 'in', 'generator', ':', 'extracted', '.', 'append', '(', 'list', '(', 'i', ')', ')', 'return', 'extracted']
Iterates over a generator to extract all the objects and add them to a list. Useful when the objects have to be used multiple times.
['Iterates', 'over', 'a', 'generator', 'to', 'extract', 'all', 'the', 'objects', 'and', 'add', 'them', 'to', 'a', 'list', '.', 'Useful', 'when', 'the', 'objects', 'have', 'to', 'be', 'used', 'multiple', 'times', '.']
train
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/weekmatrix.py#L310-L319
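A self-contained demo of the helper above (body copied from the record); it materializes a one-shot generator so the rows can be reused.

    def _extract_list_from_generator(generator):
        extracted = []
        for i in generator:
            extracted.append(list(i))
        return extracted

    pairs = ((n, n * n) for n in range(3))
    rows = _extract_list_from_generator(pairs)
    print(rows)        # [[0, 0], [1, 1], [2, 4]]
    print(rows[::-1])  # reusable, unlike the exhausted generator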
4,754
toros-astro/corral
corral/template/template/migrations/env.py
run_migrations
def run_migrations(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ with engine.connect() as connection: context.configure( connection=connection, target_metadata=Model.metadata) with context.begin_transaction(): context.run_migrations()
python
def run_migrations(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ with engine.connect() as connection: context.configure( connection=connection, target_metadata=Model.metadata) with context.begin_transaction(): context.run_migrations()
['def', 'run_migrations', '(', ')', ':', 'with', 'engine', '.', 'connect', '(', ')', 'as', 'connection', ':', 'context', '.', 'configure', '(', 'connection', '=', 'connection', ',', 'target_metadata', '=', 'Model', '.', 'metadata', ')', 'with', 'context', '.', 'begin_transaction', '(', ')', ':', 'context', '.', 'run_migrations', '(', ')']
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
['Run', 'migrations', 'in', 'online', 'mode', '.']
train
https://github.com/toros-astro/corral/blob/75474b38ff366330d33644461a902d07374a5bbc/corral/template/template/migrations/env.py#L44-L57
4,755
harlowja/notifier
notifier/_notifier.py
Notifier.listeners_iter
def listeners_iter(self): """Return an iterator over the mapping of event => listeners bound. The listener list(s) returned should **not** be mutated. NOTE(harlowja): Each listener in the yielded (event, listeners) tuple is an instance of the :py:class:`~.Listener` type, which itself wraps a provided callback (and its details filter callback, if any). """ topics = set(six.iterkeys(self._topics)) while topics: event_type = topics.pop() try: yield event_type, self._topics[event_type] except KeyError: pass
python
def listeners_iter(self): """Return an iterator over the mapping of event => listeners bound. The listener list(s) returned should **not** be mutated. NOTE(harlowja): Each listener in the yielded (event, listeners) tuple is an instance of the :py:class:`~.Listener` type, which itself wraps a provided callback (and its details filter callback, if any). """ topics = set(six.iterkeys(self._topics)) while topics: event_type = topics.pop() try: yield event_type, self._topics[event_type] except KeyError: pass
['def', 'listeners_iter', '(', 'self', ')', ':', 'topics', '=', 'set', '(', 'six', '.', 'iterkeys', '(', 'self', '.', '_topics', ')', ')', 'while', 'topics', ':', 'event_type', '=', 'topics', '.', 'pop', '(', ')', 'try', ':', 'yield', 'event_type', ',', 'self', '.', '_topics', '[', 'event_type', ']', 'except', 'KeyError', ':', 'pass']
Return an iterator over the mapping of event => listeners bound. The listener list(s) returned should **not** be mutated. NOTE(harlowja): Each listener in the yielded (event, listeners) tuple is an instance of the :py:class:`~.Listener` type, which itself wraps a provided callback (and its details filter callback, if any).
['Return', 'an', 'iterator', 'over', 'the', 'mapping', 'of', 'event', '=', '>', 'listeners', 'bound', '.']
train
https://github.com/harlowja/notifier/blob/35bf58e6350b1d3a3e8c4224e9d01178df70d753/notifier/_notifier.py#L451-L467
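A standalone sketch of the snapshot-then-pop pattern listeners_iter() uses above, which lets callers deregister topics safely while iterating; the topics are made up.

    topics = {'start': ['cb1'], 'stop': ['cb2']}

    def iter_snapshot(mapping):
        keys = set(mapping.keys())  # snapshot of the keys, as in the record
        while keys:
            event_type = keys.pop()
            try:
                yield event_type, mapping[event_type]
            except KeyError:
                pass  # topic removed after the snapshot was taken

    for event, listeners in iter_snapshot(topics):
        print(event, listeners)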
4,756
SpriteLink/NIPAP
nipap-cli/nipap_cli/nipap_cli.py
add_pool
def add_pool(arg, opts, shell_opts): """ Add a pool. """ p = Pool() p.name = opts.get('name') p.description = opts.get('description') p.default_type = opts.get('default-type') p.ipv4_default_prefix_length = opts.get('ipv4_default_prefix_length') p.ipv6_default_prefix_length = opts.get('ipv6_default_prefix_length') if 'tags' in opts: tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0] p.tags = {} for tag_name in tags: tag = Tag() tag.name = tag_name p.tags[tag_name] = tag for avp in opts.get('extra-attribute', []): try: key, value = avp.split('=', 1) except ValueError: print("ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp, file=sys.stderr) return p.avps[key] = value try: p.save() except pynipap.NipapError as exc: print("Could not add pool to NIPAP: %s" % str(exc), file=sys.stderr) sys.exit(1) print("Pool '%s' created." % (p.name))
python
def add_pool(arg, opts, shell_opts): """ Add a pool. """ p = Pool() p.name = opts.get('name') p.description = opts.get('description') p.default_type = opts.get('default-type') p.ipv4_default_prefix_length = opts.get('ipv4_default_prefix_length') p.ipv6_default_prefix_length = opts.get('ipv6_default_prefix_length') if 'tags' in opts: tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0] p.tags = {} for tag_name in tags: tag = Tag() tag.name = tag_name p.tags[tag_name] = tag for avp in opts.get('extra-attribute', []): try: key, value = avp.split('=', 1) except ValueError: print("ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp, file=sys.stderr) return p.avps[key] = value try: p.save() except pynipap.NipapError as exc: print("Could not add pool to NIPAP: %s" % str(exc), file=sys.stderr) sys.exit(1) print("Pool '%s' created." % (p.name))
['def', 'add_pool', '(', 'arg', ',', 'opts', ',', 'shell_opts', ')', ':', 'p', '=', 'Pool', '(', ')', 'p', '.', 'name', '=', 'opts', '.', 'get', '(', "'name'", ')', 'p', '.', 'description', '=', 'opts', '.', 'get', '(', "'description'", ')', 'p', '.', 'default_type', '=', 'opts', '.', 'get', '(', "'default-type'", ')', 'p', '.', 'ipv4_default_prefix_length', '=', 'opts', '.', 'get', '(', "'ipv4_default_prefix_length'", ')', 'p', '.', 'ipv6_default_prefix_length', '=', 'opts', '.', 'get', '(', "'ipv6_default_prefix_length'", ')', 'if', "'tags'", 'in', 'opts', ':', 'tags', '=', 'list', '(', 'csv', '.', 'reader', '(', '[', 'opts', '.', 'get', '(', "'tags'", ',', "''", ')', ']', ',', 'escapechar', '=', "'\\\\'", ')', ')', '[', '0', ']', 'p', '.', 'tags', '=', '{', '}', 'for', 'tag_name', 'in', 'tags', ':', 'tag', '=', 'Tag', '(', ')', 'tag', '.', 'name', '=', 'tag_name', 'p', '.', 'tags', '[', 'tag_name', ']', '=', 'tag', 'for', 'avp', 'in', 'opts', '.', 'get', '(', "'extra-attribute'", ',', '[', ']', ')', ':', 'try', ':', 'key', ',', 'value', '=', 'avp', '.', 'split', '(', "'='", ',', '1', ')', 'except', 'ValueError', ':', 'print', '(', '"ERROR: Incorrect extra-attribute: %s. Accepted form: \'key=value\'\\n"', '%', 'avp', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'return', 'p', '.', 'avps', '[', 'key', ']', '=', 'value', 'try', ':', 'p', '.', 'save', '(', ')', 'except', 'pynipap', '.', 'NipapError', 'as', 'exc', ':', 'print', '(', '"Could not add pool to NIPAP: %s"', '%', 'str', '(', 'exc', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'sys', '.', 'exit', '(', '1', ')', 'print', '(', '"Pool \'%s\' created."', '%', '(', 'p', '.', 'name', ')', ')']
Add a pool.
['Add', 'a', 'pool', '.']
train
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap-cli/nipap_cli/nipap_cli.py#L965-L998
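`add_pool` is a CLI handler, but it can be exercised directly; a hedged sketch (the option keys mirror the `opts.get` calls in the source, and a configured pynipap backend is assumed, otherwise `p.save()` exits with an error):

opts = {
    'name': 'customer-pools',                # hypothetical pool name
    'description': 'Pools for customers',
    'default-type': 'assignment',
    'ipv4_default_prefix_length': '31',
    'extra-attribute': ['owner=neteng'],     # parsed as key=value
}
add_pool(None, opts, None)  # prints "Pool 'customer-pools' created." on success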
4,757
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
CollectionReference.add
def add(self, document_data, document_id=None): """Create a document in the Firestore database with the provided data. Args: document_data (dict): Property names and values to use for creating the document. document_id (Optional[str]): The document identifier within the current collection. If not provided, an ID will be automatically assigned by the server (the assigned ID will be a random 20 character string composed of digits, uppercase and lowercase letters). Returns: Tuple[google.protobuf.timestamp_pb2.Timestamp, \ ~.firestore_v1beta1.document.DocumentReference]: Pair of * The ``update_time`` when the document was created (or overwritten). * A document reference for the created document. Raises: ~google.cloud.exceptions.Conflict: If ``document_id`` is provided and the document already exists. """ if document_id is None: parent_path, expected_prefix = self._parent_info() document_pb = document_pb2.Document() created_document_pb = self._client._firestore_api.create_document( parent_path, collection_id=self.id, document_id=None, document=document_pb, mask=None, metadata=self._client._rpc_metadata, ) new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix) document_ref = self.document(new_document_id) set_result = document_ref.set(document_data) return set_result.update_time, document_ref else: document_ref = self.document(document_id) write_result = document_ref.create(document_data) return write_result.update_time, document_ref
python
def add(self, document_data, document_id=None): """Create a document in the Firestore database with the provided data. Args: document_data (dict): Property names and values to use for creating the document. document_id (Optional[str]): The document identifier within the current collection. If not provided, an ID will be automatically assigned by the server (the assigned ID will be a random 20 character string composed of digits, uppercase and lowercase letters). Returns: Tuple[google.protobuf.timestamp_pb2.Timestamp, \ ~.firestore_v1beta1.document.DocumentReference]: Pair of * The ``update_time`` when the document was created (or overwritten). * A document reference for the created document. Raises: ~google.cloud.exceptions.Conflict: If ``document_id`` is provided and the document already exists. """ if document_id is None: parent_path, expected_prefix = self._parent_info() document_pb = document_pb2.Document() created_document_pb = self._client._firestore_api.create_document( parent_path, collection_id=self.id, document_id=None, document=document_pb, mask=None, metadata=self._client._rpc_metadata, ) new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix) document_ref = self.document(new_document_id) set_result = document_ref.set(document_data) return set_result.update_time, document_ref else: document_ref = self.document(document_id) write_result = document_ref.create(document_data) return write_result.update_time, document_ref
['def', 'add', '(', 'self', ',', 'document_data', ',', 'document_id', '=', 'None', ')', ':', 'if', 'document_id', 'is', 'None', ':', 'parent_path', ',', 'expected_prefix', '=', 'self', '.', '_parent_info', '(', ')', 'document_pb', '=', 'document_pb2', '.', 'Document', '(', ')', 'created_document_pb', '=', 'self', '.', '_client', '.', '_firestore_api', '.', 'create_document', '(', 'parent_path', ',', 'collection_id', '=', 'self', '.', 'id', ',', 'document_id', '=', 'None', ',', 'document', '=', 'document_pb', ',', 'mask', '=', 'None', ',', 'metadata', '=', 'self', '.', '_client', '.', '_rpc_metadata', ',', ')', 'new_document_id', '=', '_helpers', '.', 'get_doc_id', '(', 'created_document_pb', ',', 'expected_prefix', ')', 'document_ref', '=', 'self', '.', 'document', '(', 'new_document_id', ')', 'set_result', '=', 'document_ref', '.', 'set', '(', 'document_data', ')', 'return', 'set_result', '.', 'update_time', ',', 'document_ref', 'else', ':', 'document_ref', '=', 'self', '.', 'document', '(', 'document_id', ')', 'write_result', '=', 'document_ref', '.', 'create', '(', 'document_data', ')', 'return', 'write_result', '.', 'update_time', ',', 'document_ref']
Create a document in the Firestore database with the provided data. Args: document_data (dict): Property names and values to use for creating the document. document_id (Optional[str]): The document identifier within the current collection. If not provided, an ID will be automatically assigned by the server (the assigned ID will be a random 20 character string composed of digits, uppercase and lowercase letters). Returns: Tuple[google.protobuf.timestamp_pb2.Timestamp, \ ~.firestore_v1beta1.document.DocumentReference]: Pair of * The ``update_time`` when the document was created (or overwritten). * A document reference for the created document. Raises: ~google.cloud.exceptions.Conflict: If ``document_id`` is provided and the document already exists.
['Create', 'a', 'document', 'in', 'the', 'Firestore', 'database', 'with', 'the', 'provided', 'data', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L135-L180
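A short usage sketch for the `add` record above; the client construction assumes default credentials, while the return contract comes straight from the docstring:

from google.cloud import firestore

client = firestore.Client()  # assumed project/credentials setup
update_time, doc_ref = client.collection('users').add({'name': 'Ada'})
# With an explicit id, Conflict is raised if the document already exists:
# client.collection('users').add({'name': 'Ada'}, document_id='ada')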
4,758
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxfile_functions.py
upload_string
def upload_string(to_upload, media_type=None, keep_open=False, wait_on_close=False, **kwargs): """ :param to_upload: String to upload into a file :type to_upload: string :param media_type: Internet Media Type :type media_type: string :param keep_open: If False, closes the file after uploading :type keep_open: boolean :param wait_on_close: If True, waits for the file to close :type wait_on_close: boolean :returns: Remote file handler :rtype: :class:`~dxpy.bindings.dxfile.DXFile` Additional optional parameters not listed: all those under :func:`dxpy.bindings.DXDataObject.new`. Uploads the data in the string *to_upload* into a new file object (with media type *media_type* if given) and returns the associated remote file handler. """ # Use 'a' mode because we will be responsible for closing the file # ourselves later (if requested). handler = new_dxfile(media_type=media_type, mode='a', **kwargs) # For subsequent API calls, don't supply the dataobject metadata # parameters that are only needed at creation time. _, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs) handler.write(to_upload, **remaining_kwargs) if not keep_open: handler.close(block=wait_on_close, **remaining_kwargs) return handler
python
def upload_string(to_upload, media_type=None, keep_open=False, wait_on_close=False, **kwargs): """ :param to_upload: String to upload into a file :type to_upload: string :param media_type: Internet Media Type :type media_type: string :param keep_open: If False, closes the file after uploading :type keep_open: boolean :param wait_on_close: If True, waits for the file to close :type wait_on_close: boolean :returns: Remote file handler :rtype: :class:`~dxpy.bindings.dxfile.DXFile` Additional optional parameters not listed: all those under :func:`dxpy.bindings.DXDataObject.new`. Uploads the data in the string *to_upload* into a new file object (with media type *media_type* if given) and returns the associated remote file handler. """ # Use 'a' mode because we will be responsible for closing the file # ourselves later (if requested). handler = new_dxfile(media_type=media_type, mode='a', **kwargs) # For subsequent API calls, don't supply the dataobject metadata # parameters that are only needed at creation time. _, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs) handler.write(to_upload, **remaining_kwargs) if not keep_open: handler.close(block=wait_on_close, **remaining_kwargs) return handler
['def', 'upload_string', '(', 'to_upload', ',', 'media_type', '=', 'None', ',', 'keep_open', '=', 'False', ',', 'wait_on_close', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', "# Use 'a' mode because we will be responsible for closing the file", '# ourselves later (if requested).', 'handler', '=', 'new_dxfile', '(', 'media_type', '=', 'media_type', ',', 'mode', '=', "'a'", ',', '*', '*', 'kwargs', ')', "# For subsequent API calls, don't supply the dataobject metadata", '# parameters that are only needed at creation time.', '_', ',', 'remaining_kwargs', '=', 'dxpy', '.', 'DXDataObject', '.', '_get_creation_params', '(', 'kwargs', ')', 'handler', '.', 'write', '(', 'to_upload', ',', '*', '*', 'remaining_kwargs', ')', 'if', 'not', 'keep_open', ':', 'handler', '.', 'close', '(', 'block', '=', 'wait_on_close', ',', '*', '*', 'remaining_kwargs', ')', 'return', 'handler']
:param to_upload: String to upload into a file :type to_upload: string :param media_type: Internet Media Type :type media_type: string :param keep_open: If False, closes the file after uploading :type keep_open: boolean :param wait_on_close: If True, waits for the file to close :type wait_on_close: boolean :returns: Remote file handler :rtype: :class:`~dxpy.bindings.dxfile.DXFile` Additional optional parameters not listed: all those under :func:`dxpy.bindings.DXDataObject.new`. Uploads the data in the string *to_upload* into a new file object (with media type *media_type* if given) and returns the associated remote file handler.
[':', 'param', 'to_upload', ':', 'String', 'to', 'upload', 'into', 'a', 'file', ':', 'type', 'to_upload', ':', 'string', ':', 'param', 'media_type', ':', 'Internet', 'Media', 'Type', ':', 'type', 'media_type', ':', 'string', ':', 'param', 'keep_open', ':', 'If', 'False', 'closes', 'the', 'file', 'after', 'uploading', ':', 'type', 'keep_open', ':', 'boolean', ':', 'param', 'wait_on_close', ':', 'If', 'True', 'waits', 'for', 'the', 'file', 'to', 'close', ':', 'type', 'wait_on_close', ':', 'boolean', ':', 'returns', ':', 'Remote', 'file', 'handler', ':', 'rtype', ':', ':', 'class', ':', '~dxpy', '.', 'bindings', '.', 'dxfile', '.', 'DXFile']
train
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L561-L595
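A hedged usage sketch; `upload_string` being reachable at the `dxpy` top level is an assumption here, and the media type and contents are invented:

import dxpy

# Uploads the string, closes the file, and blocks until the platform
# reports the file as closed (wait_on_close=True).
dxfile = dxpy.upload_string('hello, platform', media_type='text/plain',
                            wait_on_close=True)
print(dxfile.get_id())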
4,759
git-afsantos/bonsai
bonsai/model.py
CodeFunction.pretty_str
def pretty_str(self, indent=0): """Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation. """ spaces = ' ' * indent params = ', '.join(map(lambda p: p.result + ' ' + p.name, self.parameters)) if self.is_constructor: pretty = '{}{}({}):\n'.format(spaces, self.name, params) else: pretty = '{}{} {}({}):\n'.format(spaces, self.result, self.name, params) if self._definition is not self: pretty += spaces + ' [declaration]' else: pretty += self.body.pretty_str(indent + 2) return pretty
python
def pretty_str(self, indent=0): """Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation. """ spaces = ' ' * indent params = ', '.join(map(lambda p: p.result + ' ' + p.name, self.parameters)) if self.is_constructor: pretty = '{}{}({}):\n'.format(spaces, self.name, params) else: pretty = '{}{} {}({}):\n'.format(spaces, self.result, self.name, params) if self._definition is not self: pretty += spaces + ' [declaration]' else: pretty += self.body.pretty_str(indent + 2) return pretty
['def', 'pretty_str', '(', 'self', ',', 'indent', '=', '0', ')', ':', 'spaces', '=', "' '", '*', 'indent', 'params', '=', "', '", '.', 'join', '(', 'map', '(', 'lambda', 'p', ':', 'p', '.', 'result', '+', "' '", '+', 'p', '.', 'name', ',', 'self', '.', 'parameters', ')', ')', 'if', 'self', '.', 'is_constructor', ':', 'pretty', '=', "'{}{}({}):\\n'", '.', 'format', '(', 'spaces', ',', 'self', '.', 'name', ',', 'params', ')', 'else', ':', 'pretty', '=', "'{}{} {}({}):\\n'", '.', 'format', '(', 'spaces', ',', 'self', '.', 'result', ',', 'self', '.', 'name', ',', 'params', ')', 'if', 'self', '.', '_definition', 'is', 'not', 'self', ':', 'pretty', '+=', 'spaces', '+', "' [declaration]'", 'else', ':', 'pretty', '+=', 'self', '.', 'body', '.', 'pretty_str', '(', 'indent', '+', '2', ')', 'return', 'pretty']
Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation.
['Return', 'a', 'human', '-', 'readable', 'string', 'representation', 'of', 'this', 'object', '.']
train
https://github.com/git-afsantos/bonsai/blob/aa5af3f535b3b506bfc95c107c501fc9c4bcd072/bonsai/model.py#L326-L344
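A minimal sketch of how the method above renders; obtaining a parsed `fn` object normally goes through bonsai's parsers, so the object here is assumed to exist:

# fn is a CodeFunction produced by a bonsai parser (assumed)
print(fn.pretty_str(indent=2))
# e.g. "  int add(int a, int b):" followed by the body or "[declaration]"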
4,760
molmod/molmod
molmod/io/gamess.py
HessianParser.read
def read(self, line, f, data): """See :meth:`PunchParser.read`""" assert("hessian" not in data) f.readline() N = len(data["symbols"]) hessian = np.zeros((3*N, 3*N), float) tmp = hessian.ravel() counter = 0 while True: line = f.readline() if line == " $END\n": break line = line[5:-1] for j in range(len(line)//15): tmp[counter] = float(line[j*15:(j+1)*15]) counter += 1 data["hessian"] = hessian
python
def read(self, line, f, data): """See :meth:`PunchParser.read`""" assert("hessian" not in data) f.readline() N = len(data["symbols"]) hessian = np.zeros((3*N, 3*N), float) tmp = hessian.ravel() counter = 0 while True: line = f.readline() if line == " $END\n": break line = line[5:-1] for j in range(len(line)//15): tmp[counter] = float(line[j*15:(j+1)*15]) counter += 1 data["hessian"] = hessian
['def', 'read', '(', 'self', ',', 'line', ',', 'f', ',', 'data', ')', ':', 'assert', '(', '"hessian"', 'not', 'in', 'data', ')', 'f', '.', 'readline', '(', ')', 'N', '=', 'len', '(', 'data', '[', '"symbols"', ']', ')', 'hessian', '=', 'np', '.', 'zeros', '(', '(', '3', '*', 'N', ',', '3', '*', 'N', ')', ',', 'float', ')', 'tmp', '=', 'hessian', '.', 'ravel', '(', ')', 'counter', '=', '0', 'while', 'True', ':', 'line', '=', 'f', '.', 'readline', '(', ')', 'if', 'line', '==', '" $END\\n"', ':', 'break', 'line', '=', 'line', '[', '5', ':', '-', '1', ']', 'for', 'j', 'in', 'range', '(', 'len', '(', 'line', ')', '//', '15', ')', ':', 'tmp', '[', 'counter', ']', '=', 'float', '(', 'line', '[', 'j', '*', '15', ':', '(', 'j', '+', '1', ')', '*', '15', ']', ')', 'counter', '+=', '1', 'data', '[', '"hessian"', ']', '=', 'hessian']
See :meth:`PunchParser.read`
['See', ':', 'meth', ':', 'PunchParser', '.', 'read']
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/gamess.py#L207-L223
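The parser above reads fixed-width records; a standalone, verifiable sketch of the same 15-character slicing (the 5-character prefix and the sample values are invented for illustration):

raw = 'XXXXX' + ''.join('%15.8E' % v for v in (0.1, -0.2)) + '\n'
payload = raw[5:-1]  # strip prefix and trailing newline, as in read()
values = [float(payload[j*15:(j+1)*15]) for j in range(len(payload)//15)]
# values == [0.1, -0.2]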
4,761
esheldon/fitsio
fitsio/hdu/table.py
_npy2fits
def _npy2fits(d, table_type='binary', write_bitcols=False): """ d is the full element from the descr """ npy_dtype = d[1][1:] if npy_dtype[0] == 'S' or npy_dtype[0] == 'U': name, form, dim = _npy_string2fits(d, table_type=table_type) else: name, form, dim = _npy_num2fits( d, table_type=table_type, write_bitcols=write_bitcols) return name, form, dim
python
def _npy2fits(d, table_type='binary', write_bitcols=False): """ d is the full element from the descr """ npy_dtype = d[1][1:] if npy_dtype[0] == 'S' or npy_dtype[0] == 'U': name, form, dim = _npy_string2fits(d, table_type=table_type) else: name, form, dim = _npy_num2fits( d, table_type=table_type, write_bitcols=write_bitcols) return name, form, dim
['def', '_npy2fits', '(', 'd', ',', 'table_type', '=', "'binary'", ',', 'write_bitcols', '=', 'False', ')', ':', 'npy_dtype', '=', 'd', '[', '1', ']', '[', '1', ':', ']', 'if', 'npy_dtype', '[', '0', ']', '==', "'S'", 'or', 'npy_dtype', '[', '0', ']', '==', "'U'", ':', 'name', ',', 'form', ',', 'dim', '=', '_npy_string2fits', '(', 'd', ',', 'table_type', '=', 'table_type', ')', 'else', ':', 'name', ',', 'form', ',', 'dim', '=', '_npy_num2fits', '(', 'd', ',', 'table_type', '=', 'table_type', ',', 'write_bitcols', '=', 'write_bitcols', ')', 'return', 'name', ',', 'form', ',', 'dim']
d is the full element from the descr
['d', 'is', 'the', 'full', 'element', 'from', 'the', 'descr']
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L2106-L2117
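A hedged demonstration of the module-internal helper above, driven by a numpy `descr` element exactly as the source expects (the printed forms are what the binary-table mapping should produce, but are not verified here):

import numpy as np
from fitsio.hdu.table import _npy2fits  # internal helper (assumed import path)

for d in np.dtype([('x', 'f8'), ('name', 'S10')]).descr:
    print(_npy2fits(d))  # likely ('x', 'D', None) and ('name', '10A', None)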
4,762
mitsei/dlkit
dlkit/json_/resource/sessions.py
ResourceBinSession.get_bin_ids_by_resource
def get_bin_ids_by_resource(self, resource_id): """Gets the list of ``Bin`` ``Ids`` mapped to a ``Resource``. arg: resource_id (osid.id.Id): ``Id`` of a ``Resource`` return: (osid.id.IdList) - list of bin ``Ids`` raise: NotFound - ``resource_id`` is not found raise: NullArgument - ``resource_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_bin_ids_by_resource mgr = self._get_provider_manager('RESOURCE', local=True) lookup_session = mgr.get_resource_lookup_session(proxy=self._proxy) lookup_session.use_federated_bin_view() resource = lookup_session.get_resource(resource_id) id_list = [] for idstr in resource._my_map['assignedBinIds']: id_list.append(Id(idstr)) return IdList(id_list)
python
def get_bin_ids_by_resource(self, resource_id): """Gets the list of ``Bin`` ``Ids`` mapped to a ``Resource``. arg: resource_id (osid.id.Id): ``Id`` of a ``Resource`` return: (osid.id.IdList) - list of bin ``Ids`` raise: NotFound - ``resource_id`` is not found raise: NullArgument - ``resource_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_bin_ids_by_resource mgr = self._get_provider_manager('RESOURCE', local=True) lookup_session = mgr.get_resource_lookup_session(proxy=self._proxy) lookup_session.use_federated_bin_view() resource = lookup_session.get_resource(resource_id) id_list = [] for idstr in resource._my_map['assignedBinIds']: id_list.append(Id(idstr)) return IdList(id_list)
['def', 'get_bin_ids_by_resource', '(', 'self', ',', 'resource_id', ')', ':', '# Implemented from template for', '# osid.resource.ResourceBinSession.get_bin_ids_by_resource', 'mgr', '=', 'self', '.', '_get_provider_manager', '(', "'RESOURCE'", ',', 'local', '=', 'True', ')', 'lookup_session', '=', 'mgr', '.', 'get_resource_lookup_session', '(', 'proxy', '=', 'self', '.', '_proxy', ')', 'lookup_session', '.', 'use_federated_bin_view', '(', ')', 'resource', '=', 'lookup_session', '.', 'get_resource', '(', 'resource_id', ')', 'id_list', '=', '[', ']', 'for', 'idstr', 'in', 'resource', '.', '_my_map', '[', "'assignedBinIds'", ']', ':', 'id_list', '.', 'append', '(', 'Id', '(', 'idstr', ')', ')', 'return', 'IdList', '(', 'id_list', ')']
Gets the list of ``Bin`` ``Ids`` mapped to a ``Resource``. arg: resource_id (osid.id.Id): ``Id`` of a ``Resource`` return: (osid.id.IdList) - list of bin ``Ids`` raise: NotFound - ``resource_id`` is not found raise: NullArgument - ``resource_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
['Gets', 'the', 'list', 'of', 'Bin', 'Ids', 'mapped', 'to', 'a', 'Resource', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/sessions.py#L1452-L1473
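A hedged usage sketch; acquiring the session follows the usual OSID manager pattern, which is an assumption here:

session = resource_manager.get_resource_bin_session()  # assumed manager object
for bin_id in session.get_bin_ids_by_resource(resource.ident):
    print(str(bin_id))  # each Id of a Bin the resource is mapped to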
4,763
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
pairs_to_dict
def pairs_to_dict(response, encoding): "Create a dict given a list of key/value pairs" it = iter(response) return dict(((k.decode(encoding), v) for k, v in zip(it, it)))
python
def pairs_to_dict(response, encoding): "Create a dict given a list of key/value pairs" it = iter(response) return dict(((k.decode(encoding), v) for k, v in zip(it, it)))
['def', 'pairs_to_dict', '(', 'response', ',', 'encoding', ')', ':', 'it', '=', 'iter', '(', 'response', ')', 'return', 'dict', '(', '(', '(', 'k', '.', 'decode', '(', 'encoding', ')', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'zip', '(', 'it', ',', 'it', ')', ')', ')']
Create a dict given a list of key/value pairs
['Create', 'a', 'dict', 'given', 'a', 'list', 'of', 'key', '/', 'value', 'pairs']
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L36-L39
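The helper above is easy to exercise directly; a minimal sketch where the flat reply list mimics a redis HGETALL-style response:

reply = [b'name', b'foo', b'age', b'3']
pairs_to_dict(reply, 'utf-8')
# -> {'name': b'foo', 'age': b'3'}  (keys decoded, values left as bytes)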
4,764
saltstack/salt
salt/modules/debian_service.py
get_all
def get_all(): ''' Return all available boot services CLI Example: .. code-block:: bash salt '*' service.get_all ''' ret = set() lines = glob.glob('/etc/init.d/*') for line in lines: service = line.split('/etc/init.d/')[1] # Remove README. If it's an enabled service, it will be added back in. if service != 'README': ret.add(service) return sorted(ret | set(get_enabled()))
python
def get_all(): ''' Return all available boot services CLI Example: .. code-block:: bash salt '*' service.get_all ''' ret = set() lines = glob.glob('/etc/init.d/*') for line in lines: service = line.split('/etc/init.d/')[1] # Remove README. If it's an enabled service, it will be added back in. if service != 'README': ret.add(service) return sorted(ret | set(get_enabled()))
['def', 'get_all', '(', ')', ':', 'ret', '=', 'set', '(', ')', 'lines', '=', 'glob', '.', 'glob', '(', "'/etc/init.d/*'", ')', 'for', 'line', 'in', 'lines', ':', 'service', '=', 'line', '.', 'split', '(', "'/etc/init.d/'", ')', '[', '1', ']', "# Remove README. If it's an enabled service, it will be added back in.", 'if', 'service', '!=', "'README'", ':', 'ret', '.', 'add', '(', 'service', ')', 'return', 'sorted', '(', 'ret', '|', 'set', '(', 'get_enabled', '(', ')', ')', ')']
Return all available boot services CLI Example: .. code-block:: bash salt '*' service.get_all
['Return', 'all', 'available', 'boot', 'services']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/debian_service.py#L136-L153
4,765
f3at/feat
src/feat/models/value.py
Boolean.validate
def validate(self, value): """ Accepts: str, unicode, bool Returns: bool """ if isinstance(value, bool): return value if isinstance(value, (str, unicode)): if value.lower() == "true": value = True elif value.lower() == "false": value = False else: raise ValueError("Not a boolean: %r" % (value, )) value = super(Boolean, self).validate(value) if not isinstance(value, bool): raise ValueError("Not a boolean: %r" % (value, )) return value
python
def validate(self, value): """ Accepts: str, unicode, bool Returns: bool """ if isinstance(value, bool): return value if isinstance(value, (str, unicode)): if value.lower() == "true": value = True elif value.lower() == "false": value = False else: raise ValueError("Not a boolean: %r" % (value, )) value = super(Boolean, self).validate(value) if not isinstance(value, bool): raise ValueError("Not a boolean: %r" % (value, )) return value
['def', 'validate', '(', 'self', ',', 'value', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'bool', ')', ':', 'return', 'value', 'if', 'isinstance', '(', 'value', ',', '(', 'str', ',', 'unicode', ')', ')', ':', 'if', 'value', '.', 'lower', '(', ')', '==', '"true"', ':', 'value', '=', 'True', 'elif', 'value', '.', 'lower', '(', ')', '==', '"false"', ':', 'value', '=', 'False', 'else', ':', 'raise', 'ValueError', '(', '"Not a boolean: %r"', '%', '(', 'value', ',', ')', ')', 'value', '=', 'super', '(', 'Boolean', ',', 'self', ')', '.', 'validate', '(', 'value', ')', 'if', 'not', 'isinstance', '(', 'value', ',', 'bool', ')', ':', 'raise', 'ValueError', '(', '"Not a boolean: %r"', '%', '(', 'value', ',', ')', ')', 'return', 'value']
Accepts: str, unicode, bool Returns: bool
['Accepts', ':', 'str', 'unicode', 'bool', 'Returns', ':', 'bool']
train
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/value.py#L613-L630
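A quick sketch of the accepted inputs; constructing `Boolean` with no arguments is an assumption about this framework:

v = Boolean()        # assumed no-arg construction
v.validate(True)     # -> True
v.validate('FALSE')  # -> False (string matching is case-insensitive)
v.validate('maybe')  # raises ValueError: Not a boolean: 'maybe'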
4,766
diging/tethne
tethne/analyze/corpus.py
_forward
def _forward(X, s=1.1, gamma=1., k=5): """ Forward dynamic algorithm for burstness automaton HMM, from `Kleinberg (2002) <http://www.cs.cornell.edu/home/kleinber/bhs.pdf>`_. Parameters ---------- X : list A series of time-gaps between events. s : float (default: 1.1) Scaling parameter ( > 1.) that controls graininess of burst detection. Lower values make the model more sensitive. gamma : float (default: 1.0) Parameter that controls the 'cost' of higher burst states. Higher values make it more 'difficult' to achieve a higher burst state. k : int (default: 5) Number of states. Higher values increase computational cost of the algorithm. A maximum of 25 is suggested by the literature. Returns ------- states : list Optimal state sequence. """ X = list(X) def alpha(i): return (n/T)*(s**i) def tau(i, j): if j > i: return (j-i)*gamma*log(n) return 0. def f(j, x): return alpha(j) * exp(-1. * alpha(j) * x) def C(j, t): if j == 0 and t == 0: return 0. elif t == 0: return float("inf") C_tau = min([C_values[l][t-1] + tau(l, j) for l in xrange(k)]) return (-1. * log(f(j,X[t]))) + C_tau T = sum(X) n = len(X) # C() requires default (0) values, so we construct the "array" in advance. C_values = [[0 for t in xrange(len(X))] for j in xrange(k)] for j in xrange(k): for t in xrange(len(X)): C_values[j][t] = C(j,t) # Find the optimal state sequence. states = [argmin([c[t] for c in C_values]) for t in xrange(n)] return states
python
def _forward(X, s=1.1, gamma=1., k=5): """ Forward dynamic algorithm for burstness automaton HMM, from `Kleinberg (2002) <http://www.cs.cornell.edu/home/kleinber/bhs.pdf>`_. Parameters ---------- X : list A series of time-gaps between events. s : float (default: 1.1) Scaling parameter ( > 1.) that controls graininess of burst detection. Lower values make the model more sensitive. gamma : float (default: 1.0) Parameter that controls the 'cost' of higher burst states. Higher values make it more 'difficult' to achieve a higher burst state. k : int (default: 5) Number of states. Higher values increase computational cost of the algorithm. A maximum of 25 is suggested by the literature. Returns ------- states : list Optimal state sequence. """ X = list(X) def alpha(i): return (n/T)*(s**i) def tau(i, j): if j > i: return (j-i)*gamma*log(n) return 0. def f(j, x): return alpha(j) * exp(-1. * alpha(j) * x) def C(j, t): if j == 0 and t == 0: return 0. elif t == 0: return float("inf") C_tau = min([C_values[l][t-1] + tau(l, j) for l in xrange(k)]) return (-1. * log(f(j,X[t]))) + C_tau T = sum(X) n = len(X) # C() requires default (0) values, so we construct the "array" in advance. C_values = [[0 for t in xrange(len(X))] for j in xrange(k)] for j in xrange(k): for t in xrange(len(X)): C_values[j][t] = C(j,t) # Find the optimal state sequence. states = [argmin([c[t] for c in C_values]) for t in xrange(n)] return states
['def', '_forward', '(', 'X', ',', 's', '=', '1.1', ',', 'gamma', '=', '1.', ',', 'k', '=', '5', ')', ':', 'X', '=', 'list', '(', 'X', ')', 'def', 'alpha', '(', 'i', ')', ':', 'return', '(', 'n', '/', 'T', ')', '*', '(', 's', '**', 'i', ')', 'def', 'tau', '(', 'i', ',', 'j', ')', ':', 'if', 'j', '>', 'i', ':', 'return', '(', 'j', '-', 'i', ')', '*', 'gamma', '*', 'log', '(', 'n', ')', 'return', '0.', 'def', 'f', '(', 'j', ',', 'x', ')', ':', 'return', 'alpha', '(', 'j', ')', '*', 'exp', '(', '-', '1.', '*', 'alpha', '(', 'j', ')', '*', 'x', ')', 'def', 'C', '(', 'j', ',', 't', ')', ':', 'if', 'j', '==', '0', 'and', 't', '==', '0', ':', 'return', '0.', 'elif', 't', '==', '0', ':', 'return', 'float', '(', '"inf"', ')', 'C_tau', '=', 'min', '(', '[', 'C_values', '[', 'l', ']', '[', 't', '-', '1', ']', '+', 'tau', '(', 'l', ',', 'j', ')', 'for', 'l', 'in', 'xrange', '(', 'k', ')', ']', ')', 'return', '(', '-', '1.', '*', 'log', '(', 'f', '(', 'j', ',', 'X', '[', 't', ']', ')', ')', ')', '+', 'C_tau', 'T', '=', 'sum', '(', 'X', ')', 'n', '=', 'len', '(', 'X', ')', '# C() requires default (0) values, so we construct the "array" in advance.', 'C_values', '=', '[', '[', '0', 'for', 't', 'in', 'xrange', '(', 'len', '(', 'X', ')', ')', ']', 'for', 'j', 'in', 'xrange', '(', 'k', ')', ']', 'for', 'j', 'in', 'xrange', '(', 'k', ')', ':', 'for', 't', 'in', 'xrange', '(', 'len', '(', 'X', ')', ')', ':', 'C_values', '[', 'j', ']', '[', 't', ']', '=', 'C', '(', 'j', ',', 't', ')', '# Find the optimal state sequence.', 'states', '=', '[', 'argmin', '(', '[', 'c', '[', 't', ']', 'for', 'c', 'in', 'C_values', ']', ')', 'for', 't', 'in', 'xrange', '(', 'n', ')', ']', 'return', 'states']
Forward dynamic algorithm for burstness automaton HMM, from `Kleinberg (2002) <http://www.cs.cornell.edu/home/kleinber/bhs.pdf>`_. Parameters ---------- X : list A series of time-gaps between events. s : float (default: 1.1) Scaling parameter ( > 1.) that controls graininess of burst detection. Lower values make the model more sensitive. gamma : float (default: 1.0) Parameter that controls the 'cost' of higher burst states. Higher values make it more 'difficult' to achieve a higher burst state. k : int (default: 5) Number of states. Higher values increase computational cost of the algorithm. A maximum of 25 is suggested by the literature. Returns ------- states : list Optimal state sequence.
['Forward', 'dynamic', 'algorithm', 'for', 'burstness', 'automaton', 'HMM', 'from', 'Kleinberg', '(', '2002', ')', '<http', ':', '//', 'www', '.', 'cs', '.', 'cornell', '.', 'edu', '/', 'home', '/', 'kleinber', '/', 'bhs', '.', 'pdf', '>', '_', '.']
train
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/corpus.py#L30-L88
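A hedged run of the burst detector above; the gap series is invented, and a run of short gaps should be the candidate for elevated states:

gaps = [1.0, 1.0, 0.1, 0.1, 0.1, 1.0]   # time-gaps between events
states = _forward(gaps, s=2.0, gamma=1.0, k=3)
# states holds one burst level (0..k-1) per gap; the clustered short
# gaps in the middle are where higher states become cheap enough.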
4,767
LEMS/pylems
lems/parser/LEMS.py
LEMSFileParser.parse_with
def parse_with(self, node): """ Parses <With> @param node: Node containing the <With> element @type node: xml.etree.Element """ if 'instance' in node.lattrib: instance = node.lattrib['instance'] list = None index = None elif 'list' in node.lattrib and 'index' in node.lattrib: instance = None list = node.lattrib['list'] index = node.lattrib['index'] else: self.raise_error('<With> must specify EITHER instance OR list & index') if 'as' in node.lattrib: as_ = node.lattrib['as'] else: self.raise_error('<With> must specify a name for the ' 'target instance') self.current_structure.add_with(With(instance, as_, list, index))
python
def parse_with(self, node): """ Parses <With> @param node: Node containing the <With> element @type node: xml.etree.Element """ if 'instance' in node.lattrib: instance = node.lattrib['instance'] list = None index = None elif 'list' in node.lattrib and 'index' in node.lattrib: instance = None list = node.lattrib['list'] index = node.lattrib['index'] else: self.raise_error('<With> must specify EITHER instance OR list & index') if 'as' in node.lattrib: as_ = node.lattrib['as'] else: self.raise_error('<With> must specify a name for the ' 'target instance') self.current_structure.add_with(With(instance, as_, list, index))
['def', 'parse_with', '(', 'self', ',', 'node', ')', ':', 'if', "'instance'", 'in', 'node', '.', 'lattrib', ':', 'instance', '=', 'node', '.', 'lattrib', '[', "'instance'", ']', 'list', '=', 'None', 'index', '=', 'None', 'elif', "'list'", 'in', 'node', '.', 'lattrib', 'and', "'index'", 'in', 'node', '.', 'lattrib', ':', 'instance', '=', 'None', 'list', '=', 'node', '.', 'lattrib', '[', "'list'", ']', 'index', '=', 'node', '.', 'lattrib', '[', "'index'", ']', 'else', ':', 'self', '.', 'raise_error', '(', "'<With> must specify EITHER instance OR list & index'", ')', 'if', "'as'", 'in', 'node', '.', 'lattrib', ':', 'as_', '=', 'node', '.', 'lattrib', '[', "'as'", ']', 'else', ':', 'self', '.', 'raise_error', '(', "'<With> must specify a name for the '", "'target instance'", ')', 'self', '.', 'current_structure', '.', 'add_with', '(', 'With', '(', 'instance', ',', 'as_', ',', 'list', ',', 'index', ')', ')']
Parses <With> @param node: Node containing the <With> element @type node: xml.etree.Element
['Parses', '<With', '>']
train
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/parser/LEMS.py#L1688-L1713
4,768
googleapis/google-cloud-python
dns/google/cloud/dns/changes.py
Changes.from_api_repr
def from_api_repr(cls, resource, zone): """Factory: construct a change set given its API representation :type resource: dict :param resource: change set representation returned from the API. :type zone: :class:`google.cloud.dns.zone.ManagedZone` :param zone: A zone which holds zero or more change sets. :rtype: :class:`google.cloud.dns.changes.Changes` :returns: RRS parsed from ``resource``. """ changes = cls(zone=zone) changes._set_properties(resource) return changes
python
def from_api_repr(cls, resource, zone): """Factory: construct a change set given its API representation :type resource: dict :param resource: change set representation returned from the API. :type zone: :class:`google.cloud.dns.zone.ManagedZone` :param zone: A zone which holds zero or more change sets. :rtype: :class:`google.cloud.dns.changes.Changes` :returns: RRS parsed from ``resource``. """ changes = cls(zone=zone) changes._set_properties(resource) return changes
['def', 'from_api_repr', '(', 'cls', ',', 'resource', ',', 'zone', ')', ':', 'changes', '=', 'cls', '(', 'zone', '=', 'zone', ')', 'changes', '.', '_set_properties', '(', 'resource', ')', 'return', 'changes']
Factory: construct a change set given its API representation :type resource: dict :param resource: change set representation returned from the API. :type zone: :class:`google.cloud.dns.zone.ManagedZone` :param zone: A zone which holds zero or more change sets. :rtype: :class:`google.cloud.dns.changes.Changes` :returns: RRS parsed from ``resource``.
['Factory', ':', 'construct', 'a', 'change', 'set', 'given', 'its', 'API', 'representation']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dns/google/cloud/dns/changes.py#L42-L56
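A minimal sketch of the factory above; the resource dict shape is an assumption based on the Cloud DNS changes API, and `zone` is a `ManagedZone` obtained elsewhere:

resource = {'id': '1', 'status': 'pending', 'additions': [], 'deletions': []}
changes = Changes.from_api_repr(resource, zone)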
4,769
zendesk/connect_python_sdk
outbound/__init__.py
unsubscribe
def unsubscribe(user_id, from_all=False, campaign_ids=None, on_error=None, on_success=None): """ Unsubscribe a user from some or all campaigns. :param str | number user_id: the id you use to identify a user. this should be static for the lifetime of a user. :param bool from_all True to unsubscribe from all campaigns. Takes precedence over campaign IDs if both are given. :param list of str campaign_ids List of campaign IDs to unsubscribe the user from. :param func on_error: An optional function to call in the event of an error. on_error callback should take 2 parameters: `code` and `error`. `code` will be one of outbound.ERROR_XXXXXX. `error` will be the corresponding message. :param func on_success: An optional function to call if/when the API call succeeds. on_success callback takes no parameters. """ __subscription( user_id, unsubscribe=True, all_campaigns=from_all, campaign_ids=campaign_ids, on_error=on_error, on_success=on_success, )
python
def unsubscribe(user_id, from_all=False, campaign_ids=None, on_error=None, on_success=None): """ Unsubscribe a user from some or all campaigns. :param str | number user_id: the id you use to identify a user. this should be static for the lifetime of a user. :param bool from_all True to unsubscribe from all campaigns. Takes precedence over campaign IDs if both are given. :param list of str campaign_ids List of campaign IDs to unsubscribe the user from. :param func on_error: An optional function to call in the event of an error. on_error callback should take 2 parameters: `code` and `error`. `code` will be one of outbound.ERROR_XXXXXX. `error` will be the corresponding message. :param func on_success: An optional function to call if/when the API call succeeds. on_success callback takes no parameters. """ __subscription( user_id, unsubscribe=True, all_campaigns=from_all, campaign_ids=campaign_ids, on_error=on_error, on_success=on_success, )
['def', 'unsubscribe', '(', 'user_id', ',', 'from_all', '=', 'False', ',', 'campaign_ids', '=', 'None', ',', 'on_error', '=', 'None', ',', 'on_success', '=', 'None', ')', ':', '__subscription', '(', 'user_id', ',', 'unsubscribe', '=', 'True', ',', 'all_campaigns', '=', 'from_all', ',', 'campaign_ids', '=', 'campaign_ids', ',', 'on_error', '=', 'on_error', ',', 'on_success', '=', 'on_success', ',', ')']
Unsubscribe a user from some or all campaigns. :param str | number user_id: the id you use to identify a user. this should be static for the lifetime of a user. :param bool from_all True to unsubscribe from all campaigns. Takes precedence over campaign IDs if both are given. :param list of str campaign_ids List of campaign IDs to unsubscribe the user from. :param func on_error: An optional function to call in the event of an error. on_error callback should take 2 parameters: `code` and `error`. `code` will be one of outbound.ERROR_XXXXXX. `error` will be the corresponding message. :param func on_success: An optional function to call if/when the API call succeeds. on_success callback takes no parameters.
['Unsubscribe', 'a', 'user', 'from', 'some', 'or', 'all', 'campaigns', '.']
train
https://github.com/zendesk/connect_python_sdk/blob/6d7c1a539dcf23c1b1942e9bf6c9084c929df7e6/outbound/__init__.py#L37-L62
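A hedged usage sketch; it assumes the outbound module has already been initialized with an API key, and the callback shape follows the docstring:

def on_error(code, error):
    print('unsubscribe failed:', code, error)

unsubscribe('user-123', from_all=True, on_error=on_error)
unsubscribe('user-123', campaign_ids=['camp-1', 'camp-2'])  # or specific ones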
4,770
hollenstein/maspy
maspy/auxiliary.py
listFiletypes
def listFiletypes(targetfilename, directory): """Looks for all occurrences of a specified filename in a directory and returns a list of all present file extensions of this filename. In this case everything after the first dot is considered to be the file extension: ``"filename.txt" -> "txt"``, ``"filename.txt.zip" -> "txt.zip"`` :param targetfilename: a filename without any extensions :param directory: only files present in this directory are compared to the targetfilename :returns: a list of file extensions (str) """ targetextensions = list() for filename in os.listdir(directory): if not os.path.isfile(joinpath(directory, filename)): continue splitname = filename.split('.') basename = splitname[0] extension = '.'.join(splitname[1:]) if basename == targetfilename: targetextensions.append(extension) return targetextensions
python
def listFiletypes(targetfilename, directory): """Looks for all occurrences of a specified filename in a directory and returns a list of all present file extensions of this filename. In this case everything after the first dot is considered to be the file extension: ``"filename.txt" -> "txt"``, ``"filename.txt.zip" -> "txt.zip"`` :param targetfilename: a filename without any extensions :param directory: only files present in this directory are compared to the targetfilename :returns: a list of file extensions (str) """ targetextensions = list() for filename in os.listdir(directory): if not os.path.isfile(joinpath(directory, filename)): continue splitname = filename.split('.') basename = splitname[0] extension = '.'.join(splitname[1:]) if basename == targetfilename: targetextensions.append(extension) return targetextensions
['def', 'listFiletypes', '(', 'targetfilename', ',', 'directory', ')', ':', 'targetextensions', '=', 'list', '(', ')', 'for', 'filename', 'in', 'os', '.', 'listdir', '(', 'directory', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'joinpath', '(', 'directory', ',', 'filename', ')', ')', ':', 'continue', 'splitname', '=', 'filename', '.', 'split', '(', "'.'", ')', 'basename', '=', 'splitname', '[', '0', ']', 'extension', '=', "'.'", '.', 'join', '(', 'splitname', '[', '1', ':', ']', ')', 'if', 'basename', '==', 'targetfilename', ':', 'targetextensions', '.', 'append', '(', 'extension', ')', 'return', 'targetextensions']
Looks for all occurrences of a specified filename in a directory and returns a list of all present file extensions of this filename. In this case everything after the first dot is considered to be the file extension: ``"filename.txt" -> "txt"``, ``"filename.txt.zip" -> "txt.zip"`` :param targetfilename: a filename without any extensions :param directory: only files present in this directory are compared to the targetfilename :returns: a list of file extensions (str)
['Looks', 'for', 'all', 'occurrences', 'of', 'a', 'specified', 'filename', 'in', 'a', 'directory', 'and', 'returns', 'a', 'list', 'of', 'all', 'present', 'file', 'extensions', 'of', 'this', 'filename', '.']
train
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/auxiliary.py#L446-L468
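A small sketch of the helper above; the directory layout is invented, and the result order follows os.listdir:

# With /data containing result.txt and result.txt.zip:
listFiletypes('result', '/data')
# -> ['txt', 'txt.zip']  (everything after the first dot counts)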
4,771
DataBiosphere/toil
src/toil/job.py
Job.addFollowOn
def addFollowOn(self, followOnJob): """ Adds a follow-on job, follow-on jobs will be run after the child jobs and \ their successors have been run. :param toil.job.Job followOnJob: :return: followOnJob :rtype: toil.job.Job """ self._followOns.append(followOnJob) followOnJob._addPredecessor(self) return followOnJob
python
def addFollowOn(self, followOnJob): """ Adds a follow-on job, follow-on jobs will be run after the child jobs and \ their successors have been run. :param toil.job.Job followOnJob: :return: followOnJob :rtype: toil.job.Job """ self._followOns.append(followOnJob) followOnJob._addPredecessor(self) return followOnJob
['def', 'addFollowOn', '(', 'self', ',', 'followOnJob', ')', ':', 'self', '.', '_followOns', '.', 'append', '(', 'followOnJob', ')', 'followOnJob', '.', '_addPredecessor', '(', 'self', ')', 'return', 'followOnJob']
Adds a follow-on job, follow-on jobs will be run after the child jobs and \ their successors have been run. :param toil.job.Job followOnJob: :return: followOnJob :rtype: toil.job.Job
['Adds', 'a', 'follow', '-', 'on', 'job', 'follow', '-', 'on', 'jobs', 'will', 'be', 'run', 'after', 'the', 'child', 'jobs', 'and', '\\', 'their', 'successors', 'have', 'been', 'run', '.']
train
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/job.py#L349-L360
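A hedged Toil sketch; `Job.wrapJobFn` is the standard way to build jobs from functions, while the job functions themselves are placeholders:

from toil.job import Job

def do_work(job): pass      # placeholder job function
def do_cleanup(job): pass   # runs only after do_work's subtree finishes

root = Job.wrapJobFn(do_work)
root.addFollowOn(Job.wrapJobFn(do_cleanup))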
4,772
joshspeagle/dynesty
dynesty/nestedsamplers.py
UnitCubeSampler.update_slice
def update_slice(self, blob): """Update the slice proposal scale based on the relative size of the slices compared to our initial guess.""" nexpand, ncontract = blob['nexpand'], blob['ncontract'] self.scale *= nexpand / (2. * ncontract)
python
def update_slice(self, blob): """Update the slice proposal scale based on the relative size of the slices compared to our initial guess.""" nexpand, ncontract = blob['nexpand'], blob['ncontract'] self.scale *= nexpand / (2. * ncontract)
['def', 'update_slice', '(', 'self', ',', 'blob', ')', ':', 'nexpand', ',', 'ncontract', '=', 'blob', '[', "'nexpand'", ']', ',', 'blob', '[', "'ncontract'", ']', 'self', '.', 'scale', '*=', 'nexpand', '/', '(', '2.', '*', 'ncontract', ')']
Update the slice proposal scale based on the relative size of the slices compared to our initial guess.
['Update', 'the', 'slice', 'proposal', 'scale', 'based', 'on', 'the', 'relative', 'size', 'of', 'the', 'slices', 'compared', 'to', 'our', 'initial', 'guess', '.']
train
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/nestedsamplers.py#L209-L214
4,773
edibledinos/pwnypack
pwnypack/elf.py
ELF.get_symbol
def get_symbol(self, symbol): """ Get a specific symbol by index or name. Args: symbol(int or str): The index or name of the symbol to return. Returns: ELF.Symbol: The symbol. Raises: KeyError: The requested symbol does not exist. """ self._ensure_symbols_loaded() if type(symbol) is int: return self._symbols_by_index[symbol] else: return self._symbols_by_name[symbol]
python
def get_symbol(self, symbol): """ Get a specific symbol by index or name. Args: symbol(int or str): The index or name of the symbol to return. Returns: ELF.Symbol: The symbol. Raises: KeyError: The requested symbol does not exist. """ self._ensure_symbols_loaded() if type(symbol) is int: return self._symbols_by_index[symbol] else: return self._symbols_by_name[symbol]
['def', 'get_symbol', '(', 'self', ',', 'symbol', ')', ':', 'self', '.', '_ensure_symbols_loaded', '(', ')', 'if', 'type', '(', 'symbol', ')', 'is', 'int', ':', 'return', 'self', '.', '_symbols_by_index', '[', 'symbol', ']', 'else', ':', 'return', 'self', '.', '_symbols_by_name', '[', 'symbol', ']']
Get a specific symbol by index or name. Args: symbol(int or str): The index or name of the symbol to return. Returns: ELF.Symbol: The symbol. Raises: KeyError: The requested symbol does not exist.
['Get', 'a', 'specific', 'symbol', 'by', 'index', 'or', 'name', '.']
train
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/elf.py#L911-L929
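A hedged usage sketch for the accessor above; constructing `ELF` directly from a path is an assumption about this library's constructor:

from pwnypack.elf import ELF

elf = ELF('./a.out')           # assumed construction form
main = elf.get_symbol('main')  # lookup by name
first = elf.get_symbol(0)      # or by index; KeyError if absent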
4,774
dslackw/slpkg
slpkg/messages.py
Msg.answer
def answer(self): """Message answer """ if self.meta.default_answer in ["y", "Y"]: answer = self.meta.default_answer else: try: answer = raw_input("Would you like to continue [y/N]? ") except EOFError: print("") raise SystemExit() return answer
python
def answer(self): """Message answer """ if self.meta.default_answer in ["y", "Y"]: answer = self.meta.default_answer else: try: answer = raw_input("Would you like to continue [y/N]? ") except EOFError: print("") raise SystemExit() return answer
['def', 'answer', '(', 'self', ')', ':', 'if', 'self', '.', 'meta', '.', 'default_answer', 'in', '[', '"y"', ',', '"Y"', ']', ':', 'answer', '=', 'self', '.', 'meta', '.', 'default_answer', 'else', ':', 'try', ':', 'answer', '=', 'raw_input', '(', '"Would you like to continue [y/N]? "', ')', 'except', 'EOFError', ':', 'print', '(', '""', ')', 'raise', 'SystemExit', '(', ')', 'return', 'answer']
Message answer
['Message', 'answer']
train
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/messages.py#L121-L132
4,775
mongodb/mongo-python-driver
pymongo/message.py
_OpReply.raw_response
def raw_response(self, cursor_id=None): """Check the response header from the database, without decoding BSON. Check the response for errors and unpack. Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or OperationFailure. :Parameters: - `cursor_id` (optional): cursor_id we sent to get this response - used for raising an informative exception when we get cursor id not valid at server response. """ if self.flags & 1: # Shouldn't get this response if we aren't doing a getMore if cursor_id is None: raise ProtocolError("No cursor id for getMore operation") # Fake a getMore command response. OP_GET_MORE provides no # document. msg = "Cursor not found, cursor id: %d" % (cursor_id,) errobj = {"ok": 0, "errmsg": msg, "code": 43} raise CursorNotFound(msg, 43, errobj) elif self.flags & 2: error_object = bson.BSON(self.documents).decode() # Fake the ok field if it doesn't exist. error_object.setdefault("ok", 0) if error_object["$err"].startswith("not master"): raise NotMasterError(error_object["$err"], error_object) elif error_object.get("code") == 50: raise ExecutionTimeout(error_object.get("$err"), error_object.get("code"), error_object) raise OperationFailure("database error: %s" % error_object.get("$err"), error_object.get("code"), error_object) return [self.documents]
python
def raw_response(self, cursor_id=None): """Check the response header from the database, without decoding BSON. Check the response for errors and unpack. Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or OperationFailure. :Parameters: - `cursor_id` (optional): cursor_id we sent to get this response - used for raising an informative exception when we get cursor id not valid at server response. """ if self.flags & 1: # Shouldn't get this response if we aren't doing a getMore if cursor_id is None: raise ProtocolError("No cursor id for getMore operation") # Fake a getMore command response. OP_GET_MORE provides no # document. msg = "Cursor not found, cursor id: %d" % (cursor_id,) errobj = {"ok": 0, "errmsg": msg, "code": 43} raise CursorNotFound(msg, 43, errobj) elif self.flags & 2: error_object = bson.BSON(self.documents).decode() # Fake the ok field if it doesn't exist. error_object.setdefault("ok", 0) if error_object["$err"].startswith("not master"): raise NotMasterError(error_object["$err"], error_object) elif error_object.get("code") == 50: raise ExecutionTimeout(error_object.get("$err"), error_object.get("code"), error_object) raise OperationFailure("database error: %s" % error_object.get("$err"), error_object.get("code"), error_object) return [self.documents]
['def', 'raw_response', '(', 'self', ',', 'cursor_id', '=', 'None', ')', ':', 'if', 'self', '.', 'flags', '&', '1', ':', "# Shouldn't get this response if we aren't doing a getMore", 'if', 'cursor_id', 'is', 'None', ':', 'raise', 'ProtocolError', '(', '"No cursor id for getMore operation"', ')', '# Fake a getMore command response. OP_GET_MORE provides no', '# document.', 'msg', '=', '"Cursor not found, cursor id: %d"', '%', '(', 'cursor_id', ',', ')', 'errobj', '=', '{', '"ok"', ':', '0', ',', '"errmsg"', ':', 'msg', ',', '"code"', ':', '43', '}', 'raise', 'CursorNotFound', '(', 'msg', ',', '43', ',', 'errobj', ')', 'elif', 'self', '.', 'flags', '&', '2', ':', 'error_object', '=', 'bson', '.', 'BSON', '(', 'self', '.', 'documents', ')', '.', 'decode', '(', ')', "# Fake the ok field if it doesn't exist.", 'error_object', '.', 'setdefault', '(', '"ok"', ',', '0', ')', 'if', 'error_object', '[', '"$err"', ']', '.', 'startswith', '(', '"not master"', ')', ':', 'raise', 'NotMasterError', '(', 'error_object', '[', '"$err"', ']', ',', 'error_object', ')', 'elif', 'error_object', '.', 'get', '(', '"code"', ')', '==', '50', ':', 'raise', 'ExecutionTimeout', '(', 'error_object', '.', 'get', '(', '"$err"', ')', ',', 'error_object', '.', 'get', '(', '"code"', ')', ',', 'error_object', ')', 'raise', 'OperationFailure', '(', '"database error: %s"', '%', 'error_object', '.', 'get', '(', '"$err"', ')', ',', 'error_object', '.', 'get', '(', '"code"', ')', ',', 'error_object', ')', 'return', '[', 'self', '.', 'documents', ']']
Check the response header from the database, without decoding BSON. Check the response for errors and unpack. Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or OperationFailure. :Parameters: - `cursor_id` (optional): cursor_id we sent to get this response - used for raising an informative exception when we get cursor id not valid at server response.
['Check', 'the', 'response', 'header', 'from', 'the', 'database', 'without', 'decoding', 'BSON', '.']
train
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/message.py#L1373-L1410
4,776
Rafiot/PubSubLogger
pubsublogger/subscriber.py
setup
def setup(name, path='log', enable_debug=False): """ Prepare a NestedSetup. :param name: the channel name :param path: the path where the logs will be written :param enable_debug: do we want to save the message at the DEBUG level :return: a nested Setup """ path_tmpl = os.path.join(path, '{name}_{level}.log') info = path_tmpl.format(name=name, level='info') warn = path_tmpl.format(name=name, level='warn') err = path_tmpl.format(name=name, level='err') crit = path_tmpl.format(name=name, level='crit') # a nested handler setup can be used to configure more complex setups setup = [ # make sure we never bubble up to the stderr handler # if we run out of setup handling NullHandler(), # then write messages that are at least info to a logfile TimedRotatingFileHandler(info, level='INFO', encoding='utf-8', date_format='%Y-%m-%d'), # then write messages that are at least warnings to a logfile TimedRotatingFileHandler(warn, level='WARNING', encoding='utf-8', date_format='%Y-%m-%d'), # then write messages that are at least errors to a logfile TimedRotatingFileHandler(err, level='ERROR', encoding='utf-8', date_format='%Y-%m-%d'), # then write messages that are at least critical errors to a logfile TimedRotatingFileHandler(crit, level='CRITICAL', encoding='utf-8', date_format='%Y-%m-%d'), ] if enable_debug: debug = path_tmpl.format(name=name, level='debug') setup.insert(1, TimedRotatingFileHandler(debug, level='DEBUG', encoding='utf-8', date_format='%Y-%m-%d')) if src_server is not None and smtp_server is not None \ and smtp_port != 0 and len(dest_mails) != 0: mail_tmpl = '{name}_error@{src}' from_mail = mail_tmpl.format(name=name, src=src_server) subject = 'Error in {}'.format(name) # errors should then be delivered by mail and also be kept # in the application log, so we let them bubble up. setup.append(MailHandler(from_mail, dest_mails, subject, level='ERROR', bubble=True, server_addr=(smtp_server, smtp_port))) return NestedSetup(setup)
python
def setup(name, path='log', enable_debug=False): """ Prepare a NestedSetup. :param name: the channel name :param path: the path where the logs will be written :param enable_debug: do we want to save the message at the DEBUG level :return: a nested Setup """ path_tmpl = os.path.join(path, '{name}_{level}.log') info = path_tmpl.format(name=name, level='info') warn = path_tmpl.format(name=name, level='warn') err = path_tmpl.format(name=name, level='err') crit = path_tmpl.format(name=name, level='crit') # a nested handler setup can be used to configure more complex setups setup = [ # make sure we never bubble up to the stderr handler # if we run out of setup handling NullHandler(), # then write messages that are at least info to a logfile TimedRotatingFileHandler(info, level='INFO', encoding='utf-8', date_format='%Y-%m-%d'), # then write messages that are at least warnings to a logfile TimedRotatingFileHandler(warn, level='WARNING', encoding='utf-8', date_format='%Y-%m-%d'), # then write messages that are at least errors to a logfile TimedRotatingFileHandler(err, level='ERROR', encoding='utf-8', date_format='%Y-%m-%d'), # then write messages that are at least critical errors to a logfile TimedRotatingFileHandler(crit, level='CRITICAL', encoding='utf-8', date_format='%Y-%m-%d'), ] if enable_debug: debug = path_tmpl.format(name=name, level='debug') setup.insert(1, TimedRotatingFileHandler(debug, level='DEBUG', encoding='utf-8', date_format='%Y-%m-%d')) if src_server is not None and smtp_server is not None \ and smtp_port != 0 and len(dest_mails) != 0: mail_tmpl = '{name}_error@{src}' from_mail = mail_tmpl.format(name=name, src=src_server) subject = 'Error in {}'.format(name) # errors should then be delivered by mail and also be kept # in the application log, so we let them bubble up. setup.append(MailHandler(from_mail, dest_mails, subject, level='ERROR', bubble=True, server_addr=(smtp_server, smtp_port))) return NestedSetup(setup)
['def', 'setup', '(', 'name', ',', 'path', '=', "'log'", ',', 'enable_debug', '=', 'False', ')', ':', 'path_tmpl', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', "'{name}_{level}.log'", ')', 'info', '=', 'path_tmpl', '.', 'format', '(', 'name', '=', 'name', ',', 'level', '=', "'info'", ')', 'warn', '=', 'path_tmpl', '.', 'format', '(', 'name', '=', 'name', ',', 'level', '=', "'warn'", ')', 'err', '=', 'path_tmpl', '.', 'format', '(', 'name', '=', 'name', ',', 'level', '=', "'err'", ')', 'crit', '=', 'path_tmpl', '.', 'format', '(', 'name', '=', 'name', ',', 'level', '=', "'crit'", ')', '# a nested handler setup can be used to configure more complex setups', 'setup', '=', '[', '# make sure we never bubble up to the stderr handler', '# if we run out of setup handling', 'NullHandler', '(', ')', ',', '# then write messages that are at least info to to a logfile', 'TimedRotatingFileHandler', '(', 'info', ',', 'level', '=', "'INFO'", ',', 'encoding', '=', "'utf-8'", ',', 'date_format', '=', "'%Y-%m-%d'", ')', ',', '# then write messages that are at least warnings to to a logfile', 'TimedRotatingFileHandler', '(', 'warn', ',', 'level', '=', "'WARNING'", ',', 'encoding', '=', "'utf-8'", ',', 'date_format', '=', "'%Y-%m-%d'", ')', ',', '# then write messages that are at least errors to to a logfile', 'TimedRotatingFileHandler', '(', 'err', ',', 'level', '=', "'ERROR'", ',', 'encoding', '=', "'utf-8'", ',', 'date_format', '=', "'%Y-%m-%d'", ')', ',', '# then write messages that are at least critical errors to to a logfile', 'TimedRotatingFileHandler', '(', 'crit', ',', 'level', '=', "'CRITICAL'", ',', 'encoding', '=', "'utf-8'", ',', 'date_format', '=', "'%Y-%m-%d'", ')', ',', ']', 'if', 'enable_debug', ':', 'debug', '=', 'path_tmpl', '.', 'format', '(', 'name', '=', 'name', ',', 'level', '=', "'debug'", ')', 'setup', '.', 'insert', '(', '1', ',', 'TimedRotatingFileHandler', '(', 'debug', ',', 'level', '=', "'DEBUG'", ',', 'encoding', '=', "'utf-8'", ',', 'date_format', '=', "'%Y-%m-%d'", ')', ')', 'if', 'src_server', 'is', 'not', 'None', 'and', 'smtp_server', 'is', 'not', 'None', 'and', 'smtp_port', '!=', '0', 'and', 'len', '(', 'dest_mails', ')', '!=', '0', ':', 'mail_tmpl', '=', "'{name}_error@{src}'", 'from_mail', '=', 'mail_tmpl', '.', 'format', '(', 'name', '=', 'name', ',', 'src', '=', 'src_server', ')', 'subject', '=', "'Error in {}'", '.', 'format', '(', 'name', ')', '# errors should then be delivered by mail and also be kept', '# in the application log, so we let them bubble up.', 'setup', '.', 'append', '(', 'MailHandler', '(', 'from_mail', ',', 'dest_mails', ',', 'subject', ',', 'level', '=', "'ERROR'", ',', 'bubble', '=', 'True', ',', 'server_addr', '=', '(', 'smtp_server', ',', 'smtp_port', ')', ')', ')', 'return', 'NestedSetup', '(', 'setup', ')']
Prepare a NestedSetup. :param name: the channel name :param path: the path where the logs will be written :param enable_debug: do we want to save the message at the DEBUG level :return: a nested Setup
['Prepare', 'a', 'NestedSetup', '.']
train
https://github.com/Rafiot/PubSubLogger/blob/4f28ad673f42ee2ec7792d414d325aef9a56da53/pubsublogger/subscriber.py#L43-L91
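A minimal usage sketch for the setup() above, assuming logbook is installed; the channel name 'worker' and the makedirs call are illustrative, and the module-level mail settings (src_server, smtp_server, smtp_port, dest_mails) are assumed to be configured elsewhere in subscriber.py:

import os
import logbook

os.makedirs('log')  # the file handlers expect the directory to exist

with setup(name='worker', path='log', enable_debug=True).applicationbound():
    log = logbook.Logger('worker')
    log.debug('goes to log/worker_debug.log')
    log.info('goes to log/worker_info.log')
    log.error('goes to log/worker_err.log')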
4,777
sixty-north/cosmic-ray
src/cosmic_ray/cli.py
handle_interceptors
def handle_interceptors(args): """usage: {program} interceptors List the available interceptor plugins. """ assert args print('\n'.join(cosmic_ray.plugins.interceptor_names())) return ExitCode.OK
python
def handle_interceptors(args): """usage: {program} interceptors List the available interceptor plugins. """ assert args print('\n'.join(cosmic_ray.plugins.interceptor_names())) return ExitCode.OK
['def', 'handle_interceptors', '(', 'args', ')', ':', 'assert', 'args', 'print', '(', "'\\n'", '.', 'join', '(', 'cosmic_ray', '.', 'plugins', '.', 'interceptor_names', '(', ')', ')', ')', 'return', 'ExitCode', '.', 'OK']
usage: {program} interceptors List the available interceptor plugins.
['usage', ':', '{', 'program', '}', 'interceptors']
train
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/cli.py#L193-L201
4,778
ciena/afkak
afkak/_util.py
_coerce_consumer_group
def _coerce_consumer_group(consumer_group): """ Ensure that the consumer group is a text string. :param consumer_group: :class:`bytes` or :class:`str` instance :raises TypeError: when `consumer_group` is not :class:`bytes` or :class:`str` """ if not isinstance(consumer_group, string_types): raise TypeError('consumer_group={!r} must be text'.format(consumer_group)) if not isinstance(consumer_group, text_type): consumer_group = consumer_group.decode('utf-8') return consumer_group
python
def _coerce_consumer_group(consumer_group): """ Ensure that the consumer group is a text string. :param consumer_group: :class:`bytes` or :class:`str` instance :raises TypeError: when `consumer_group` is not :class:`bytes` or :class:`str` """ if not isinstance(consumer_group, string_types): raise TypeError('consumer_group={!r} must be text'.format(consumer_group)) if not isinstance(consumer_group, text_type): consumer_group = consumer_group.decode('utf-8') return consumer_group
['def', '_coerce_consumer_group', '(', 'consumer_group', ')', ':', 'if', 'not', 'isinstance', '(', 'consumer_group', ',', 'string_types', ')', ':', 'raise', 'TypeError', '(', "'consumer_group={!r} must be text'", '.', 'format', '(', 'consumer_group', ')', ')', 'if', 'not', 'isinstance', '(', 'consumer_group', ',', 'text_type', ')', ':', 'consumer_group', '=', 'consumer_group', '.', 'decode', '(', "'utf-8'", ')', 'return', 'consumer_group']
Ensure that the consumer group is a text string. :param consumer_group: :class:`bytes` or :class:`str` instance :raises TypeError: when `consumer_group` is not :class:`bytes` or :class:`str`
['Ensure', 'that', 'the', 'consumer', 'group', 'is', 'a', 'text', 'string', '.']
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/_util.py#L47-L59
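An illustrative behaviour sketch for _coerce_consumer_group; it assumes the module's string_types/text_type aliases come from six, so under Python 2 a byte string is accepted and decoded, while non-strings are rejected on any version:

assert _coerce_consumer_group(u'my-group') == u'my-group'  # text passes through
try:
    _coerce_consumer_group(42)
except TypeError as exc:
    print(exc)  # consumer_group=42 must be text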
4,779
ARMmbed/icetea
icetea_lib/Plugin/PluginManager.py
PluginManager._register_external_service
def _register_external_service(self, plugin_name, plugin_instance): """ Register an external service. :param plugin_name: Service name :param plugin_instance: PluginBase :return: """ for attr in plugin_instance.get_external_services().keys(): if attr in self._external_services: raise PluginException("External service with name {} already exists! Unable to add " "services from plugin {}.".format(attr, plugin_name)) self._external_services[attr] = plugin_instance.get_external_services().get(attr)
python
def _register_external_service(self, plugin_name, plugin_instance): """ Register an external service. :param plugin_name: Service name :param plugin_instance: PluginBase :return: """ for attr in plugin_instance.get_external_services().keys(): if attr in self._external_services: raise PluginException("External service with name {} already exists! Unable to add " "services from plugin {}.".format(attr, plugin_name)) self._external_services[attr] = plugin_instance.get_external_services().get(attr)
['def', '_register_external_service', '(', 'self', ',', 'plugin_name', ',', 'plugin_instance', ')', ':', 'for', 'attr', 'in', 'plugin_instance', '.', 'get_external_services', '(', ')', '.', 'keys', '(', ')', ':', 'if', 'attr', 'in', 'self', '.', '_external_services', ':', 'raise', 'PluginException', '(', '"External service with name {} already exists! Unable to add "', '"services from plugin {}."', '.', 'format', '(', 'attr', ',', 'plugin_name', ')', ')', 'self', '.', '_external_services', '[', 'attr', ']', '=', 'plugin_instance', '.', 'get_external_services', '(', ')', '.', 'get', '(', 'attr', ')']
Register an external service. :param plugin_name: Service name :param plugin_instance: PluginBase :return:
['Register', 'an', 'external', 'service', '.']
train
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/Plugin/PluginManager.py#L267-L279
4,780
numberoverzero/bloop
bloop/session.py
SessionWrapper.describe_stream
def describe_stream(self, stream_arn, first_shard=None): """Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens. :param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``. :param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned. :return: All shards in the stream, or a subset if ``first_shard`` is provided. :rtype: dict """ description = {"Shards": []} request = {"StreamArn": stream_arn, "ExclusiveStartShardId": first_shard} # boto3 isn't down with literal Nones. if first_shard is None: request.pop("ExclusiveStartShardId") while request.get("ExclusiveStartShardId") is not missing: try: response = self.stream_client.describe_stream(**request)["StreamDescription"] except botocore.exceptions.ClientError as error: if error.response["Error"]["Code"] == "ResourceNotFoundException": raise InvalidStream(f"The stream arn {stream_arn!r} does not exist.") from error raise BloopException("Unexpected error while describing stream.") from error # Docs aren't clear if the terminal value is null, or won't exist. # Since we don't terminate the loop on None, the "or missing" here # will ensure we stop on a falsey value. request["ExclusiveStartShardId"] = response.pop("LastEvaluatedShardId", None) or missing description["Shards"].extend(response.pop("Shards", [])) description.update(response) return description
python
def describe_stream(self, stream_arn, first_shard=None): """Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens. :param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``. :param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned. :return: All shards in the stream, or a subset if ``first_shard`` is provided. :rtype: dict """ description = {"Shards": []} request = {"StreamArn": stream_arn, "ExclusiveStartShardId": first_shard} # boto3 isn't down with literal Nones. if first_shard is None: request.pop("ExclusiveStartShardId") while request.get("ExclusiveStartShardId") is not missing: try: response = self.stream_client.describe_stream(**request)["StreamDescription"] except botocore.exceptions.ClientError as error: if error.response["Error"]["Code"] == "ResourceNotFoundException": raise InvalidStream(f"The stream arn {stream_arn!r} does not exist.") from error raise BloopException("Unexpected error while describing stream.") from error # Docs aren't clear if the terminal value is null, or won't exist. # Since we don't terminate the loop on None, the "or missing" here # will ensure we stop on a falsey value. request["ExclusiveStartShardId"] = response.pop("LastEvaluatedShardId", None) or missing description["Shards"].extend(response.pop("Shards", [])) description.update(response) return description
['def', 'describe_stream', '(', 'self', ',', 'stream_arn', ',', 'first_shard', '=', 'None', ')', ':', 'description', '=', '{', '"Shards"', ':', '[', ']', '}', 'request', '=', '{', '"StreamArn"', ':', 'stream_arn', ',', '"ExclusiveStartShardId"', ':', 'first_shard', '}', "# boto3 isn't down with literal Nones.", 'if', 'first_shard', 'is', 'None', ':', 'request', '.', 'pop', '(', '"ExclusiveStartShardId"', ')', 'while', 'request', '.', 'get', '(', '"ExclusiveStartShardId"', ')', 'is', 'not', 'missing', ':', 'try', ':', 'response', '=', 'self', '.', 'stream_client', '.', 'describe_stream', '(', '*', '*', 'request', ')', '[', '"StreamDescription"', ']', 'except', 'botocore', '.', 'exceptions', '.', 'ClientError', 'as', 'error', ':', 'if', 'error', '.', 'response', '[', '"Error"', ']', '[', '"Code"', ']', '==', '"ResourceNotFoundException"', ':', 'raise', 'InvalidStream', '(', 'f"The stream arn {stream_arn!r} does not exist."', ')', 'from', 'error', 'raise', 'BloopException', '(', '"Unexpected error while describing stream."', ')', 'from', 'error', "# Docs aren't clear if the terminal value is null, or won't exist.", '# Since we don\'t terminate the loop on None, the "or missing" here', '# will ensure we stop on a falsey value.', 'request', '[', '"ExclusiveStartShardId"', ']', '=', 'response', '.', 'pop', '(', '"LastEvaluatedShardId"', ',', 'None', ')', 'or', 'missing', 'description', '[', '"Shards"', ']', '.', 'extend', '(', 'response', '.', 'pop', '(', '"Shards"', ',', '[', ']', ')', ')', 'description', '.', 'update', '(', 'response', ')', 'return', 'description']
Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens. :param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``. :param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned. :return: All shards in the stream, or a subset if ``first_shard`` is provided. :rtype: dict
['Wraps', ':', 'func', ':', 'boto3', '.', 'DynamoDBStreams', '.', 'Client', '.', 'describe_stream', 'handling', 'continuation', 'tokens', '.']
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L304-L332
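A hypothetical call of describe_stream; the ARN and shard id below are placeholders and `session` would be a constructed SessionWrapper:

arn = ('arn:aws:dynamodb:us-east-1:123456789012:'
       'table/MyTable/stream/2019-01-01T00:00:00.000')  # placeholder
description = session.describe_stream(stream_arn=arn)
print([shard['ShardId'] for shard in description['Shards']])

# first_shard narrows the listing to shards after a known shard id:
tail = session.describe_stream(stream_arn=arn, first_shard='shardId-000001')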
4,781
abe-winter/pg13-py
pg13/sqex.py
NameIndexer.update_aliases
def update_aliases(aliases,aonly,x): "helper for ctor. takes AliasX or string as third arg" if isinstance(x,basestring): aliases[x]=x elif isinstance(x,sqparse2.AliasX): if not isinstance(x.alias,basestring): raise TypeError('alias not string',type(x.alias)) if isinstance(x.name,sqparse2.NameX): aliases.update({x.alias:x.name.name,x.name.name:x.name.name}) elif isinstance(x.name,sqparse2.SelectX): aliases.update({x.alias:x.alias}) aonly[x.alias]=x.name else: raise TypeError('aliasx_unk_thing',type(x.name)) # pragma: no cover else: raise TypeError(type(x)) # pragma: no cover
python
def update_aliases(aliases,aonly,x): "helper for ctor. takes AliasX or string as third arg" if isinstance(x,basestring): aliases[x]=x elif isinstance(x,sqparse2.AliasX): if not isinstance(x.alias,basestring): raise TypeError('alias not string',type(x.alias)) if isinstance(x.name,sqparse2.NameX): aliases.update({x.alias:x.name.name,x.name.name:x.name.name}) elif isinstance(x.name,sqparse2.SelectX): aliases.update({x.alias:x.alias}) aonly[x.alias]=x.name else: raise TypeError('aliasx_unk_thing',type(x.name)) # pragma: no cover else: raise TypeError(type(x)) # pragma: no cover
['def', 'update_aliases', '(', 'aliases', ',', 'aonly', ',', 'x', ')', ':', 'if', 'isinstance', '(', 'x', ',', 'basestring', ')', ':', 'aliases', '[', 'x', ']', '=', 'x', 'elif', 'isinstance', '(', 'x', ',', 'sqparse2', '.', 'AliasX', ')', ':', 'if', 'not', 'isinstance', '(', 'x', '.', 'alias', ',', 'basestring', ')', ':', 'raise', 'TypeError', '(', "'alias not string'", ',', 'type', '(', 'x', '.', 'alias', ')', ')', 'if', 'isinstance', '(', 'x', '.', 'name', ',', 'sqparse2', '.', 'NameX', ')', ':', 'aliases', '.', 'update', '(', '{', 'x', '.', 'alias', ':', 'x', '.', 'name', '.', 'name', ',', 'x', '.', 'name', '.', 'name', ':', 'x', '.', 'name', '.', 'name', '}', ')', 'elif', 'isinstance', '(', 'x', '.', 'name', ',', 'sqparse2', '.', 'SelectX', ')', ':', 'aliases', '.', 'update', '(', '{', 'x', '.', 'alias', ':', 'x', '.', 'alias', '}', ')', 'aonly', '[', 'x', '.', 'alias', ']', '=', 'x', '.', 'name', 'else', ':', 'raise', 'TypeError', '(', "'aliasx_unk_thing'", ',', 'type', '(', 'x', '.', 'name', ')', ')', '# pragma: no cover', 'else', ':', 'raise', 'TypeError', '(', 'type', '(', 'x', ')', ')', '# pragma: no cover']
helper for ctor. takes AliasX or string as third arg
['helper', 'for', 'ctor', '.', 'takes', 'AliasX', 'or', 'string', 'as', 'third', 'arg']
train
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L104-L114
4,782
geertj/gruvi
lib/gruvi/jsonrpc.py
JsonRpcProtocol.send_notification
def send_notification(self, method, *args): """Send a JSON-RPC notification. The notification *method* is sent with positional arguments *args*. """ message = self._version.create_request(method, args, notification=True) self.send_message(message)
python
def send_notification(self, method, *args): """Send a JSON-RPC notification. The notification *method* is sent with positional arguments *args*. """ message = self._version.create_request(method, args, notification=True) self.send_message(message)
['def', 'send_notification', '(', 'self', ',', 'method', ',', '*', 'args', ')', ':', 'message', '=', 'self', '.', '_version', '.', 'create_request', '(', 'method', ',', 'args', ',', 'notification', '=', 'True', ')', 'self', '.', 'send_message', '(', 'message', ')']
Send a JSON-RPC notification. The notification *method* is sent with positional arguments *args*.
['Send', 'a', 'JSON', '-', 'RPC', 'notification', '.']
train
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/jsonrpc.py#L443-L449
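A sketch of what a send_notification call produces on the wire; `protocol` would be a connected JsonRpcProtocol, and the serialized form shown is the rough JSON-RPC 2.0 shape, assuming self._version is the v2.0 helper:

protocol.send_notification('status_update', 'ready', 42)
# Serializes roughly to:
#   {"jsonrpc": "2.0", "method": "status_update", "params": ["ready", 42]}
# No "id" member is present, so the peer sends no response.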
4,783
onelogin/python-saml
src/onelogin/saml2/response.py
OneLogin_Saml2_Response.get_nameid_data
def get_nameid_data(self): """ Gets the NameID Data provided by the SAML Response from the IdP :returns: Name ID Data (Value, Format, NameQualifier, SPNameQualifier) :rtype: dict """ nameid = None nameid_data = {} encrypted_id_data_nodes = self.__query_assertion('/saml:Subject/saml:EncryptedID/xenc:EncryptedData') if encrypted_id_data_nodes: encrypted_data = encrypted_id_data_nodes[0] key = self.__settings.get_sp_key() nameid = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key) else: nameid_nodes = self.__query_assertion('/saml:Subject/saml:NameID') if nameid_nodes: nameid = nameid_nodes[0] is_strict = self.__settings.is_strict() want_nameid = self.__settings.get_security_data().get('wantNameId', True) if nameid is None: if is_strict and want_nameid: raise OneLogin_Saml2_ValidationError( 'NameID not found in the assertion of the Response', OneLogin_Saml2_ValidationError.NO_NAMEID ) else: if is_strict and want_nameid and not OneLogin_Saml2_Utils.element_text(nameid): raise OneLogin_Saml2_ValidationError( 'An empty NameID value found', OneLogin_Saml2_ValidationError.EMPTY_NAMEID ) nameid_data = {'Value': OneLogin_Saml2_Utils.element_text(nameid)} for attr in ['Format', 'SPNameQualifier', 'NameQualifier']: value = nameid.get(attr, None) if value: if is_strict and attr == 'SPNameQualifier': sp_data = self.__settings.get_sp_data() sp_entity_id = sp_data.get('entityId', '') if sp_entity_id != value: raise OneLogin_Saml2_ValidationError( 'The SPNameQualifier value mismatches the SP entityID value.', OneLogin_Saml2_ValidationError.SP_NAME_QUALIFIER_NAME_MISMATCH ) nameid_data[attr] = value return nameid_data
python
def get_nameid_data(self): """ Gets the NameID Data provided by the SAML Response from the IdP :returns: Name ID Data (Value, Format, NameQualifier, SPNameQualifier) :rtype: dict """ nameid = None nameid_data = {} encrypted_id_data_nodes = self.__query_assertion('/saml:Subject/saml:EncryptedID/xenc:EncryptedData') if encrypted_id_data_nodes: encrypted_data = encrypted_id_data_nodes[0] key = self.__settings.get_sp_key() nameid = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key) else: nameid_nodes = self.__query_assertion('/saml:Subject/saml:NameID') if nameid_nodes: nameid = nameid_nodes[0] is_strict = self.__settings.is_strict() want_nameid = self.__settings.get_security_data().get('wantNameId', True) if nameid is None: if is_strict and want_nameid: raise OneLogin_Saml2_ValidationError( 'NameID not found in the assertion of the Response', OneLogin_Saml2_ValidationError.NO_NAMEID ) else: if is_strict and want_nameid and not OneLogin_Saml2_Utils.element_text(nameid): raise OneLogin_Saml2_ValidationError( 'An empty NameID value found', OneLogin_Saml2_ValidationError.EMPTY_NAMEID ) nameid_data = {'Value': OneLogin_Saml2_Utils.element_text(nameid)} for attr in ['Format', 'SPNameQualifier', 'NameQualifier']: value = nameid.get(attr, None) if value: if is_strict and attr == 'SPNameQualifier': sp_data = self.__settings.get_sp_data() sp_entity_id = sp_data.get('entityId', '') if sp_entity_id != value: raise OneLogin_Saml2_ValidationError( 'The SPNameQualifier value mismatches the SP entityID value.', OneLogin_Saml2_ValidationError.SP_NAME_QUALIFIER_NAME_MISMATCH ) nameid_data[attr] = value return nameid_data
['def', 'get_nameid_data', '(', 'self', ')', ':', 'nameid', '=', 'None', 'nameid_data', '=', '{', '}', 'encrypted_id_data_nodes', '=', 'self', '.', '__query_assertion', '(', "'/saml:Subject/saml:EncryptedID/xenc:EncryptedData'", ')', 'if', 'encrypted_id_data_nodes', ':', 'encrypted_data', '=', 'encrypted_id_data_nodes', '[', '0', ']', 'key', '=', 'self', '.', '__settings', '.', 'get_sp_key', '(', ')', 'nameid', '=', 'OneLogin_Saml2_Utils', '.', 'decrypt_element', '(', 'encrypted_data', ',', 'key', ')', 'else', ':', 'nameid_nodes', '=', 'self', '.', '__query_assertion', '(', "'/saml:Subject/saml:NameID'", ')', 'if', 'nameid_nodes', ':', 'nameid', '=', 'nameid_nodes', '[', '0', ']', 'is_strict', '=', 'self', '.', '__settings', '.', 'is_strict', '(', ')', 'want_nameid', '=', 'self', '.', '__settings', '.', 'get_security_data', '(', ')', '.', 'get', '(', "'wantNameId'", ',', 'True', ')', 'if', 'nameid', 'is', 'None', ':', 'if', 'is_strict', 'and', 'want_nameid', ':', 'raise', 'OneLogin_Saml2_ValidationError', '(', "'NameID not found in the assertion of the Response'", ',', 'OneLogin_Saml2_ValidationError', '.', 'NO_NAMEID', ')', 'else', ':', 'if', 'is_strict', 'and', 'want_nameid', 'and', 'not', 'OneLogin_Saml2_Utils', '.', 'element_text', '(', 'nameid', ')', ':', 'raise', 'OneLogin_Saml2_ValidationError', '(', "'An empty NameID value found'", ',', 'OneLogin_Saml2_ValidationError', '.', 'EMPTY_NAMEID', ')', 'nameid_data', '=', '{', "'Value'", ':', 'OneLogin_Saml2_Utils', '.', 'element_text', '(', 'nameid', ')', '}', 'for', 'attr', 'in', '[', "'Format'", ',', "'SPNameQualifier'", ',', "'NameQualifier'", ']', ':', 'value', '=', 'nameid', '.', 'get', '(', 'attr', ',', 'None', ')', 'if', 'value', ':', 'if', 'is_strict', 'and', 'attr', '==', "'SPNameQualifier'", ':', 'sp_data', '=', 'self', '.', '__settings', '.', 'get_sp_data', '(', ')', 'sp_entity_id', '=', 'sp_data', '.', 'get', '(', "'entityId'", ',', "''", ')', 'if', 'sp_entity_id', '!=', 'value', ':', 'raise', 'OneLogin_Saml2_ValidationError', '(', "'The SPNameQualifier value mistmatch the SP entityID value.'", ',', 'OneLogin_Saml2_ValidationError', '.', 'SP_NAME_QUALIFIER_NAME_MISMATCH', ')', 'nameid_data', '[', 'attr', ']', '=', 'value', 'return', 'nameid_data']
Gets the NameID Data provided by the SAML Response from the IdP :returns: Name ID Data (Value, Format, NameQualifier, SPNameQualifier) :rtype: dict
['Gets', 'the', 'NameID', 'Data', 'provided', 'by', 'the', 'SAML', 'Response', 'from', 'the', 'IdP']
train
https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/response.py#L438-L487
4,784
alejandroautalan/pygubu
pygubu/widgets/simpletooltip.py
ToolTip.showtip
def showtip(self, text): "Display text in tooltip window" self.text = text if self.tipwindow or not self.text: return x, y, cx, cy = self.widget.bbox("insert") x = x + self.widget.winfo_rootx() + 27 y = y + cy + self.widget.winfo_rooty() + 27 self.tipwindow = tw = tk.Toplevel(self.widget) tw.wm_overrideredirect(1) tw.wm_geometry("+%d+%d" % (x, y)) try: # For Mac OS tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates") except tk.TclError: pass label = tk.Label(tw, text=self.text, justify=tk.LEFT, background="#ffffe0", foreground="black", relief=tk.SOLID, borderwidth=1, font=("tahoma", "8", "normal")) label.pack(ipadx=1)
python
def showtip(self, text): "Display text in tooltip window" self.text = text if self.tipwindow or not self.text: return x, y, cx, cy = self.widget.bbox("insert") x = x + self.widget.winfo_rootx() + 27 y = y + cy + self.widget.winfo_rooty() + 27 self.tipwindow = tw = tk.Toplevel(self.widget) tw.wm_overrideredirect(1) tw.wm_geometry("+%d+%d" % (x, y)) try: # For Mac OS tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates") except tk.TclError: pass label = tk.Label(tw, text=self.text, justify=tk.LEFT, background="#ffffe0", foreground="black", relief=tk.SOLID, borderwidth=1, font=("tahoma", "8", "normal")) label.pack(ipadx=1)
['def', 'showtip', '(', 'self', ',', 'text', ')', ':', 'self', '.', 'text', '=', 'text', 'if', 'self', '.', 'tipwindow', 'or', 'not', 'self', '.', 'text', ':', 'return', 'x', ',', 'y', ',', 'cx', ',', 'cy', '=', 'self', '.', 'widget', '.', 'bbox', '(', '"insert"', ')', 'x', '=', 'x', '+', 'self', '.', 'widget', '.', 'winfo_rootx', '(', ')', '+', '27', 'y', '=', 'y', '+', 'cy', '+', 'self', '.', 'widget', '.', 'winfo_rooty', '(', ')', '+', '27', 'self', '.', 'tipwindow', '=', 'tw', '=', 'tk', '.', 'Toplevel', '(', 'self', '.', 'widget', ')', 'tw', '.', 'wm_overrideredirect', '(', '1', ')', 'tw', '.', 'wm_geometry', '(', '"+%d+%d"', '%', '(', 'x', ',', 'y', ')', ')', 'try', ':', '# For Mac OS', 'tw', '.', 'tk', '.', 'call', '(', '"::tk::unsupported::MacWindowStyle"', ',', '"style"', ',', 'tw', '.', '_w', ',', '"help"', ',', '"noActivates"', ')', 'except', 'tk', '.', 'TclError', ':', 'pass', 'label', '=', 'tk', '.', 'Label', '(', 'tw', ',', 'text', '=', 'self', '.', 'text', ',', 'justify', '=', 'tk', '.', 'LEFT', ',', 'background', '=', '"#ffffe0"', ',', 'foreground', '=', '"black"', ',', 'relief', '=', 'tk', '.', 'SOLID', ',', 'borderwidth', '=', '1', ',', 'font', '=', '(', '"tahoma"', ',', '"8"', ',', '"normal"', ')', ')', 'label', '.', 'pack', '(', 'ipadx', '=', '1', ')']
Display text in tooltip window
['Display', 'text', 'in', 'tooltip', 'window']
train
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/simpletooltip.py#L20-L42
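Hypothetical wiring of the tooltip above; it assumes the classic ToolTip recipe in which ToolTip(widget) stores the widget with tipwindow initially unset, and a companion hidetip() destroys the popup window:

import tkinter as tk

root = tk.Tk()
entry = tk.Entry(root)
entry.pack(padx=10, pady=10)

tip = ToolTip(entry)
entry.bind('<Enter>', lambda event: tip.showtip('Type a value here'))
entry.bind('<Leave>', lambda event: tip.hidetip())  # hidetip assumed to exist
root.mainloop()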
4,785
beregond/super_state_machine
super_state_machine/machines.py
StateMachineMetaclass._add_new_methods
def _add_new_methods(cls): """Add all generated methods to result class.""" for name, method in cls.context.new_methods.items(): if hasattr(cls.context.new_class, name): raise ValueError( "Name collision in state machine class - '{name}'." .format(name=name) ) setattr(cls.context.new_class, name, method)
python
def _add_new_methods(cls): """Add all generated methods to result class.""" for name, method in cls.context.new_methods.items(): if hasattr(cls.context.new_class, name): raise ValueError( "Name collision in state machine class - '{name}'." .format(name=name) ) setattr(cls.context.new_class, name, method)
['def', '_add_new_methods', '(', 'cls', ')', ':', 'for', 'name', ',', 'method', 'in', 'cls', '.', 'context', '.', 'new_methods', '.', 'items', '(', ')', ':', 'if', 'hasattr', '(', 'cls', '.', 'context', '.', 'new_class', ',', 'name', ')', ':', 'raise', 'ValueError', '(', '"Name collision in state machine class - \'{name}\'."', '.', 'format', '(', 'name', '=', 'name', ')', ')', 'setattr', '(', 'cls', '.', 'context', '.', 'new_class', ',', 'name', ',', 'method', ')']
Add all generated methods to result class.
['Add', 'all', 'generated', 'methods', 'to', 'result', 'class', '.']
train
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L235-L244
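Because the message template uses a named placeholder, str.format must receive the value as a keyword; passing it positionally raises a KeyError that would mask the intended ValueError. A minimal repro:

template = "Name collision in state machine class - '{name}'."
try:
    template.format('is_open')      # positional arg, named placeholder
except KeyError as exc:
    print('KeyError:', exc)         # KeyError: 'name'
print(template.format(name='is_open'))  # keyword form renders correctly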
4,786
sony/nnabla
python/benchmark/function/function_benchmark.py
FunctionBenchmark.benchmark_forward
def benchmark_forward(self): """Benchmark forward execution. """ self._setup() def f(): self._forward() self.mod_ext.synchronize(**self.ext_kwargs) f() # Ignore first self.forward_stat = self._calc_benchmark_stat(f)
python
def benchmark_forward(self): """Benchmark forward execution. """ self._setup() def f(): self._forward() self.mod_ext.synchronize(**self.ext_kwargs) f() # Ignore first self.forward_stat = self._calc_benchmark_stat(f)
['def', 'benchmark_forward', '(', 'self', ')', ':', 'self', '.', '_setup', '(', ')', 'def', 'f', '(', ')', ':', 'self', '.', '_forward', '(', ')', 'self', '.', 'mod_ext', '.', 'synchronize', '(', '*', '*', 'self', '.', 'ext_kwargs', ')', 'f', '(', ')', '# Ignore first', 'self', '.', 'forward_stat', '=', 'self', '.', '_calc_benchmark_stat', '(', 'f', ')']
Benchmark forward execution.
['Benchmark', 'forward', 'execution', '.']
train
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/benchmark/function/function_benchmark.py#L285-L294
4,787
datosgobar/pydatajson
pydatajson/core.py
DataJson.generate_datasets_report
def generate_datasets_report( self, catalogs, harvest='valid', report=None, export_path=None, catalog_ids=None, catalog_homepages=None, catalog_orgs=None ): """Genera un reporte sobre las condiciones de la metadata de los datasets contenidos en uno o varios catálogos. Args: catalogs (str, dict o list): Uno (str o dict) o varios (list de strs y/o dicts) catálogos. harvest (str): Criterio a utilizar para determinar el valor del campo "harvest" en el reporte generado ('all', 'none', 'valid', 'report' o 'good'). report (str): Path a un reporte/config especificando qué datasets marcar con harvest=1 (sólo si harvest=='report'). export_path (str): Path donde exportar el reporte generado (en formato XLSX o CSV). Si se especifica, el método no devolverá nada. catalog_id (str): Nombre identificador del catálogo para federación catalog_homepage (str): URL del portal de datos donde está implementado el catálogo. Sólo se pasa si el portal es un CKAN o respeta la estructura: https://datos.{organismo}.gob.ar/dataset/{dataset_identifier} Returns: list: Contiene tantos dicts como datasets estén presentes en `catalogs`, con la data del reporte generado. """ assert isinstance(catalogs, string_types + (dict, list)) if isinstance(catalogs, list): assert not catalog_ids or len(catalogs) == len(catalog_ids) assert not catalog_orgs or len(catalogs) == len(catalog_orgs) assert not catalog_homepages or len( catalogs) == len(catalog_homepages) # Si se pasa un único catálogo, genero una lista que lo contenga if isinstance(catalogs, string_types + (dict,)): catalogs = [catalogs] # convierto los catalogos a objetos DataJson catalogs = list(map(readers.read_catalog_obj, catalogs)) if not catalog_ids: catalog_ids = [] for catalog in catalogs: catalog_ids.append(catalog.get("identifier", "")) if isinstance(catalog_ids, string_types + (dict,)): catalog_ids = [catalog_ids] * len(catalogs) if not catalog_orgs or\ isinstance(catalog_orgs, string_types + (dict,)): catalog_orgs = [catalog_orgs] * len(catalogs) if not catalog_homepages or isinstance(catalog_homepages, string_types + (dict,)): catalog_homepages = [catalog_homepages] * len(catalogs) catalogs_reports = [ self.catalog_report( catalog, harvest, report, catalog_id=catalog_id, catalog_homepage=catalog_homepage, catalog_org=catalog_org ) for catalog, catalog_id, catalog_org, catalog_homepage in zip(catalogs, catalog_ids, catalog_orgs, catalog_homepages) ] full_report = [] for report in catalogs_reports: full_report.extend(report) if export_path: # config styles para reportes en excel alignment = Alignment( wrap_text=True, shrink_to_fit=True, vertical="center" ) column_styles = { "dataset_title": {"width": 35}, "dataset_description": {"width": 35}, "dataset_publisher_name": {"width": 35}, "dataset_issued": {"width": 20}, "dataset_modified": {"width": 20}, "distributions_formats": {"width": 15}, "distributions_list": {"width": 90}, "notas": {"width": 50}, } cell_styles = [ {"alignment": Alignment(vertical="center")}, {"row": 1, "font": Font(bold=True)}, {"col": "dataset_title", "alignment": alignment}, {"col": "dataset_description", "alignment": alignment}, {"col": "dataset_publisher_name", "alignment": alignment}, {"col": "distributions_formats", "alignment": alignment}, {"col": "distributions_list", "alignment": alignment}, {"col": "notas", "alignment": alignment}, ] # crea tabla writers.write_table(table=full_report, path=export_path, column_styles=column_styles, cell_styles=cell_styles) else: return full_report
python
def generate_datasets_report( self, catalogs, harvest='valid', report=None, export_path=None, catalog_ids=None, catalog_homepages=None, catalog_orgs=None ): """Genera un reporte sobre las condiciones de la metadata de los datasets contenidos en uno o varios catálogos. Args: catalogs (str, dict o list): Uno (str o dict) o varios (list de strs y/o dicts) catálogos. harvest (str): Criterio a utilizar para determinar el valor del campo "harvest" en el reporte generado ('all', 'none', 'valid', 'report' o 'good'). report (str): Path a un reporte/config especificando qué datasets marcar con harvest=1 (sólo si harvest=='report'). export_path (str): Path donde exportar el reporte generado (en formato XLSX o CSV). Si se especifica, el método no devolverá nada. catalog_id (str): Nombre identificador del catálogo para federación catalog_homepage (str): URL del portal de datos donde está implementado el catálogo. Sólo se pasa si el portal es un CKAN o respeta la estructura: https://datos.{organismo}.gob.ar/dataset/{dataset_identifier} Returns: list: Contiene tantos dicts como datasets estén presentes en `catalogs`, con la data del reporte generado. """ assert isinstance(catalogs, string_types + (dict, list)) if isinstance(catalogs, list): assert not catalog_ids or len(catalogs) == len(catalog_ids) assert not catalog_orgs or len(catalogs) == len(catalog_orgs) assert not catalog_homepages or len( catalogs) == len(catalog_homepages) # Si se pasa un único catálogo, genero una lista que lo contenga if isinstance(catalogs, string_types + (dict,)): catalogs = [catalogs] # convierto los catalogos a objetos DataJson catalogs = list(map(readers.read_catalog_obj, catalogs)) if not catalog_ids: catalog_ids = [] for catalog in catalogs: catalog_ids.append(catalog.get("identifier", "")) if isinstance(catalog_ids, string_types + (dict,)): catalog_ids = [catalog_ids] * len(catalogs) if not catalog_orgs or\ isinstance(catalog_orgs, string_types + (dict,)): catalog_orgs = [catalog_orgs] * len(catalogs) if not catalog_homepages or isinstance(catalog_homepages, string_types + (dict,)): catalog_homepages = [catalog_homepages] * len(catalogs) catalogs_reports = [ self.catalog_report( catalog, harvest, report, catalog_id=catalog_id, catalog_homepage=catalog_homepage, catalog_org=catalog_org ) for catalog, catalog_id, catalog_org, catalog_homepage in zip(catalogs, catalog_ids, catalog_orgs, catalog_homepages) ] full_report = [] for report in catalogs_reports: full_report.extend(report) if export_path: # config styles para reportes en excel alignment = Alignment( wrap_text=True, shrink_to_fit=True, vertical="center" ) column_styles = { "dataset_title": {"width": 35}, "dataset_description": {"width": 35}, "dataset_publisher_name": {"width": 35}, "dataset_issued": {"width": 20}, "dataset_modified": {"width": 20}, "distributions_formats": {"width": 15}, "distributions_list": {"width": 90}, "notas": {"width": 50}, } cell_styles = [ {"alignment": Alignment(vertical="center")}, {"row": 1, "font": Font(bold=True)}, {"col": "dataset_title", "alignment": alignment}, {"col": "dataset_description", "alignment": alignment}, {"col": "dataset_publisher_name", "alignment": alignment}, {"col": "distributions_formats", "alignment": alignment}, {"col": "distributions_list", "alignment": alignment}, {"col": "notas", "alignment": alignment}, ] # crea tabla writers.write_table(table=full_report, path=export_path, column_styles=column_styles, cell_styles=cell_styles) else: return full_report
['def', 'generate_datasets_report', '(', 'self', ',', 'catalogs', ',', 'harvest', '=', "'valid'", ',', 'report', '=', 'None', ',', 'export_path', '=', 'None', ',', 'catalog_ids', '=', 'None', ',', 'catalog_homepages', '=', 'None', ',', 'catalog_orgs', '=', 'None', ')', ':', 'assert', 'isinstance', '(', 'catalogs', ',', 'string_types', '+', '(', 'dict', ',', 'list', ')', ')', 'if', 'isinstance', '(', 'catalogs', ',', 'list', ')', ':', 'assert', 'not', 'catalog_ids', 'or', 'len', '(', 'catalogs', ')', '==', 'len', '(', 'catalog_ids', ')', 'assert', 'not', 'catalog_orgs', 'or', 'len', '(', 'catalogs', ')', '==', 'len', '(', 'catalog_orgs', ')', 'assert', 'not', 'catalog_homepages', 'or', 'len', '(', 'catalogs', ')', '==', 'len', '(', 'catalog_homepages', ')', '# Si se pasa un único catálogo, genero una lista que lo contenga', 'if', 'isinstance', '(', 'catalogs', ',', 'string_types', '+', '(', 'dict', ',', ')', ')', ':', 'catalogs', '=', '[', 'catalogs', ']', '# convierto los catalogos a objetos DataJson', 'catalogs', '=', 'list', '(', 'map', '(', 'readers', '.', 'read_catalog_obj', ',', 'catalogs', ')', ')', 'if', 'not', 'catalog_ids', ':', 'catalog_ids', '=', '[', ']', 'for', 'catalog', 'in', 'catalogs', ':', 'catalog_ids', '.', 'append', '(', 'catalog', '.', 'get', '(', '"identifier"', ',', '""', ')', ')', 'if', 'isinstance', '(', 'catalog_ids', ',', 'string_types', '+', '(', 'dict', ',', ')', ')', ':', 'catalog_ids', '=', '[', 'catalog_ids', ']', '*', 'len', '(', 'catalogs', ')', 'if', 'not', 'catalog_orgs', 'or', 'isinstance', '(', 'catalog_orgs', ',', 'string_types', '+', '(', 'dict', ',', ')', ')', ':', 'catalog_orgs', '=', '[', 'catalog_orgs', ']', '*', 'len', '(', 'catalogs', ')', 'if', 'not', 'catalog_homepages', 'or', 'isinstance', '(', 'catalog_homepages', ',', 'string_types', '+', '(', 'dict', ',', ')', ')', ':', 'catalog_homepages', '=', '[', 'catalog_homepages', ']', '*', 'len', '(', 'catalogs', ')', 'catalogs_reports', '=', '[', 'self', '.', 'catalog_report', '(', 'catalog', ',', 'harvest', ',', 'report', ',', 'catalog_id', '=', 'catalog_id', ',', 'catalog_homepage', '=', 'catalog_homepage', ',', 'catalog_org', '=', 'catalog_org', ')', 'for', 'catalog', ',', 'catalog_id', ',', 'catalog_org', ',', 'catalog_homepage', 'in', 'zip', '(', 'catalogs', ',', 'catalog_ids', ',', 'catalog_orgs', ',', 'catalog_homepages', ')', ']', 'full_report', '=', '[', ']', 'for', 'report', 'in', 'catalogs_reports', ':', 'full_report', '.', 'extend', '(', 'report', ')', 'if', 'export_path', ':', '# config styles para reportes en excel', 'alignment', '=', 'Alignment', '(', 'wrap_text', '=', 'True', ',', 'shrink_to_fit', '=', 'True', ',', 'vertical', '=', '"center"', ')', 'column_styles', '=', '{', '"dataset_title"', ':', '{', '"width"', ':', '35', '}', ',', '"dataset_description"', ':', '{', '"width"', ':', '35', '}', ',', '"dataset_publisher_name"', ':', '{', '"width"', ':', '35', '}', ',', '"dataset_issued"', ':', '{', '"width"', ':', '20', '}', ',', '"dataset_modified"', ':', '{', '"width"', ':', '20', '}', ',', '"distributions_formats"', ':', '{', '"width"', ':', '15', '}', ',', '"distributions_list"', ':', '{', '"width"', ':', '90', '}', ',', '"notas"', ':', '{', '"width"', ':', '50', '}', ',', '}', 'cell_styles', '=', '[', '{', '"alignment"', ':', 'Alignment', '(', 'vertical', '=', '"center"', ')', '}', ',', '{', '"row"', ':', '1', ',', '"font"', ':', 'Font', '(', 'bold', '=', 'True', ')', '}', ',', '{', '"col"', ':', '"dataset_title"', ',', '"alignment"', ':', 'alignment', '}', ',', '{', '"col"', ':', '"dataset_description"', ',', '"alignment"', ':', 'alignment', '}', ',', '{', '"col"', ':', '"dataset_publisher_name"', ',', '"alignment"', ':', 'alignment', '}', ',', '{', '"col"', ':', '"distributions_formats"', ',', '"alignment"', ':', 'alignment', '}', ',', '{', '"col"', ':', '"distributions_list"', ',', '"alignment"', ':', 'alignment', '}', ',', '{', '"col"', ':', '"notas"', ',', '"alignment"', ':', 'alignment', '}', ',', ']', '# crea tabla', 'writers', '.', 'write_table', '(', 'table', '=', 'full_report', ',', 'path', '=', 'export_path', ',', 'column_styles', '=', 'column_styles', ',', 'cell_styles', '=', 'cell_styles', ')', 'else', ':', 'return', 'full_report']
Genera un reporte sobre las condiciones de la metadata de los datasets contenidos en uno o varios catálogos. Args: catalogs (str, dict o list): Uno (str o dict) o varios (list de strs y/o dicts) catálogos. harvest (str): Criterio a utilizar para determinar el valor del campo "harvest" en el reporte generado ('all', 'none', 'valid', 'report' o 'good'). report (str): Path a un reporte/config especificando qué datasets marcar con harvest=1 (sólo si harvest=='report'). export_path (str): Path donde exportar el reporte generado (en formato XLSX o CSV). Si se especifica, el método no devolverá nada. catalog_id (str): Nombre identificador del catálogo para federación catalog_homepage (str): URL del portal de datos donde está implementado el catálogo. Sólo se pasa si el portal es un CKAN o respeta la estructura: https://datos.{organismo}.gob.ar/dataset/{dataset_identifier} Returns: list: Contiene tantos dicts como datasets estén presentes en `catalogs`, con la data del reporte generado.
['Genera', 'un', 'reporte', 'sobre', 'las', 'condiciones', 'de', 'la', 'metadata', 'de', 'los', 'datasets', 'contenidos', 'en', 'uno', 'o', 'varios', 'catálogos', '.']
train
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/core.py#L588-L690
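A hypothetical usage of generate_datasets_report; the catalog URL is a placeholder, and the top-level DataJson import is assumed from the package's documented API:

from pydatajson import DataJson

dj = DataJson()
rows = dj.generate_datasets_report(
    catalogs='https://datos.example.gob.ar/data.json', harvest='valid')
print(len(rows))  # one dict per dataset in the catalog

# With export_path the rows are written to disk and nothing is returned:
dj.generate_datasets_report(
    catalogs='https://datos.example.gob.ar/data.json',
    harvest='valid', export_path='datasets_report.xlsx')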
4,788
picklepete/pyicloud
pyicloud/services/findmyiphone.py
AppleDevice.display_message
def display_message( self, subject='Find My iPhone Alert', message="This is a note", sounds=False ): """ Send a request to the device to display a message. It's possible to pass a custom title by changing the `subject`. """ data = json.dumps( { 'device': self.content['id'], 'subject': subject, 'sound': sounds, 'userText': True, 'text': message } ) self.session.post( self.message_url, params=self.params, data=data )
python
def display_message( self, subject='Find My iPhone Alert', message="This is a note", sounds=False ): """ Send a request to the device to display a message. It's possible to pass a custom title by changing the `subject`. """ data = json.dumps( { 'device': self.content['id'], 'subject': subject, 'sound': sounds, 'userText': True, 'text': message } ) self.session.post( self.message_url, params=self.params, data=data )
['def', 'display_message', '(', 'self', ',', 'subject', '=', "'Find My iPhone Alert'", ',', 'message', '=', '"This is a note"', ',', 'sounds', '=', 'False', ')', ':', 'data', '=', 'json', '.', 'dumps', '(', '{', "'device'", ':', 'self', '.', 'content', '[', "'id'", ']', ',', "'subject'", ':', 'subject', ',', "'sound'", ':', 'sounds', ',', "'userText'", ':', 'True', ',', "'text'", ':', 'message', '}', ')', 'self', '.', 'session', '.', 'post', '(', 'self', '.', 'message_url', ',', 'params', '=', 'self', '.', 'params', ',', 'data', '=', 'data', ')']
Send a request to the device to display a message. It's possible to pass a custom title by changing the `subject`.
['Send', 'a', 'request', 'to', 'the', 'device', 'to', 'display', 'a', 'message', '.']
train
https://github.com/picklepete/pyicloud/blob/9bb6d750662ce24c8febc94807ddbdcdf3cadaa2/pyicloud/services/findmyiphone.py#L146-L167
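A hypothetical usage of display_message; the credentials are placeholders, and the `api.iphone` shortcut is assumed from pyicloud's documented top-level API:

from pyicloud import PyiCloudService

api = PyiCloudService('user@example.com', 'app-specific-password')
api.iphone.display_message(subject='Ping',
                           message='Lunch in 10 minutes?',
                           sounds=True)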
4,789
tensorflow/tensor2tensor
tensor2tensor/models/video/savp.py
NextFrameSavpBase.pad_conv3d_lrelu
def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides, scope): """Pad, apply 3-D convolution and leaky relu.""" padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]] # tf.nn.conv3d accepts a list of 5 values for strides # with first and last value equal to 1 if isinstance(strides, numbers.Integral): strides = [strides] * 3 strides = [1] + strides + [1] # Filter_shape = [K, K, K, num_input, num_output] filter_shape = ( [kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters]) with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): conv_filter = tf.get_variable( "conv_filter", shape=filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.02)) if self.hparams.use_spectral_norm: conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter) if self.is_training: tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op) padded = tf.pad(activations, padding) convolved = tf.nn.conv3d( padded, conv_filter, strides=strides, padding="VALID") rectified = tf.nn.leaky_relu(convolved, alpha=0.2) return rectified
python
def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides, scope): """Pad, apply 3-D convolution and leaky relu.""" padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]] # tf.nn.conv3d accepts a list of 5 values for strides # with first and last value equal to 1 if isinstance(strides, numbers.Integral): strides = [strides] * 3 strides = [1] + strides + [1] # Filter_shape = [K, K, K, num_input, num_output] filter_shape = ( [kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters]) with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): conv_filter = tf.get_variable( "conv_filter", shape=filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.02)) if self.hparams.use_spectral_norm: conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter) if self.is_training: tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op) padded = tf.pad(activations, padding) convolved = tf.nn.conv3d( padded, conv_filter, strides=strides, padding="VALID") rectified = tf.nn.leaky_relu(convolved, alpha=0.2) return rectified
['def', 'pad_conv3d_lrelu', '(', 'self', ',', 'activations', ',', 'n_filters', ',', 'kernel_size', ',', 'strides', ',', 'scope', ')', ':', 'padding', '=', '[', '[', '0', ',', '0', ']', ',', '[', '1', ',', '1', ']', ',', '[', '1', ',', '1', ']', ',', '[', '1', ',', '1', ']', ',', '[', '0', ',', '0', ']', ']', '# tf.nn.conv3d accepts a list of 5 values for strides', '# with first and last value equal to 1', 'if', 'isinstance', '(', 'strides', ',', 'numbers', '.', 'Integral', ')', ':', 'strides', '=', '[', 'strides', ']', '*', '3', 'strides', '=', '[', '1', ']', '+', 'strides', '+', '[', '1', ']', '# Filter_shape = [K, K, K, num_input, num_output]', 'filter_shape', '=', '(', '[', 'kernel_size', ']', '*', '3', '+', 'activations', '.', 'shape', '[', '-', '1', ':', ']', '.', 'as_list', '(', ')', '+', '[', 'n_filters', ']', ')', 'with', 'tf', '.', 'variable_scope', '(', 'scope', ',', 'reuse', '=', 'tf', '.', 'AUTO_REUSE', ')', ':', 'conv_filter', '=', 'tf', '.', 'get_variable', '(', '"conv_filter"', ',', 'shape', '=', 'filter_shape', ',', 'initializer', '=', 'tf', '.', 'truncated_normal_initializer', '(', 'stddev', '=', '0.02', ')', ')', 'if', 'self', '.', 'hparams', '.', 'use_spectral_norm', ':', 'conv_filter', ',', 'assign_op', '=', 'common_layers', '.', 'apply_spectral_norm', '(', 'conv_filter', ')', 'if', 'self', '.', 'is_training', ':', 'tf', '.', 'add_to_collection', '(', 'tf', '.', 'GraphKeys', '.', 'UPDATE_OPS', ',', 'assign_op', ')', 'padded', '=', 'tf', '.', 'pad', '(', 'activations', ',', 'padding', ')', 'convolved', '=', 'tf', '.', 'nn', '.', 'conv3d', '(', 'padded', ',', 'conv_filter', ',', 'strides', '=', 'strides', ',', 'padding', '=', '"VALID"', ')', 'rectified', '=', 'tf', '.', 'nn', '.', 'leaky_relu', '(', 'convolved', ',', 'alpha', '=', '0.2', ')', 'return', 'rectified']
Pad, apply 3-D convolution and leaky relu.
['Pad', 'apply', '3', '-', 'D', 'convolution', 'and', 'leaky', 'relu', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/savp.py#L298-L327
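A shape sketch under stated assumptions: `model` is an initialized NextFrameSavpBase-style object (so hparams and is_training are set) and the TF1 graph API is in use, matching the record:

import tensorflow as tf

video = tf.placeholder(tf.float32, [4, 8, 32, 32, 3])  # batch, T, H, W, C
out = model.pad_conv3d_lrelu(video, n_filters=64, kernel_size=3,
                             strides=2, scope='disc_conv1')
# Each of T/H/W is padded by 1 and convolved VALID with stride 2:
# floor((dim + 2 - 3) / 2) + 1, so the result shape is (4, 4, 16, 16, 64).
print(out.shape)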
4,790
wesyoung/pyzyre
buildutils/zyre/fetch.py
checksum_file
def checksum_file(scheme, path): """Return the checksum (hex digest) of a file""" h = getattr(hashlib, scheme)() with open(path, 'rb') as f: chunk = f.read(65535) while chunk: h.update(chunk) chunk = f.read(65535) return h.hexdigest()
python
def checksum_file(scheme, path): """Return the checksum (hex digest) of a file""" h = getattr(hashlib, scheme)() with open(path, 'rb') as f: chunk = f.read(65535) while chunk: h.update(chunk) chunk = f.read(65535) return h.hexdigest()
['def', 'checksum_file', '(', 'scheme', ',', 'path', ')', ':', 'h', '=', 'getattr', '(', 'hashlib', ',', 'scheme', ')', '(', ')', 'with', 'open', '(', 'path', ',', "'rb'", ')', 'as', 'f', ':', 'chunk', '=', 'f', '.', 'read', '(', '65535', ')', 'while', 'chunk', ':', 'h', '.', 'update', '(', 'chunk', ')', 'chunk', '=', 'f', '.', 'read', '(', '65535', ')', 'return', 'h', '.', 'hexdigest', '(', ')']
Return the checksum (hex digest) of a file
['Return', 'the', 'checksum', '(', 'hex', 'digest', ')', 'of', 'a', 'file']
train
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/buildutils/zyre/fetch.py#L50-L59
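The chunked read keeps memory flat even for large archives; a quick cross-check against hashlib's one-shot API (the filename is a placeholder):

import hashlib

with open('zyre.tar.gz', 'rb') as f:  # placeholder file
    expected = hashlib.sha256(f.read()).hexdigest()

assert checksum_file('sha256', 'zyre.tar.gz') == expected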
4,791
LeKono/pyhgnc
src/pyhgnc/manager/query.py
QueryManager.alias_symbol
def alias_symbol(self, alias_symbol=None, is_previous_symbol=None, hgnc_symbol=None, hgnc_identifier=None, limit=None, as_df=False): """Method to query :class:`.models.AliasSymbol` objects in database :param alias_symbol: alias symbol(s) :type alias_symbol: str or tuple(str) or None :param is_previous_symbol: flag for 'is previous' :type is_previous_symbol: bool or tuple(bool) or None :param hgnc_symbol: HGNC symbol(s) :type hgnc_symbol: str or tuple(str) or None :param hgnc_identifier: identifier(s) in :class:`.models.HGNC` :type hgnc_identifier: int or tuple(int) or None :param limit: - if `isinstance(limit,int)==True` -> limit - if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page) - if limit == None -> all results :type limit: int or tuple(int) or None :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame` :return: - if `as_df == False` -> list(:class:`.models.AliasSymbol`) - if `as_df == True` -> :class:`pandas.DataFrame` :rtype: list(:class:`.models.AliasSymbol`) or :class:`pandas.DataFrame` """ q = self.session.query(models.AliasSymbol) model_queries_config = ( (alias_symbol, models.AliasSymbol.alias_symbol), (is_previous_symbol, models.AliasSymbol.is_previous_symbol), ) q = self.get_model_queries(q, model_queries_config) one_to_many_queries_config = ( (hgnc_symbol, models.HGNC.symbol), (hgnc_identifier, models.HGNC.identifier) ) q = self.get_one_to_many_queries(q, one_to_many_queries_config) return self._limit_and_df(q, limit, as_df)
python
def alias_symbol(self, alias_symbol=None, is_previous_symbol=None, hgnc_symbol=None, hgnc_identifier=None, limit=None, as_df=False): """Method to query :class:`.models.AliasSymbol` objects in database :param alias_symbol: alias symbol(s) :type alias_symbol: str or tuple(str) or None :param is_previous_symbol: flag for 'is previous' :type is_previous_symbol: bool or tuple(bool) or None :param hgnc_symbol: HGNC symbol(s) :type hgnc_symbol: str or tuple(str) or None :param hgnc_identifier: identifier(s) in :class:`.models.HGNC` :type hgnc_identifier: int or tuple(int) or None :param limit: - if `isinstance(limit,int)==True` -> limit - if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page) - if limit == None -> all results :type limit: int or tuple(int) or None :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame` :return: - if `as_df == False` -> list(:class:`.models.AliasSymbol`) - if `as_df == True` -> :class:`pandas.DataFrame` :rtype: list(:class:`.models.AliasSymbol`) or :class:`pandas.DataFrame` """ q = self.session.query(models.AliasSymbol) model_queries_config = ( (alias_symbol, models.AliasSymbol.alias_symbol), (is_previous_symbol, models.AliasSymbol.is_previous_symbol), ) q = self.get_model_queries(q, model_queries_config) one_to_many_queries_config = ( (hgnc_symbol, models.HGNC.symbol), (hgnc_identifier, models.HGNC.identifier) ) q = self.get_one_to_many_queries(q, one_to_many_queries_config) return self._limit_and_df(q, limit, as_df)
['def', 'alias_symbol', '(', 'self', ',', 'alias_symbol', '=', 'None', ',', 'is_previous_symbol', '=', 'None', ',', 'hgnc_symbol', '=', 'None', ',', 'hgnc_identifier', '=', 'None', ',', 'limit', '=', 'None', ',', 'as_df', '=', 'False', ')', ':', 'q', '=', 'self', '.', 'session', '.', 'query', '(', 'models', '.', 'AliasSymbol', ')', 'model_queries_config', '=', '(', '(', 'alias_symbol', ',', 'models', '.', 'AliasSymbol', '.', 'alias_symbol', ')', ',', '(', 'is_previous_symbol', ',', 'models', '.', 'AliasSymbol', '.', 'is_previous_symbol', ')', ',', ')', 'q', '=', 'self', '.', 'get_model_queries', '(', 'q', ',', 'model_queries_config', ')', 'one_to_many_queries_config', '=', '(', '(', 'hgnc_symbol', ',', 'models', '.', 'HGNC', '.', 'symbol', ')', ',', '(', 'hgnc_identifier', ',', 'models', '.', 'HGNC', '.', 'identifier', ')', ')', 'q', '=', 'self', '.', 'get_one_to_many_queries', '(', 'q', ',', 'one_to_many_queries_config', ')', 'return', 'self', '.', '_limit_and_df', '(', 'q', ',', 'limit', ',', 'as_df', ')']
Method to query :class:`.models.AliasSymbol` objects in database :param alias_symbol: alias symbol(s) :type alias_symbol: str or tuple(str) or None :param is_previous_symbol: flag for 'is previous' :type is_previous_symbol: bool or tuple(bool) or None :param hgnc_symbol: HGNC symbol(s) :type hgnc_symbol: str or tuple(str) or None :param hgnc_identifier: identifier(s) in :class:`.models.HGNC` :type hgnc_identifier: int or tuple(int) or None :param limit: - if `isinstance(limit,int)==True` -> limit - if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page) - if limit == None -> all results :type limit: int or tuple(int) or None :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame` :return: - if `as_df == False` -> list(:class:`.models.AliasSymbol`) - if `as_df == True` -> :class:`pandas.DataFrame` :rtype: list(:class:`.models.AliasSymbol`) or :class:`pandas.DataFrame`
['Method', 'to', 'query', ':', 'class', ':', '.', 'models', '.', 'AliasSymbol', 'objects', 'in', 'database']
train
https://github.com/LeKono/pyhgnc/blob/1cae20c40874bfb51581b7c5c1481707e942b5d0/src/pyhgnc/manager/query.py#L416-L465
4,792
mrstephenneal/pdfconduit
pdf/utils/info.py
Info._resolved_objects
def _resolved_objects(pdf, xobject): """Retrieve rotation info.""" return [pdf.getPage(i).get(xobject) for i in range(pdf.getNumPages())][0]
python
def _resolved_objects(pdf, xobject): """Retrieve rotation info.""" return [pdf.getPage(i).get(xobject) for i in range(pdf.getNumPages())][0]
['def', '_resolved_objects', '(', 'pdf', ',', 'xobject', ')', ':', 'return', '[', 'pdf', '.', 'getPage', '(', 'i', ')', '.', 'get', '(', 'xobject', ')', 'for', 'i', 'in', 'range', '(', 'pdf', '.', 'getNumPages', '(', ')', ')', ']', '[', '0', ']']
Retrieve rotation info.
['Retrieve', 'rotation', 'info', '.']
train
https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/utils/info.py#L28-L30
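A hypothetical call of the helper above; '/Rotate' is the standard PDF page key it is usually paired with, the reader setup uses PyPDF2's legacy API to match getPage/getNumPages, and exposure as a staticmethod on Info is assumed:

from PyPDF2 import PdfFileReader

reader = PdfFileReader('document.pdf')  # placeholder path
rotation = Info._resolved_objects(reader, '/Rotate')
print(rotation)  # e.g. 0, 90, 180, or None when the key is absent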
4,793
thespacedoctor/fundamentals
fundamentals/mysql/yaml_to_database.py
main
def main(arguments=None): """ The main function used when ``yaml_to_database.py`` is installed as a cl tool """ # setup the command-line util settings su = tools( arguments=arguments, docString=__doc__, logLevel="WARNING", options_first=False, projectName=False ) arguments, settings, log, dbConn = su.setup() # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): if arg[0] == "-": varname = arg.replace("-", "") + "Flag" else: varname = arg.replace("<", "").replace(">", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) if os.path.isfile(pathToYaml): from fundamentals.mysql import yaml_to_database # PARSE YAML FILE CONTENTS AND ADD TO DATABASE yaml2db = yaml_to_database( log=log, settings=settings, dbConn=dbConn ) yaml2db.add_yaml_file_content_to_database( filepath=pathToYaml, deleteFile=deleteFlag ) basename = os.path.basename(pathToYaml) print "Content of %(basename)s added to database" % locals() else: from fundamentals.mysql import yaml_to_database yaml2db = yaml_to_database( log=log, settings=settings, dbConn=dbConn, pathToInputDir=pathToYaml, deleteFiles=deleteFlag ) yaml2db.ingest() print "Content of %(pathToYaml)s directory added to database" % locals() return
python
def main(arguments=None): """ The main function used when ``yaml_to_database.py`` is installed as a cl tool """ # setup the command-line util settings su = tools( arguments=arguments, docString=__doc__, logLevel="WARNING", options_first=False, projectName=False ) arguments, settings, log, dbConn = su.setup() # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): if arg[0] == "-": varname = arg.replace("-", "") + "Flag" else: varname = arg.replace("<", "").replace(">", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) if os.path.isfile(pathToYaml): from fundamentals.mysql import yaml_to_database # PARSE YAML FILE CONTENTS AND ADD TO DATABASE yaml2db = yaml_to_database( log=log, settings=settings, dbConn=dbConn ) yaml2db.add_yaml_file_content_to_database( filepath=pathToYaml, deleteFile=deleteFlag ) basename = os.path.basename(pathToYaml) print "Content of %(basename)s added to database" % locals() else: from fundamentals.mysql import yaml_to_database yaml2db = yaml_to_database( log=log, settings=settings, dbConn=dbConn, pathToInputDir=pathToYaml, deleteFiles=deleteFlag ) yaml2db.ingest() print "Content of %(pathToYaml)s directory added to database" % locals() return
['def', 'main', '(', 'arguments', '=', 'None', ')', ':', '# setup the command-line util settings', 'su', '=', 'tools', '(', 'arguments', '=', 'arguments', ',', 'docString', '=', '__doc__', ',', 'logLevel', '=', '"WARNING"', ',', 'options_first', '=', 'False', ',', 'projectName', '=', 'False', ')', 'arguments', ',', 'settings', ',', 'log', ',', 'dbConn', '=', 'su', '.', 'setup', '(', ')', '# unpack remaining cl arguments using `exec` to setup the variable names', '# automatically', 'for', 'arg', ',', 'val', 'in', 'arguments', '.', 'iteritems', '(', ')', ':', 'if', 'arg', '[', '0', ']', '==', '"-"', ':', 'varname', '=', 'arg', '.', 'replace', '(', '"-"', ',', '""', ')', '+', '"Flag"', 'else', ':', 'varname', '=', 'arg', '.', 'replace', '(', '"<"', ',', '""', ')', '.', 'replace', '(', '">"', ',', '""', ')', 'if', 'isinstance', '(', 'val', ',', 'str', ')', 'or', 'isinstance', '(', 'val', ',', 'unicode', ')', ':', 'exec', '(', 'varname', '+', '" = \'%s\'"', '%', '(', 'val', ',', ')', ')', 'else', ':', 'exec', '(', 'varname', '+', '" = %s"', '%', '(', 'val', ',', ')', ')', 'if', 'arg', '==', '"--dbConn"', ':', 'dbConn', '=', 'val', 'log', '.', 'debug', '(', "'%s = %s'", '%', '(', 'varname', ',', 'val', ',', ')', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'pathToYaml', ')', ':', 'from', 'fundamentals', '.', 'mysql', 'import', 'yaml_to_database', '# PARSE YAML FILE CONTENTS AND ADD TO DATABASE', 'yaml2db', '=', 'yaml_to_database', '(', 'log', '=', 'log', ',', 'settings', '=', 'settings', ',', 'dbConn', '=', 'dbConn', ')', 'yaml2db', '.', 'add_yaml_file_content_to_database', '(', 'filepath', '=', 'pathToYaml', ',', 'deleteFile', '=', 'deleteFlag', ')', 'basename', '=', 'os', '.', 'path', '.', 'basename', '(', 'pathToYaml', ')', 'print', '"Content of %(basename)s added to database"', '%', 'locals', '(', ')', 'else', ':', 'from', 'fundamentals', '.', 'mysql', 'import', 'yaml_to_database', 'yaml2db', '=', 'yaml_to_database', '(', 'log', '=', 'log', ',', 'settings', '=', 'settings', ',', 'dbConn', '=', 'dbConn', ',', 'pathToInputDir', '=', 'pathToYaml', ',', 'deleteFiles', '=', 'deleteFlag', ')', 'yaml2db', '.', 'ingest', '(', ')', 'print', '"Content of %(pathToYaml)s directory added to database"', '%', 'locals', '(', ')', 'return']
The main function used when ``yaml_to_database.py`` is installed as a cl tool
['The', 'main', 'function', 'used', 'when', 'yaml_to_database', '.', 'py', 'is', 'installed', 'as', 'a', 'cl', 'tool']
train
https://github.com/thespacedoctor/fundamentals/blob/1d2c007ac74442ec2eabde771cfcacdb9c1ab382/fundamentals/mysql/yaml_to_database.py#L38-L95
4,794
bitesofcode/projexui
projexui/widgets/xorbtreewidget/xorbtreewidget.py
XOrbTreeWidget.emitRecordClicked
def emitRecordClicked(self, item):
    """
    Emits the record clicked signal for the given item, provided the
    signals are not currently blocked.

    :param      item | <QTreeWidgetItem>
    """
    # load the next page
    if isinstance(item, XBatchItem):
        item.startLoading()
        self.clearSelection()

    # emit that the record has been clicked
    if isinstance(item, XOrbRecordItem) and not self.signalsBlocked():
        self.recordClicked.emit(item.record())
python
def emitRecordClicked(self, item):
    """
    Emits the record clicked signal for the given item, provided the
    signals are not currently blocked.

    :param      item | <QTreeWidgetItem>
    """
    # load the next page
    if isinstance(item, XBatchItem):
        item.startLoading()
        self.clearSelection()

    # emit that the record has been clicked
    if isinstance(item, XOrbRecordItem) and not self.signalsBlocked():
        self.recordClicked.emit(item.record())
['def', 'emitRecordClicked', '(', 'self', ',', 'item', ')', ':', '# load the next page\r', 'if', 'isinstance', '(', 'item', ',', 'XBatchItem', ')', ':', 'item', '.', 'startLoading', '(', ')', 'self', '.', 'clearSelection', '(', ')', '# emit that the record has been clicked\r', 'if', 'isinstance', '(', 'item', ',', 'XOrbRecordItem', ')', 'and', 'not', 'self', '.', 'signalsBlocked', '(', ')', ':', 'self', '.', 'recordClicked', '.', 'emit', '(', 'item', '.', 'record', '(', ')', ')']
Emits the record clicked signal for the given item, provided the
signals are not currently blocked.

:param      item | <QTreeWidgetItem>
['Emits', 'the', 'record', 'clicked', 'signal', 'for', 'the', 'given', 'item', 'provided', 'the', 'signals', 'are', 'not', 'currently', 'blocked', '.', ':', 'param', 'item', '|', '<QTreeWidgetItem', '>']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbtreewidget/xorbtreewidget.py#L1134-L1148
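The method above is a Qt slot tied to projexui's widget classes, so a self-contained demo needs stand-ins. The sketch below uses made-up, Qt-free stubs (none of these names belong to projexui) purely to trace the same two-step pattern: batch items trigger paging instead of a click signal, and the record signal only fires when signals are not blocked.

# Hypothetical Qt-free stubs illustrating the dispatch pattern above;
# none of these names are part of the projexui API.
class BatchItem:
    def start_loading(self):
        print("loading next page...")

class RecordItem:
    def record(self):
        return {"id": 1}

def emit_record_clicked(item, signals_blocked, on_record_clicked):
    # batch items page in more results instead of emitting a click
    if isinstance(item, BatchItem):
        item.start_loading()
        return
    # only emit when signals are not blocked
    if isinstance(item, RecordItem) and not signals_blocked:
        on_record_clicked(item.record())

emit_record_clicked(RecordItem(), False, print)   # -> {'id': 1}
emit_record_clicked(RecordItem(), True, print)    # -> (no output: blocked)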
4,795
tanghaibao/goatools
goatools/nt_utils.py
get_unique_fields
def get_unique_fields(fld_lists):
    """Get unique namedtuple fields, despite potential duplicates in lists of fields."""
    flds = []
    fld_set = set([f for flst in fld_lists for f in flst])
    fld_seen = set()
    # Add unique fields to list of fields in order that they appear
    for fld_list in fld_lists:
        for fld in fld_list:
            # Add fields if the field has not yet been seen
            if fld not in fld_seen:
                flds.append(fld)
                fld_seen.add(fld)
    assert len(flds) == len(fld_set)
    return flds
python
def get_unique_fields(fld_lists):
    """Get unique namedtuple fields, despite potential duplicates in lists of fields."""
    flds = []
    fld_set = set([f for flst in fld_lists for f in flst])
    fld_seen = set()
    # Add unique fields to list of fields in order that they appear
    for fld_list in fld_lists:
        for fld in fld_list:
            # Add fields if the field has not yet been seen
            if fld not in fld_seen:
                flds.append(fld)
                fld_seen.add(fld)
    assert len(flds) == len(fld_set)
    return flds
['def', 'get_unique_fields', '(', 'fld_lists', ')', ':', 'flds', '=', '[', ']', 'fld_set', '=', 'set', '(', '[', 'f', 'for', 'flst', 'in', 'fld_lists', 'for', 'f', 'in', 'flst', ']', ')', 'fld_seen', '=', 'set', '(', ')', '# Add unique fields to list of fields in order that they appear', 'for', 'fld_list', 'in', 'fld_lists', ':', 'for', 'fld', 'in', 'fld_list', ':', '# Add fields if the field has not yet been seen', 'if', 'fld', 'not', 'in', 'fld_seen', ':', 'flds', '.', 'append', '(', 'fld', ')', 'fld_seen', '.', 'add', '(', 'fld', ')', 'assert', 'len', '(', 'flds', ')', '==', 'len', '(', 'fld_set', ')', 'return', 'flds']
Get unique namedtuple fields, despite potential duplicates in lists of fields.
['Get', 'unique', 'namedtuple', 'fields', 'despite', 'potential', 'duplicates', 'in', 'lists', 'of', 'fields', '.']
train
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/nt_utils.py#L81-L94
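Since `get_unique_fields` is pure Python with no dependencies, it can be exercised directly; the import path follows the record's URL, and the field lists below are made up for illustration.

# Usage sketch; the field lists are illustrative only.
from goatools.nt_utils import get_unique_fields

flds = get_unique_fields([
    ["GO_id", "name", "namespace"],
    ["GO_id", "depth"],        # "GO_id" repeats across lists
])
print(flds)   # -> ['GO_id', 'name', 'namespace', 'depth'] (first-seen order kept)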
4,796
python-rope/rope
rope/base/utils/__init__.py
saveit
def saveit(func):
    """A decorator that caches the return value of a function"""

    name = '_' + func.__name__

    def _wrapper(self, *args, **kwds):
        if not hasattr(self, name):
            setattr(self, name, func(self, *args, **kwds))
        return getattr(self, name)

    return _wrapper
python
def saveit(func):
    """A decorator that caches the return value of a function"""

    name = '_' + func.__name__

    def _wrapper(self, *args, **kwds):
        if not hasattr(self, name):
            setattr(self, name, func(self, *args, **kwds))
        return getattr(self, name)

    return _wrapper
['def', 'saveit', '(', 'func', ')', ':', 'name', '=', "'_'", '+', 'func', '.', '__name__', 'def', '_wrapper', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwds', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', 'name', ')', ':', 'setattr', '(', 'self', ',', 'name', ',', 'func', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwds', ')', ')', 'return', 'getattr', '(', 'self', ',', 'name', ')', 'return', '_wrapper']
A decorator that caches the return value of a function
['A', 'decorator', 'that', 'caches', 'the', 'return', 'value', 'of', 'a', 'function']
train
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/utils/__init__.py#L5-L14
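A short demonstration of the memoization: the wrapped method runs once per instance, and the result is stashed on the instance under `_<funcname>` (`_total` here). The `Report` class is a made-up example, not part of rope.

# Illustrative use of the decorator; `Report` is a made-up class.
from rope.base.utils import saveit

class Report(object):
    calls = 0

    @saveit
    def total(self):
        Report.calls += 1
        return 42

r = Report()
print(r.total(), r.total())   # -> 42 42
print(Report.calls)           # -> 1 (second call served from r._total)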
4,797
improbable-research/keanu
keanu-python/keanu/vertex/generated.py
IntegerAddition
def IntegerAddition(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Adds one vertex to another

    :param left: a vertex to add
    :param right: a vertex to add
    """
    return Integer(context.jvm_view().IntegerAdditionVertex, label, cast_to_integer_vertex(left), cast_to_integer_vertex(right))
python
def IntegerAddition(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Adds one vertex to another

    :param left: a vertex to add
    :param right: a vertex to add
    """
    return Integer(context.jvm_view().IntegerAdditionVertex, label, cast_to_integer_vertex(left), cast_to_integer_vertex(right))
['def', 'IntegerAddition', '(', 'left', ':', 'vertex_constructor_param_types', ',', 'right', ':', 'vertex_constructor_param_types', ',', 'label', ':', 'Optional', '[', 'str', ']', '=', 'None', ')', '->', 'Vertex', ':', 'return', 'Integer', '(', 'context', '.', 'jvm_view', '(', ')', '.', 'IntegerAdditionVertex', ',', 'label', ',', 'cast_to_integer_vertex', '(', 'left', ')', ',', 'cast_to_integer_vertex', '(', 'right', ')', ')']
Adds one vertex to another

:param left: a vertex to add
:param right: a vertex to add
['Adds', 'one', 'vertex', 'to', 'another', ':', 'param', 'left', ':', 'a', 'vertex', 'to', 'add', ':', 'param', 'right', ':', 'a', 'vertex', 'to', 'add']
train
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/vertex/generated.py#L714-L721
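A hedged sketch of calling this generated constructor. Keanu's Python bindings drive a JVM through py4j, so this only runs with the Java backend available; the import path is inferred from the record's module path, and the `get_value` accessor is an assumption about the Python `Vertex` wrapper rather than something stated in the record.

# Assumes keanu's JVM backend is available; the import path is inferred from
# the record, and get_value() is an assumed accessor on the Vertex wrapper.
from keanu.vertex.generated import IntegerAddition

total = IntegerAddition(2, 3, label="total")   # plain ints are cast to integer vertices
print(total.get_value())                       # expected: 5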
4,798
tmux-python/tmuxp
tmuxp/cli.py
_validate_choices
def _validate_choices(options):
    """
    Callback wrapper for validating click.prompt input.

    Parameters
    ----------
    options : list
        List of allowed choices

    Returns
    -------
    :func:`callable`
        callback function for value_proc in :func:`click.prompt`.

    Raises
    ------
    :class:`click.BadParameter`
    """

    def func(value):
        if value not in options:
            raise click.BadParameter(
                'Possible choices are: {0}.'.format(', '.join(options))
            )
        return value

    return func
python
def _validate_choices(options):
    """
    Callback wrapper for validating click.prompt input.

    Parameters
    ----------
    options : list
        List of allowed choices

    Returns
    -------
    :func:`callable`
        callback function for value_proc in :func:`click.prompt`.

    Raises
    ------
    :class:`click.BadParameter`
    """

    def func(value):
        if value not in options:
            raise click.BadParameter(
                'Possible choices are: {0}.'.format(', '.join(options))
            )
        return value

    return func
['def', '_validate_choices', '(', 'options', ')', ':', 'def', 'func', '(', 'value', ')', ':', 'if', 'value', 'not', 'in', 'options', ':', 'raise', 'click', '.', 'BadParameter', '(', "'Possible choices are: {0}.'", '.', 'format', '(', "', '", '.', 'join', '(', 'options', ')', ')', ')', 'return', 'value', 'return', 'func']
Callback wrapper for validating click.prompt input.

Parameters
----------
options : list
    List of allowed choices

Returns
-------
:func:`callable`
    callback function for value_proc in :func:`click.prompt`.

Raises
------
:class:`click.BadParameter`
['Callback', 'wrapper', 'for', 'validating', 'click', '.', 'prompt', 'input', '.']
train
https://github.com/tmux-python/tmuxp/blob/f4aa2e26589a4311131898d2e4a85cb1876b5c9b/tmuxp/cli.py#L103-L129
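`_validate_choices` is private to tmuxp's CLI, so the sketch below re-creates the same closure locally and hands it to `click.prompt` as `value_proc`, the documented hook the docstring names. The prompt text and choice list are made up.

# Local re-creation of the closure above, wired into click.prompt's
# documented `value_proc` hook; prompt text and choices are illustrative.
import click

def validate_choices(options):
    def func(value):
        if value not in options:
            raise click.BadParameter(
                'Possible choices are: {0}.'.format(', '.join(options))
            )
        return value
    return func

window_name = click.prompt(
    'Window name',
    value_proc=validate_choices(['editor', 'logs', 'shell']),
    default='editor',
)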
4,799
theolind/pymysensors
mysensors/handler.py
handle_heartbeat_response_22
def handle_heartbeat_response_22(msg):
    """Process an internal heartbeat response message."""
    if not msg.gateway.is_sensor(msg.node_id):
        return None
    msg.gateway.sensors[msg.node_id].heartbeat = msg.payload
    msg.gateway.alert(msg)
    return None
python
def handle_heartbeat_response_22(msg):
    """Process an internal heartbeat response message."""
    if not msg.gateway.is_sensor(msg.node_id):
        return None
    msg.gateway.sensors[msg.node_id].heartbeat = msg.payload
    msg.gateway.alert(msg)
    return None
['def', 'handle_heartbeat_response_22', '(', 'msg', ')', ':', 'if', 'not', 'msg', '.', 'gateway', '.', 'is_sensor', '(', 'msg', '.', 'node_id', ')', ':', 'return', 'None', 'msg', '.', 'gateway', '.', 'sensors', '[', 'msg', '.', 'node_id', ']', '.', 'heartbeat', '=', 'msg', '.', 'payload', 'msg', '.', 'gateway', '.', 'alert', '(', 'msg', ')', 'return', 'None']
Process an internal heartbeat response message.
['Process', 'an', 'internal', 'heartbeat', 'response', 'message', '.']
train
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/handler.py#L241-L247
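A stub-based sketch tracing what the handler does: unknown nodes are dropped silently, otherwise the payload is recorded as the sensor's heartbeat and the gateway's alert hook fires. The `Gateway`, `Sensor`, and `Message` classes below are made-up stand-ins; pymysensors wires its real objects through the gateway's message loop.

# Made-up stubs tracing the handler's flow; not the pymysensors classes.
class Sensor:
    heartbeat = 0

class Gateway:
    def __init__(self):
        self.sensors = {1: Sensor()}
    def is_sensor(self, node_id):
        return node_id in self.sensors
    def alert(self, msg):
        print("alert for node", msg.node_id)

class Message:
    def __init__(self, gateway, node_id, payload):
        self.gateway, self.node_id, self.payload = gateway, node_id, payload

def handle_heartbeat_response(msg):
    if not msg.gateway.is_sensor(msg.node_id):
        return None              # unknown node: drop silently
    msg.gateway.sensors[msg.node_id].heartbeat = msg.payload
    msg.gateway.alert(msg)
    return None

gw = Gateway()
handle_heartbeat_response(Message(gw, 1, "123456"))   # -> alert for node 1
print(gw.sensors[1].heartbeat)                        # -> 123456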