Columns (stringlengths, min / max):
  code             75 / 104k
  code_sememe      47 / 309k
  token_type       215 / 214k
  code_dependency  75 / 155k
def instance(cls, *args, **kwargs): """ Singleton getter """ if cls._instance is None: cls._instance = cls(*args, **kwargs) loaded = cls._instance.reload() logging.getLogger('luigi-interface').info('Loaded %r', loaded) return cls._instance
def function[instance, parameter[cls]]: constant[ Singleton getter ] if compare[name[cls]._instance is constant[None]] begin[:] name[cls]._instance assign[=] call[name[cls], parameter[<ast.Starred object at 0x7da1b1f46740>]] variable[loaded] assign[=] call[name[cls]._instance.reload, parameter[]] call[call[name[logging].getLogger, parameter[constant[luigi-interface]]].info, parameter[constant[Loaded %r], name[loaded]]] return[name[cls]._instance]
keyword[def] identifier[instance] ( identifier[cls] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[cls] . identifier[_instance] keyword[is] keyword[None] : identifier[cls] . identifier[_instance] = identifier[cls] (* identifier[args] ,** identifier[kwargs] ) identifier[loaded] = identifier[cls] . identifier[_instance] . identifier[reload] () identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[info] ( literal[string] , identifier[loaded] ) keyword[return] identifier[cls] . identifier[_instance]
def instance(cls, *args, **kwargs): """ Singleton getter """ if cls._instance is None: cls._instance = cls(*args, **kwargs) loaded = cls._instance.reload() logging.getLogger('luigi-interface').info('Loaded %r', loaded) # depends on [control=['if'], data=[]] return cls._instance
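The getter above assumes a class that carries a _instance slot and a reload() method; a minimal hedged sketch of that shape (the Config name and the reload body are hypothetical, only the logger name comes from the snippet):

    import logging

    class Config(object):
        _instance = None  # class-level slot read and written by the getter

        def reload(self):
            # hypothetical: re-read configuration, return what was loaded
            return []

        @classmethod
        def instance(cls, *args, **kwargs):
            """ Singleton getter """
            if cls._instance is None:
                cls._instance = cls(*args, **kwargs)
                loaded = cls._instance.reload()
                logging.getLogger('luigi-interface').info('Loaded %r', loaded)
            return cls._instance

    cfg = Config.instance()          # first call constructs and reloads
    assert cfg is Config.instance()  # later calls hand back the same object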
def _markdownify(tag, _listType=None, _blockQuote=False, _listIndex=1): """recursively converts a tag into markdown""" children = tag.find_all(recursive=False) if tag.name == '[document]': for child in children: _markdownify(child) return if tag.name not in _supportedTags or not _supportedAttrs(tag): if tag.name not in _inlineTags: tag.insert_before('\n\n') tag.insert_after('\n\n') else: _escapeCharacters(tag) for child in children: _markdownify(child) return if tag.name not in ('pre', 'code'): _escapeCharacters(tag) _breakRemNewlines(tag) if tag.name == 'p': if tag.string != None: if tag.string.strip() == u'': tag.string = u'\xa0' tag.unwrap() return if not _blockQuote: tag.insert_before('\n\n') tag.insert_after('\n\n') else: tag.insert_before('\n') tag.insert_after('\n') tag.unwrap() for child in children: _markdownify(child) elif tag.name == 'br': tag.string = ' \n' tag.unwrap() elif tag.name == 'img': alt = '' title = '' if tag.has_attr('alt'): alt = tag['alt'] if tag.has_attr('title') and tag['title']: title = ' "%s"' % tag['title'] tag.string = '![%s](%s%s)' % (alt, tag['src'], title) tag.unwrap() elif tag.name == 'hr': tag.string = '\n---\n' tag.unwrap() elif tag.name == 'pre': tag.insert_before('\n\n') tag.insert_after('\n\n') if tag.code: if not _supportedAttrs(tag.code): return for child in tag.code.find_all(recursive=False): if child.name != 'br': return # code block for br in tag.code.find_all('br'): br.string = '\n' br.unwrap() tag.code.unwrap() lines = unicode(tag).strip().split('\n') lines[0] = lines[0][5:] lines[-1] = lines[-1][:-6] if not lines[-1]: lines.pop() for i,line in enumerate(lines): line = line.replace(u'\xa0', ' ') lines[i] = ' %s' % line tag.replace_with(BeautifulSoup('\n'.join(lines), 'html.parser')) return elif tag.name == 'code': # inline code if children: return tag.insert_before('`` ') tag.insert_after(' ``') tag.unwrap() elif _recursivelyValid(tag): if tag.name == 'blockquote': # ! FIXME: hack tag.insert_before('<<<BLOCKQUOTE: ') tag.insert_after('>>>') tag.unwrap() for child in children: _markdownify(child, _blockQuote=True) return elif tag.name == 'a': # process children first for child in children: _markdownify(child) if not tag.has_attr('href'): return if tag.string != tag.get('href') or tag.has_attr('title'): title = '' if tag.has_attr('title'): title = ' "%s"' % tag['title'] tag.string = '[%s](%s%s)' % (BeautifulSoup(unicode(tag), 'html.parser').string, tag.get('href', ''), title) else: # ! FIXME: hack tag.string = '<<<FLOATING LINK: %s>>>' % tag.string tag.unwrap() return elif tag.name == 'h1': tag.insert_before('\n\n# ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h2': tag.insert_before('\n\n## ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h3': tag.insert_before('\n\n### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h4': tag.insert_before('\n\n#### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h5': tag.insert_before('\n\n##### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h6': tag.insert_before('\n\n###### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name in ('ul', 'ol'): tag.insert_before('\n\n') tag.insert_after('\n\n') tag.unwrap() for i, child in enumerate(children): _markdownify(child, _listType=tag.name, _listIndex=i+1) return elif tag.name == 'li': if not _listType: # <li> outside of list; ignore return if _listType == 'ul': tag.insert_before('* ') else: tag.insert_before('%d. ' % _listIndex) for child in children: _markdownify(child) for c in tag.contents: if type(c) != bs4.element.NavigableString: continue c.replace_with('\n '.join(c.split('\n'))) tag.insert_after('\n') tag.unwrap() return elif tag.name in ('strong','b'): tag.insert_before('__') tag.insert_after('__') tag.unwrap() elif tag.name in ('em','i'): tag.insert_before('_') tag.insert_after('_') tag.unwrap() for child in children: _markdownify(child)
def function[_markdownify, parameter[tag, _listType, _blockQuote, _listIndex]]: constant[recursively converts a tag into markdown] variable[children] assign[=] call[name[tag].find_all, parameter[]] if compare[name[tag].name equal[==] constant[[document]]] begin[:] for taget[name[child]] in starred[name[children]] begin[:] call[name[_markdownify], parameter[name[child]]] return[None] if <ast.BoolOp object at 0x7da1b0efd210> begin[:] if compare[name[tag].name <ast.NotIn object at 0x7da2590d7190> name[_inlineTags]] begin[:] call[name[tag].insert_before, parameter[constant[ ]]] call[name[tag].insert_after, parameter[constant[ ]]] return[None] if compare[name[tag].name <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b0efcd30>, <ast.Constant object at 0x7da1b0efcf10>]]] begin[:] call[name[_escapeCharacters], parameter[name[tag]]] call[name[_breakRemNewlines], parameter[name[tag]]] if compare[name[tag].name equal[==] constant[p]] begin[:] if compare[name[tag].string not_equal[!=] constant[None]] begin[:] if compare[call[name[tag].string.strip, parameter[]] equal[==] constant[]] begin[:] name[tag].string assign[=] constant[ ] call[name[tag].unwrap, parameter[]] return[None] if <ast.UnaryOp object at 0x7da1b0efc8b0> begin[:] call[name[tag].insert_before, parameter[constant[ ]]] call[name[tag].insert_after, parameter[constant[ ]]] call[name[tag].unwrap, parameter[]] for taget[name[child]] in starred[name[children]] begin[:] call[name[_markdownify], parameter[name[child]]]
keyword[def] identifier[_markdownify] ( identifier[tag] , identifier[_listType] = keyword[None] , identifier[_blockQuote] = keyword[False] , identifier[_listIndex] = literal[int] ): literal[string] identifier[children] = identifier[tag] . identifier[find_all] ( identifier[recursive] = keyword[False] ) keyword[if] identifier[tag] . identifier[name] == literal[string] : keyword[for] identifier[child] keyword[in] identifier[children] : identifier[_markdownify] ( identifier[child] ) keyword[return] keyword[if] identifier[tag] . identifier[name] keyword[not] keyword[in] identifier[_supportedTags] keyword[or] keyword[not] identifier[_supportedAttrs] ( identifier[tag] ): keyword[if] identifier[tag] . identifier[name] keyword[not] keyword[in] identifier[_inlineTags] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) keyword[else] : identifier[_escapeCharacters] ( identifier[tag] ) keyword[for] identifier[child] keyword[in] identifier[children] : identifier[_markdownify] ( identifier[child] ) keyword[return] keyword[if] identifier[tag] . identifier[name] keyword[not] keyword[in] ( literal[string] , literal[string] ): identifier[_escapeCharacters] ( identifier[tag] ) identifier[_breakRemNewlines] ( identifier[tag] ) keyword[if] identifier[tag] . identifier[name] == literal[string] : keyword[if] identifier[tag] . identifier[string] != keyword[None] : keyword[if] identifier[tag] . identifier[string] . identifier[strip] ()== literal[string] : identifier[tag] . identifier[string] = literal[string] identifier[tag] . identifier[unwrap] () keyword[return] keyword[if] keyword[not] identifier[_blockQuote] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) keyword[else] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[for] identifier[child] keyword[in] identifier[children] : identifier[_markdownify] ( identifier[child] ) keyword[elif] identifier[tag] . identifier[name] == literal[string] : identifier[tag] . identifier[string] = literal[string] identifier[tag] . identifier[unwrap] () keyword[elif] identifier[tag] . identifier[name] == literal[string] : identifier[alt] = literal[string] identifier[title] = literal[string] keyword[if] identifier[tag] . identifier[has_attr] ( literal[string] ): identifier[alt] = identifier[tag] [ literal[string] ] keyword[if] identifier[tag] . identifier[has_attr] ( literal[string] ) keyword[and] identifier[tag] [ literal[string] ]: identifier[title] = literal[string] % identifier[tag] [ literal[string] ] identifier[tag] . identifier[string] = literal[string] %( identifier[alt] , identifier[tag] [ literal[string] ], identifier[title] ) identifier[tag] . identifier[unwrap] () keyword[elif] identifier[tag] . identifier[name] == literal[string] : identifier[tag] . identifier[string] = literal[string] identifier[tag] . identifier[unwrap] () keyword[elif] identifier[tag] . identifier[name] == literal[string] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) keyword[if] identifier[tag] . identifier[code] : keyword[if] keyword[not] identifier[_supportedAttrs] ( identifier[tag] . identifier[code] ): keyword[return] keyword[for] identifier[child] keyword[in] identifier[tag] . identifier[code] . identifier[find_all] ( identifier[recursive] = keyword[False] ): keyword[if] identifier[child] . identifier[name] != literal[string] : keyword[return] keyword[for] identifier[br] keyword[in] identifier[tag] . identifier[code] . identifier[find_all] ( literal[string] ): identifier[br] . identifier[string] = literal[string] identifier[br] . identifier[unwrap] () identifier[tag] . identifier[code] . identifier[unwrap] () identifier[lines] = identifier[unicode] ( identifier[tag] ). identifier[strip] (). identifier[split] ( literal[string] ) identifier[lines] [ literal[int] ]= identifier[lines] [ literal[int] ][ literal[int] :] identifier[lines] [- literal[int] ]= identifier[lines] [- literal[int] ][:- literal[int] ] keyword[if] keyword[not] identifier[lines] [- literal[int] ]: identifier[lines] . identifier[pop] () keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[lines] ): identifier[line] = identifier[line] . identifier[replace] ( literal[string] , literal[string] ) identifier[lines] [ identifier[i] ]= literal[string] % identifier[line] identifier[tag] . identifier[replace_with] ( identifier[BeautifulSoup] ( literal[string] . identifier[join] ( identifier[lines] ), literal[string] )) keyword[return] keyword[elif] identifier[tag] . identifier[name] == literal[string] : keyword[if] identifier[children] : keyword[return] identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[elif] identifier[_recursivelyValid] ( identifier[tag] ): keyword[if] identifier[tag] . identifier[name] == literal[string] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[for] identifier[child] keyword[in] identifier[children] : identifier[_markdownify] ( identifier[child] , identifier[_blockQuote] = keyword[True] ) keyword[return] keyword[elif] identifier[tag] . identifier[name] == literal[string] : keyword[for] identifier[child] keyword[in] identifier[children] : identifier[_markdownify] ( identifier[child] ) keyword[if] keyword[not] identifier[tag] . identifier[has_attr] ( literal[string] ): keyword[return] keyword[if] identifier[tag] . identifier[string] != identifier[tag] . identifier[get] ( literal[string] ) keyword[or] identifier[tag] . identifier[has_attr] ( literal[string] ): identifier[title] = literal[string] keyword[if] identifier[tag] . identifier[has_attr] ( literal[string] ): identifier[title] = literal[string] % identifier[tag] [ literal[string] ] identifier[tag] . identifier[string] = literal[string] %( identifier[BeautifulSoup] ( identifier[unicode] ( identifier[tag] ), literal[string] ). identifier[string] , identifier[tag] . identifier[get] ( literal[string] , literal[string] ), identifier[title] ) keyword[else] : identifier[tag] . identifier[string] = literal[string] % identifier[tag] . identifier[string] identifier[tag] . identifier[unwrap] () keyword[return] keyword[elif] identifier[tag] . identifier[name] == literal[string] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[elif] identifier[tag] . identifier[name] == literal[string] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[elif] identifier[tag] . identifier[name] == literal[string] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[elif] identifier[tag] . identifier[name] == literal[string] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[elif] identifier[tag] . identifier[name] == literal[string] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[elif] identifier[tag] . identifier[name] == literal[string] : identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[elif] identifier[tag] . identifier[name] keyword[in] ( literal[string] , literal[string] ): identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[for] identifier[i] , identifier[child] keyword[in] identifier[enumerate] ( identifier[children] ): identifier[_markdownify] ( identifier[child] , identifier[_listType] = identifier[tag] . identifier[name] , identifier[_listIndex] = identifier[i] + literal[int] ) keyword[return] keyword[elif] identifier[tag] . identifier[name] == literal[string] : keyword[if] keyword[not] identifier[_listType] : keyword[return] keyword[if] identifier[_listType] == literal[string] : identifier[tag] . identifier[insert_before] ( literal[string] ) keyword[else] : identifier[tag] . identifier[insert_before] ( literal[string] % identifier[_listIndex] ) keyword[for] identifier[child] keyword[in] identifier[children] : identifier[_markdownify] ( identifier[child] ) keyword[for] identifier[c] keyword[in] identifier[tag] . identifier[contents] : keyword[if] identifier[type] ( identifier[c] )!= identifier[bs4] . identifier[element] . identifier[NavigableString] : keyword[continue] identifier[c] . identifier[replace_with] ( literal[string] . identifier[join] ( identifier[c] . identifier[split] ( literal[string] ))) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[return] keyword[elif] identifier[tag] . identifier[name] keyword[in] ( literal[string] , literal[string] ): identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[elif] identifier[tag] . identifier[name] keyword[in] ( literal[string] , literal[string] ): identifier[tag] . identifier[insert_before] ( literal[string] ) identifier[tag] . identifier[insert_after] ( literal[string] ) identifier[tag] . identifier[unwrap] () keyword[for] identifier[child] keyword[in] identifier[children] : identifier[_markdownify] ( identifier[child] )
def _markdownify(tag, _listType=None, _blockQuote=False, _listIndex=1): """recursively converts a tag into markdown""" children = tag.find_all(recursive=False) if tag.name == '[document]': for child in children: _markdownify(child) # depends on [control=['for'], data=['child']] return # depends on [control=['if'], data=[]] if tag.name not in _supportedTags or not _supportedAttrs(tag): if tag.name not in _inlineTags: tag.insert_before('\n\n') tag.insert_after('\n\n') # depends on [control=['if'], data=[]] else: _escapeCharacters(tag) for child in children: _markdownify(child) # depends on [control=['for'], data=['child']] return # depends on [control=['if'], data=[]] if tag.name not in ('pre', 'code'): _escapeCharacters(tag) _breakRemNewlines(tag) # depends on [control=['if'], data=[]] if tag.name == 'p': if tag.string != None: if tag.string.strip() == u'': tag.string = u'\xa0' tag.unwrap() return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if not _blockQuote: tag.insert_before('\n\n') tag.insert_after('\n\n') # depends on [control=['if'], data=[]] else: tag.insert_before('\n') tag.insert_after('\n') tag.unwrap() for child in children: _markdownify(child) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]] elif tag.name == 'br': tag.string = ' \n' tag.unwrap() # depends on [control=['if'], data=[]] elif tag.name == 'img': alt = '' title = '' if tag.has_attr('alt'): alt = tag['alt'] # depends on [control=['if'], data=[]] if tag.has_attr('title') and tag['title']: title = ' "%s"' % tag['title'] # depends on [control=['if'], data=[]] tag.string = '![%s](%s%s)' % (alt, tag['src'], title) tag.unwrap() # depends on [control=['if'], data=[]] elif tag.name == 'hr': tag.string = '\n---\n' tag.unwrap() # depends on [control=['if'], data=[]] elif tag.name == 'pre': tag.insert_before('\n\n') tag.insert_after('\n\n') if tag.code: if not _supportedAttrs(tag.code): return # depends on [control=['if'], data=[]] for child in tag.code.find_all(recursive=False): if child.name != 'br': return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] # code block for br in tag.code.find_all('br'): br.string = '\n' br.unwrap() # depends on [control=['for'], data=['br']] tag.code.unwrap() lines = unicode(tag).strip().split('\n') lines[0] = lines[0][5:] lines[-1] = lines[-1][:-6] if not lines[-1]: lines.pop() # depends on [control=['if'], data=[]] for (i, line) in enumerate(lines): line = line.replace(u'\xa0', ' ') lines[i] = ' %s' % line # depends on [control=['for'], data=[]] tag.replace_with(BeautifulSoup('\n'.join(lines), 'html.parser')) # depends on [control=['if'], data=[]] return # depends on [control=['if'], data=[]] elif tag.name == 'code': # inline code if children: return # depends on [control=['if'], data=[]] tag.insert_before('`` ') tag.insert_after(' ``') tag.unwrap() # depends on [control=['if'], data=[]] elif _recursivelyValid(tag): if tag.name == 'blockquote': # ! FIXME: hack tag.insert_before('<<<BLOCKQUOTE: ') tag.insert_after('>>>') tag.unwrap() for child in children: _markdownify(child, _blockQuote=True) # depends on [control=['for'], data=['child']] return # depends on [control=['if'], data=[]] elif tag.name == 'a': # process children first for child in children: _markdownify(child) # depends on [control=['for'], data=['child']] if not tag.has_attr('href'): return # depends on [control=['if'], data=[]] if tag.string != tag.get('href') or tag.has_attr('title'): title = '' if tag.has_attr('title'): title = ' "%s"' % tag['title'] # depends on [control=['if'], data=[]] tag.string = '[%s](%s%s)' % (BeautifulSoup(unicode(tag), 'html.parser').string, tag.get('href', ''), title) # depends on [control=['if'], data=[]] else: # ! FIXME: hack tag.string = '<<<FLOATING LINK: %s>>>' % tag.string tag.unwrap() return # depends on [control=['if'], data=[]] elif tag.name == 'h1': tag.insert_before('\n\n# ') tag.insert_after('\n\n') tag.unwrap() # depends on [control=['if'], data=[]] elif tag.name == 'h2': tag.insert_before('\n\n## ') tag.insert_after('\n\n') tag.unwrap() # depends on [control=['if'], data=[]] elif tag.name == 'h3': tag.insert_before('\n\n### ') tag.insert_after('\n\n') tag.unwrap() # depends on [control=['if'], data=[]] elif tag.name == 'h4': tag.insert_before('\n\n#### ') tag.insert_after('\n\n') tag.unwrap() # depends on [control=['if'], data=[]] elif tag.name == 'h5': tag.insert_before('\n\n##### ') tag.insert_after('\n\n') tag.unwrap() # depends on [control=['if'], data=[]] elif tag.name == 'h6': tag.insert_before('\n\n###### ') tag.insert_after('\n\n') tag.unwrap() # depends on [control=['if'], data=[]] elif tag.name in ('ul', 'ol'): tag.insert_before('\n\n') tag.insert_after('\n\n') tag.unwrap() for (i, child) in enumerate(children): _markdownify(child, _listType=tag.name, _listIndex=i + 1) # depends on [control=['for'], data=[]] return # depends on [control=['if'], data=[]] elif tag.name == 'li': if not _listType: # <li> outside of list; ignore return # depends on [control=['if'], data=[]] if _listType == 'ul': tag.insert_before('* ') # depends on [control=['if'], data=[]] else: tag.insert_before('%d. ' % _listIndex) for child in children: _markdownify(child) # depends on [control=['for'], data=['child']] for c in tag.contents: if type(c) != bs4.element.NavigableString: continue # depends on [control=['if'], data=[]] c.replace_with('\n '.join(c.split('\n'))) # depends on [control=['for'], data=['c']] tag.insert_after('\n') tag.unwrap() return # depends on [control=['if'], data=[]] elif tag.name in ('strong', 'b'): tag.insert_before('__') tag.insert_after('__') tag.unwrap() # depends on [control=['if'], data=[]] elif tag.name in ('em', 'i'): tag.insert_before('_') tag.insert_after('_') tag.unwrap() # depends on [control=['if'], data=[]] for child in children: _markdownify(child) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
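_markdownify leans on module-level helpers (_supportedTags, _supportedAttrs, _inlineTags, _escapeCharacters, _breakRemNewlines, _recursivelyValid) and on Python 2's unicode; a hedged sketch of the public driver such a converter typically sits behind, assuming those helpers exist in the same module:

    from bs4 import BeautifulSoup

    def convert(html):
        # hypothetical wrapper: parse, convert the '[document]' tag in place,
        # then collapse the surplus blank lines the tag rewrites leave behind
        soup = BeautifulSoup(html, 'html.parser')
        _markdownify(soup)
        text = unicode(soup)  # Python 2 source; str(soup) on Python 3
        chunks = (c.strip() for c in text.split('\n\n'))
        return '\n\n'.join(c for c in chunks if c)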
def __check_port(self, port): """check port status return True if port is free, False otherwise """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.bind((_host(), port)) return True except socket.error: return False finally: s.close()
def function[__check_port, parameter[self, port]]: constant[check port status return True if port is free, False otherwise ] variable[s] assign[=] call[name[socket].socket, parameter[name[socket].AF_INET, name[socket].SOCK_STREAM]] <ast.Try object at 0x7da20c6e7220>
keyword[def] identifier[__check_port] ( identifier[self] , identifier[port] ): literal[string] identifier[s] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_STREAM] ) keyword[try] : identifier[s] . identifier[bind] (( identifier[_host] (), identifier[port] )) keyword[return] keyword[True] keyword[except] identifier[socket] . identifier[error] : keyword[return] keyword[False] keyword[finally] : identifier[s] . identifier[close] ()
def __check_port(self, port): """check port status return True if port is free, False otherwise """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.bind((_host(), port)) return True # depends on [control=['try'], data=[]] except socket.error: return False # depends on [control=['except'], data=[]] finally: s.close()
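__check_port depends on a module-level _host() helper; the same bind-and-close probe as a self-contained function, with localhost assumed in place of _host():

    import socket

    def port_is_free(port, host='127.0.0.1'):
        # the bind succeeds only while nothing else holds the port
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind((host, port))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    print(port_is_free(8080))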
def remove_droppable(self, droppable_id): """remove a droppable, given the id""" updated_droppables = [] for droppable in self.my_osid_object_form._my_map['droppables']: if droppable['id'] != droppable_id: updated_droppables.append(droppable) self.my_osid_object_form._my_map['droppables'] = updated_droppables
def function[remove_droppable, parameter[self, droppable_id]]: constant[remove a droppable, given the id] variable[updated_droppables] assign[=] list[[]] for taget[name[droppable]] in starred[call[name[self].my_osid_object_form._my_map][constant[droppables]]] begin[:] if compare[call[name[droppable]][constant[id]] not_equal[!=] name[droppable_id]] begin[:] call[name[updated_droppables].append, parameter[name[droppable]]] call[name[self].my_osid_object_form._my_map][constant[droppables]] assign[=] name[updated_droppables]
keyword[def] identifier[remove_droppable] ( identifier[self] , identifier[droppable_id] ): literal[string] identifier[updated_droppables] =[] keyword[for] identifier[droppable] keyword[in] identifier[self] . identifier[my_osid_object_form] . identifier[_my_map] [ literal[string] ]: keyword[if] identifier[droppable] [ literal[string] ]!= identifier[droppable_id] : identifier[updated_droppables] . identifier[append] ( identifier[droppable] ) identifier[self] . identifier[my_osid_object_form] . identifier[_my_map] [ literal[string] ]= identifier[updated_droppables]
def remove_droppable(self, droppable_id): """remove a droppable, given the id""" updated_droppables = [] for droppable in self.my_osid_object_form._my_map['droppables']: if droppable['id'] != droppable_id: updated_droppables.append(droppable) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['droppable']] self.my_osid_object_form._my_map['droppables'] = updated_droppables
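The rebuild loop in remove_droppable is the standard filtering idiom; assuming _my_map['droppables'] holds a list of dicts keyed by 'id', the same effect as a comprehension:

    droppables = [{'id': 'a', 'zone': 1}, {'id': 'b', 'zone': 2}]
    droppable_id = 'a'
    droppables = [d for d in droppables if d['id'] != droppable_id]
    assert droppables == [{'id': 'b', 'zone': 2}]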
def _validate_nullable(self, nullable, field, value): """ {'type': 'boolean'} """ if value is None: if not nullable: self._error(field, errors.NOT_NULLABLE) self._drop_remaining_rules( 'allowed', 'empty', 'forbidden', 'items', 'keysrules', 'min', 'max', 'minlength', 'maxlength', 'regex', 'schema', 'type', 'valuesrules', )
def function[_validate_nullable, parameter[self, nullable, field, value]]: constant[ {'type': 'boolean'} ] if compare[name[value] is constant[None]] begin[:] if <ast.UnaryOp object at 0x7da2054a55d0> begin[:] call[name[self]._error, parameter[name[field], name[errors].NOT_NULLABLE]] call[name[self]._drop_remaining_rules, parameter[constant[allowed], constant[empty], constant[forbidden], constant[items], constant[keysrules], constant[min], constant[max], constant[minlength], constant[maxlength], constant[regex], constant[schema], constant[type], constant[valuesrules]]]
keyword[def] identifier[_validate_nullable] ( identifier[self] , identifier[nullable] , identifier[field] , identifier[value] ): literal[string] keyword[if] identifier[value] keyword[is] keyword[None] : keyword[if] keyword[not] identifier[nullable] : identifier[self] . identifier[_error] ( identifier[field] , identifier[errors] . identifier[NOT_NULLABLE] ) identifier[self] . identifier[_drop_remaining_rules] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , )
def _validate_nullable(self, nullable, field, value): """ {'type': 'boolean'} """ if value is None: if not nullable: self._error(field, errors.NOT_NULLABLE) # depends on [control=['if'], data=[]] self._drop_remaining_rules('allowed', 'empty', 'forbidden', 'items', 'keysrules', 'min', 'max', 'minlength', 'maxlength', 'regex', 'schema', 'type', 'valuesrules') # depends on [control=['if'], data=[]]
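_validate_nullable has the shape of a Cerberus rule validator (the {'type': 'boolean'} docstring is the rule's own schema, and dropping the remaining rules is what lets None skip 'type', 'min', and the rest); a usage sketch assuming the cerberus package:

    from cerberus import Validator

    v = Validator({'name': {'type': 'string', 'nullable': True}})
    assert v.validate({'name': None})  # None accepted, later rules dropped

    v = Validator({'name': {'type': 'string'}})  # nullable defaults to False
    assert not v.validate({'name': None})
    print(v.errors)  # {'name': ['null value not allowed']}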
def set_param(self, param, value, header='Content-Type', requote=True, charset=None, language=''): """Set a parameter in the Content-Type header. If the parameter already exists in the header, its value will be replaced with the new value. If header is Content-Type and has not yet been defined for this message, it will be set to "text/plain" and the new parameter and value will be appended as per RFC 2045. An alternate header can be specified in the header argument, and all parameters will be quoted as necessary unless requote is False. If charset is specified, the parameter will be encoded according to RFC 2231. Optional language specifies the RFC 2231 language, defaulting to the empty string. Both charset and language should be strings. """ if not isinstance(value, tuple) and charset: value = (charset, language, value) if header not in self and header.lower() == 'content-type': ctype = 'text/plain' else: ctype = self.get(header) if not self.get_param(param, header=header): if not ctype: ctype = _formatparam(param, value, requote) else: ctype = SEMISPACE.join( [ctype, _formatparam(param, value, requote)]) else: ctype = '' for old_param, old_value in self.get_params(header=header, unquote=requote): append_param = '' if old_param.lower() == param.lower(): append_param = _formatparam(param, value, requote) else: append_param = _formatparam(old_param, old_value, requote) if not ctype: ctype = append_param else: ctype = SEMISPACE.join([ctype, append_param]) if ctype != self.get(header): del self[header] self[header] = ctype
def function[set_param, parameter[self, param, value, header, requote, charset, language]]: constant[Set a parameter in the Content-Type header. If the parameter already exists in the header, its value will be replaced with the new value. If header is Content-Type and has not yet been defined for this message, it will be set to "text/plain" and the new parameter and value will be appended as per RFC 2045. An alternate header can be specified in the header argument, and all parameters will be quoted as necessary unless requote is False. If charset is specified, the parameter will be encoded according to RFC 2231. Optional language specifies the RFC 2231 language, defaulting to the empty string. Both charset and language should be strings. ] if <ast.BoolOp object at 0x7da20c6a90f0> begin[:] variable[value] assign[=] tuple[[<ast.Name object at 0x7da20c6a87c0>, <ast.Name object at 0x7da20c6a9330>, <ast.Name object at 0x7da2044c09d0>]] if <ast.BoolOp object at 0x7da2044c1a20> begin[:] variable[ctype] assign[=] constant[text/plain] if <ast.UnaryOp object at 0x7da2044c17e0> begin[:] if <ast.UnaryOp object at 0x7da2044c23b0> begin[:] variable[ctype] assign[=] call[name[_formatparam], parameter[name[param], name[value], name[requote]]] if compare[name[ctype] not_equal[!=] call[name[self].get, parameter[name[header]]]] begin[:] <ast.Delete object at 0x7da2044c18d0> call[name[self]][name[header]] assign[=] name[ctype]
keyword[def] identifier[set_param] ( identifier[self] , identifier[param] , identifier[value] , identifier[header] = literal[string] , identifier[requote] = keyword[True] , identifier[charset] = keyword[None] , identifier[language] = literal[string] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[tuple] ) keyword[and] identifier[charset] : identifier[value] =( identifier[charset] , identifier[language] , identifier[value] ) keyword[if] identifier[header] keyword[not] keyword[in] identifier[self] keyword[and] identifier[header] . identifier[lower] ()== literal[string] : identifier[ctype] = literal[string] keyword[else] : identifier[ctype] = identifier[self] . identifier[get] ( identifier[header] ) keyword[if] keyword[not] identifier[self] . identifier[get_param] ( identifier[param] , identifier[header] = identifier[header] ): keyword[if] keyword[not] identifier[ctype] : identifier[ctype] = identifier[_formatparam] ( identifier[param] , identifier[value] , identifier[requote] ) keyword[else] : identifier[ctype] = identifier[SEMISPACE] . identifier[join] ( [ identifier[ctype] , identifier[_formatparam] ( identifier[param] , identifier[value] , identifier[requote] )]) keyword[else] : identifier[ctype] = literal[string] keyword[for] identifier[old_param] , identifier[old_value] keyword[in] identifier[self] . identifier[get_params] ( identifier[header] = identifier[header] , identifier[unquote] = identifier[requote] ): identifier[append_param] = literal[string] keyword[if] identifier[old_param] . identifier[lower] ()== identifier[param] . identifier[lower] (): identifier[append_param] = identifier[_formatparam] ( identifier[param] , identifier[value] , identifier[requote] ) keyword[else] : identifier[append_param] = identifier[_formatparam] ( identifier[old_param] , identifier[old_value] , identifier[requote] ) keyword[if] keyword[not] identifier[ctype] : identifier[ctype] = identifier[append_param] keyword[else] : identifier[ctype] = identifier[SEMISPACE] . identifier[join] ([ identifier[ctype] , identifier[append_param] ]) keyword[if] identifier[ctype] != identifier[self] . identifier[get] ( identifier[header] ): keyword[del] identifier[self] [ identifier[header] ] identifier[self] [ identifier[header] ]= identifier[ctype]
def set_param(self, param, value, header='Content-Type', requote=True, charset=None, language=''): """Set a parameter in the Content-Type header. If the parameter already exists in the header, its value will be replaced with the new value. If header is Content-Type and has not yet been defined for this message, it will be set to "text/plain" and the new parameter and value will be appended as per RFC 2045. An alternate header can be specified in the header argument, and all parameters will be quoted as necessary unless requote is False. If charset is specified, the parameter will be encoded according to RFC 2231. Optional language specifies the RFC 2231 language, defaulting to the empty string. Both charset and language should be strings. """ if not isinstance(value, tuple) and charset: value = (charset, language, value) # depends on [control=['if'], data=[]] if header not in self and header.lower() == 'content-type': ctype = 'text/plain' # depends on [control=['if'], data=[]] else: ctype = self.get(header) if not self.get_param(param, header=header): if not ctype: ctype = _formatparam(param, value, requote) # depends on [control=['if'], data=[]] else: ctype = SEMISPACE.join([ctype, _formatparam(param, value, requote)]) # depends on [control=['if'], data=[]] else: ctype = '' for (old_param, old_value) in self.get_params(header=header, unquote=requote): append_param = '' if old_param.lower() == param.lower(): append_param = _formatparam(param, value, requote) # depends on [control=['if'], data=[]] else: append_param = _formatparam(old_param, old_value, requote) if not ctype: ctype = append_param # depends on [control=['if'], data=[]] else: ctype = SEMISPACE.join([ctype, append_param]) # depends on [control=['for'], data=[]] if ctype != self.get(header): del self[header] self[header] = ctype # depends on [control=['if'], data=['ctype']]
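set_param matches the standard library's email.message.Message API; a short usage sketch against the stdlib class:

    from email.message import Message

    msg = Message()
    msg.set_param('charset', 'utf-8')   # no Content-Type yet: text/plain assumed
    print(msg['Content-Type'])          # text/plain; charset="utf-8"
    msg.set_param('charset', 'iso-8859-1')  # existing parameter is replaced
    print(msg['Content-Type'])          # text/plain; charset="iso-8859-1"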
def load_map_file(filename, name=None, check_integrity=True): """ Loads a ContainerMap configuration from a YAML file. :param filename: YAML file name. :type filename: unicode | str :param name: Name of the ContainerMap. If ``None`` will attempt to find a ``name`` element on the root level of the document; an empty string names the map according to the file, without extension. :type name: unicode | str :param check_integrity: Performs a brief integrity check; default is ``True``. :type check_integrity: bool :return: A ContainerMap object. :rtype: ContainerMap """ if name == '': base_name = os.path.basename(filename) map_name, __, __ = os.path.basename(base_name).rpartition(os.path.extsep) else: map_name = name with open(filename, 'r') as f: return load_map(f, name=map_name, check_integrity=check_integrity)
def function[load_map_file, parameter[filename, name, check_integrity]]: constant[ Loads a ContainerMap configuration from a YAML file. :param filename: YAML file name. :type filename: unicode | str :param name: Name of the ContainerMap. If ``None`` will attempt to find a ``name`` element on the root level of the document; an empty string names the map according to the file, without extension. :type name: unicode | str :param check_integrity: Performs a brief integrity check; default is ``True``. :type check_integrity: bool :return: A ContainerMap object. :rtype: ContainerMap ] if compare[name[name] equal[==] constant[]] begin[:] variable[base_name] assign[=] call[name[os].path.basename, parameter[name[filename]]] <ast.Tuple object at 0x7da20c7c8100> assign[=] call[call[name[os].path.basename, parameter[name[base_name]]].rpartition, parameter[name[os].path.extsep]] with call[name[open], parameter[name[filename], constant[r]]] begin[:] return[call[name[load_map], parameter[name[f]]]]
keyword[def] identifier[load_map_file] ( identifier[filename] , identifier[name] = keyword[None] , identifier[check_integrity] = keyword[True] ): literal[string] keyword[if] identifier[name] == literal[string] : identifier[base_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] ) identifier[map_name] , identifier[__] , identifier[__] = identifier[os] . identifier[path] . identifier[basename] ( identifier[base_name] ). identifier[rpartition] ( identifier[os] . identifier[path] . identifier[extsep] ) keyword[else] : identifier[map_name] = identifier[name] keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] : keyword[return] identifier[load_map] ( identifier[f] , identifier[name] = identifier[map_name] , identifier[check_integrity] = identifier[check_integrity] )
def load_map_file(filename, name=None, check_integrity=True): """ Loads a ContainerMap configuration from a YAML file. :param filename: YAML file name. :type filename: unicode | str :param name: Name of the ContainerMap. If ``None`` will attempt to find a ``name`` element on the root level of the document; an empty string names the map according to the file, without extension. :type name: unicode | str :param check_integrity: Performs a brief integrity check; default is ``True``. :type check_integrity: bool :return: A ContainerMap object. :rtype: ContainerMap """ if name == '': base_name = os.path.basename(filename) (map_name, __, __) = os.path.basename(base_name).rpartition(os.path.extsep) # depends on [control=['if'], data=[]] else: map_name = name with open(filename, 'r') as f: return load_map(f, name=map_name, check_integrity=check_integrity) # depends on [control=['with'], data=['f']]
def cli(): """Entry point for the application script""" parser = get_argparser() args = parser.parse_args() check_args(args) if args.v: print('ERAlchemy version {}.'.format(__version__)) exit(0) render_er( args.i, args.o, include_tables=args.include_tables, include_columns=args.include_columns, exclude_tables=args.exclude_tables, exclude_columns=args.exclude_columns, schema=args.s )
def function[cli, parameter[]]: constant[Entry point for the application script] variable[parser] assign[=] call[name[get_argparser], parameter[]] variable[args] assign[=] call[name[parser].parse_args, parameter[]] call[name[check_args], parameter[name[args]]] if name[args].v begin[:] call[name[print], parameter[call[constant[ERAlchemy version {}.].format, parameter[name[__version__]]]]] call[name[exit], parameter[constant[0]]] call[name[render_er], parameter[name[args].i, name[args].o]]
keyword[def] identifier[cli] (): literal[string] identifier[parser] = identifier[get_argparser] () identifier[args] = identifier[parser] . identifier[parse_args] () identifier[check_args] ( identifier[args] ) keyword[if] identifier[args] . identifier[v] : identifier[print] ( literal[string] . identifier[format] ( identifier[__version__] )) identifier[exit] ( literal[int] ) identifier[render_er] ( identifier[args] . identifier[i] , identifier[args] . identifier[o] , identifier[include_tables] = identifier[args] . identifier[include_tables] , identifier[include_columns] = identifier[args] . identifier[include_columns] , identifier[exclude_tables] = identifier[args] . identifier[exclude_tables] , identifier[exclude_columns] = identifier[args] . identifier[exclude_columns] , identifier[schema] = identifier[args] . identifier[s] )
def cli(): """Entry point for the application script""" parser = get_argparser() args = parser.parse_args() check_args(args) if args.v: print('ERAlchemy version {}.'.format(__version__)) exit(0) # depends on [control=['if'], data=[]] render_er(args.i, args.o, include_tables=args.include_tables, include_columns=args.include_columns, exclude_tables=args.exclude_tables, exclude_columns=args.exclude_columns, schema=args.s)
def check_name_collision( self, name, block_id, checked_ops ): """ Are there any colliding names in this block? Set the '__collided__' flag and related flags if so, so we don't commit them. Not called directly; called by the @state_create() decorator in blockstack.lib.operations.register """ return self.check_collision( "name", name, block_id, checked_ops, OPCODE_NAME_STATE_CREATIONS )
def function[check_name_collision, parameter[self, name, block_id, checked_ops]]: constant[ Are there any colliding names in this block? Set the '__collided__' flag and related flags if so, so we don't commit them. Not called directly; called by the @state_create() decorator in blockstack.lib.operations.register ] return[call[name[self].check_collision, parameter[constant[name], name[name], name[block_id], name[checked_ops], name[OPCODE_NAME_STATE_CREATIONS]]]]
keyword[def] identifier[check_name_collision] ( identifier[self] , identifier[name] , identifier[block_id] , identifier[checked_ops] ): literal[string] keyword[return] identifier[self] . identifier[check_collision] ( literal[string] , identifier[name] , identifier[block_id] , identifier[checked_ops] , identifier[OPCODE_NAME_STATE_CREATIONS] )
def check_name_collision(self, name, block_id, checked_ops): """ Are there any colliding names in this block? Set the '__collided__' flag and related flags if so, so we don't commit them. Not called directly; called by the @state_create() decorator in blockstack.lib.operations.register """ return self.check_collision('name', name, block_id, checked_ops, OPCODE_NAME_STATE_CREATIONS)
def limit_pos(p, se_pos, nw_pos): """ Limits position p to stay inside containing state :param p: Position to limit :param se_pos: Bottom/Right boundary :param nw_pos: Top/Left boundary :return: """ if p > se_pos: _update(p, se_pos) elif p < nw_pos: _update(p, nw_pos)
def function[limit_pos, parameter[p, se_pos, nw_pos]]: constant[ Limits position p to stay inside containing state :param p: Position to limit :param se_pos: Bottom/Right boundary :param nw_pos: Top/Left boundary :return: ] if compare[name[p] greater[>] name[se_pos]] begin[:] call[name[_update], parameter[name[p], name[se_pos]]]
keyword[def] identifier[limit_pos] ( identifier[p] , identifier[se_pos] , identifier[nw_pos] ): literal[string] keyword[if] identifier[p] > identifier[se_pos] : identifier[_update] ( identifier[p] , identifier[se_pos] ) keyword[elif] identifier[p] < identifier[nw_pos] : identifier[_update] ( identifier[p] , identifier[nw_pos] )
def limit_pos(p, se_pos, nw_pos): """ Limits position p to stay inside containing state :param p: Position to limit :param se_pos: Bottom/Right boundary :param nw_pos: Top/Left boundary :return: """ if p > se_pos: _update(p, se_pos) # depends on [control=['if'], data=['p', 'se_pos']] elif p < nw_pos: _update(p, nw_pos) # depends on [control=['if'], data=['p', 'nw_pos']]
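Because a plain rebinding of p inside limit_pos would never reach the caller, _update has to mutate the position object in place; a sketch of that contract with a hypothetical mutable position type (the Pos class and the coordinate-copying _update are assumptions, not taken from the snippet):

    def _update(p, bound):
        # hypothetical: copy the boundary's coordinates into p in place
        p.x, p.y = bound.x, bound.y

    class Pos(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __gt__(self, other):
            return self.x > other.x and self.y > other.y
        def __lt__(self, other):
            return self.x < other.x and self.y < other.y

    p = Pos(12, 9)
    limit_pos(p, Pos(10, 8), Pos(0, 0))
    print(p.x, p.y)  # 10 8 -- clamped to the south-east boundary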
def getOverlayTexelAspect(self, ulOverlayHandle): """Gets the aspect ratio of the texels in the overlay. Defaults to 1.0""" fn = self.function_table.getOverlayTexelAspect pfTexelAspect = c_float() result = fn(ulOverlayHandle, byref(pfTexelAspect)) return result, pfTexelAspect.value
def function[getOverlayTexelAspect, parameter[self, ulOverlayHandle]]: constant[Gets the aspect ratio of the texels in the overlay. Defaults to 1.0] variable[fn] assign[=] name[self].function_table.getOverlayTexelAspect variable[pfTexelAspect] assign[=] call[name[c_float], parameter[]] variable[result] assign[=] call[name[fn], parameter[name[ulOverlayHandle], call[name[byref], parameter[name[pfTexelAspect]]]]] return[tuple[[<ast.Name object at 0x7da204623fd0>, <ast.Attribute object at 0x7da2046210f0>]]]
keyword[def] identifier[getOverlayTexelAspect] ( identifier[self] , identifier[ulOverlayHandle] ): literal[string] identifier[fn] = identifier[self] . identifier[function_table] . identifier[getOverlayTexelAspect] identifier[pfTexelAspect] = identifier[c_float] () identifier[result] = identifier[fn] ( identifier[ulOverlayHandle] , identifier[byref] ( identifier[pfTexelAspect] )) keyword[return] identifier[result] , identifier[pfTexelAspect] . identifier[value]
def getOverlayTexelAspect(self, ulOverlayHandle): """Gets the aspect ratio of the texels in the overlay. Defaults to 1.0""" fn = self.function_table.getOverlayTexelAspect pfTexelAspect = c_float() result = fn(ulOverlayHandle, byref(pfTexelAspect)) return (result, pfTexelAspect.value)
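getOverlayTexelAspect follows the usual ctypes out-parameter pattern: allocate a c_float, pass it with byref, read .value afterwards. A standalone sketch of that pattern with a stand-in for the OpenVR function-table entry (the fake callable is an assumption):

    from ctypes import CFUNCTYPE, POINTER, byref, c_float, c_uint64

    # hypothetical stand-in: writes 1.0 through the pointer, returns 0 for success
    @CFUNCTYPE(c_uint64, c_uint64, POINTER(c_float))
    def fake_get_texel_aspect(handle, out_aspect):
        out_aspect[0] = 1.0
        return 0

    pfTexelAspect = c_float()
    result = fake_get_texel_aspect(7, byref(pfTexelAspect))
    print(result, pfTexelAspect.value)  # 0 1.0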
def __load_identities(self, identities, uuid, verbose): """Store identities""" self.log("-- loading identities", verbose) for identity in identities: try: api.add_identity(self.db, identity.source, identity.email, identity.name, identity.username, uuid) self.new_uids.add(uuid) except AlreadyExistsError as e: self.warning(str(e), verbose) with self.db.connect() as session: stored_identity = find_identity(session, e.eid) stored_uuid = stored_identity.uuid if uuid != stored_uuid: msg = "%s is already assigned to %s. Merging." % (uuid, stored_uuid) self.warning(msg, verbose) api.merge_unique_identities(self.db, uuid, stored_uuid) if uuid in self.new_uids: self.new_uids.remove(uuid) self.new_uids.add(stored_uuid) uuid = stored_uuid self.log("-- identities loaded", verbose) return uuid
def function[__load_identities, parameter[self, identities, uuid, verbose]]: constant[Store identities] call[name[self].log, parameter[constant[-- loading identities], name[verbose]]] for taget[name[identity]] in starred[name[identities]] begin[:] <ast.Try object at 0x7da1b0c647c0> call[name[self].log, parameter[constant[-- identities loaded], name[verbose]]] return[name[uuid]]
keyword[def] identifier[__load_identities] ( identifier[self] , identifier[identities] , identifier[uuid] , identifier[verbose] ): literal[string] identifier[self] . identifier[log] ( literal[string] , identifier[verbose] ) keyword[for] identifier[identity] keyword[in] identifier[identities] : keyword[try] : identifier[api] . identifier[add_identity] ( identifier[self] . identifier[db] , identifier[identity] . identifier[source] , identifier[identity] . identifier[email] , identifier[identity] . identifier[name] , identifier[identity] . identifier[username] , identifier[uuid] ) identifier[self] . identifier[new_uids] . identifier[add] ( identifier[uuid] ) keyword[except] identifier[AlreadyExistsError] keyword[as] identifier[e] : identifier[self] . identifier[warning] ( identifier[str] ( identifier[e] ), identifier[verbose] ) keyword[with] identifier[self] . identifier[db] . identifier[connect] () keyword[as] identifier[session] : identifier[stored_identity] = identifier[find_identity] ( identifier[session] , identifier[e] . identifier[eid] ) identifier[stored_uuid] = identifier[stored_identity] . identifier[uuid] keyword[if] identifier[uuid] != identifier[stored_uuid] : identifier[msg] = literal[string] %( identifier[uuid] , identifier[stored_uuid] ) identifier[self] . identifier[warning] ( identifier[msg] , identifier[verbose] ) identifier[api] . identifier[merge_unique_identities] ( identifier[self] . identifier[db] , identifier[uuid] , identifier[stored_uuid] ) keyword[if] identifier[uuid] keyword[in] identifier[self] . identifier[new_uids] : identifier[self] . identifier[new_uids] . identifier[remove] ( identifier[uuid] ) identifier[self] . identifier[new_uids] . identifier[add] ( identifier[stored_uuid] ) identifier[uuid] = identifier[stored_uuid] identifier[self] . identifier[log] ( literal[string] , identifier[verbose] ) keyword[return] identifier[uuid]
def __load_identities(self, identities, uuid, verbose): """Store identities""" self.log('-- loading identities', verbose) for identity in identities: try: api.add_identity(self.db, identity.source, identity.email, identity.name, identity.username, uuid) self.new_uids.add(uuid) # depends on [control=['try'], data=[]] except AlreadyExistsError as e: self.warning(str(e), verbose) with self.db.connect() as session: stored_identity = find_identity(session, e.eid) stored_uuid = stored_identity.uuid # depends on [control=['with'], data=['session']] if uuid != stored_uuid: msg = '%s is already assigned to %s. Merging.' % (uuid, stored_uuid) self.warning(msg, verbose) api.merge_unique_identities(self.db, uuid, stored_uuid) if uuid in self.new_uids: self.new_uids.remove(uuid) # depends on [control=['if'], data=['uuid']] self.new_uids.add(stored_uuid) uuid = stored_uuid # depends on [control=['if'], data=['uuid', 'stored_uuid']] # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['identity']] self.log('-- identities loaded', verbose) return uuid
def send_attachment(self, recipient_id, attachment_type, attachment_path, notification_type=NotificationType.regular): """Send an attachment to the specified recipient using local path. Input: recipient_id: recipient id to send to attachment_type: type of attachment (image, video, audio, file) attachment_path: Path of attachment Output: Response from API as <dict> """ payload = { 'recipient': { 'id': recipient_id }, 'notification_type': notification_type, 'message': { 'attachment': { 'type': attachment_type, 'payload': {} } }, 'filedata': (os.path.basename(attachment_path), open(attachment_path, 'rb')) } multipart_data = MultipartEncoder(payload) multipart_header = { 'Content-Type': multipart_data.content_type } return requests.post(self.graph_url, data=multipart_data, params=self.auth_args, headers=multipart_header).json()
def function[send_attachment, parameter[self, recipient_id, attachment_type, attachment_path, notification_type]]: constant[Send an attachment to the specified recipient using local path. Input: recipient_id: recipient id to send to attachment_type: type of attachment (image, video, audio, file) attachment_path: Path of attachment Output: Response from API as <dict> ] variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d57e80>, <ast.Constant object at 0x7da1b1d544c0>, <ast.Constant object at 0x7da1b1d57d60>, <ast.Constant object at 0x7da1b1d57730>], [<ast.Set object at 0x7da1b1d542e0>, <ast.Name object at 0x7da1b1d549a0>, <ast.Set object at 0x7da1b1d547c0>, <ast.Tuple object at 0x7da1b1d56b90>]] variable[multipart_data] assign[=] call[name[MultipartEncoder], parameter[name[payload]]] variable[multipart_header] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d55ba0>], [<ast.Attribute object at 0x7da1b1d551e0>]] return[call[call[name[requests].post, parameter[name[self].graph_url]].json, parameter[]]]
keyword[def] identifier[send_attachment] ( identifier[self] , identifier[recipient_id] , identifier[attachment_type] , identifier[attachment_path] , identifier[notification_type] = identifier[NotificationType] . identifier[regular] ): literal[string] identifier[payload] ={ literal[string] :{ literal[string] : identifier[recipient_id] }, literal[string] : identifier[notification_type] , literal[string] :{ literal[string] :{ literal[string] : identifier[attachment_type] , literal[string] :{} } }, literal[string] :( identifier[os] . identifier[path] . identifier[basename] ( identifier[attachment_path] ), identifier[open] ( identifier[attachment_path] , literal[string] )) } identifier[multipart_data] = identifier[MultipartEncoder] ( identifier[payload] ) identifier[multipart_header] ={ literal[string] : identifier[multipart_data] . identifier[content_type] } keyword[return] identifier[requests] . identifier[post] ( identifier[self] . identifier[graph_url] , identifier[data] = identifier[multipart_data] , identifier[params] = identifier[self] . identifier[auth_args] , identifier[headers] = identifier[multipart_header] ). identifier[json] ()
def send_attachment(self, recipient_id, attachment_type, attachment_path, notification_type=NotificationType.regular): """Send an attachment to the specified recipient using local path. Input: recipient_id: recipient id to send to attachment_type: type of attachment (image, video, audio, file) attachment_path: Path of attachment Output: Response from API as <dict> """ payload = {'recipient': {'id': recipient_id}, 'notification_type': notification_type, 'message': {'attachment': {'type': attachment_type, 'payload': {}}}, 'filedata': (os.path.basename(attachment_path), open(attachment_path, 'rb'))} multipart_data = MultipartEncoder(payload) multipart_header = {'Content-Type': multipart_data.content_type} return requests.post(self.graph_url, data=multipart_data, params=self.auth_args, headers=multipart_header).json()
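One caveat worth noting: requests_toolbelt's MultipartEncoder only accepts flat string or file-tuple field values, so nested parts such as 'recipient' are normally serialized with json.dumps before encoding; a hedged sketch of that flattening (the Graph API URL and token are placeholders, not taken from the snippet):

    import json
    import os

    import requests
    from requests_toolbelt import MultipartEncoder

    def send_attachment_flat(recipient_id, attachment_type, attachment_path):
        fields = {
            # nested JSON parts become strings for the multipart encoder
            'recipient': json.dumps({'id': recipient_id}),
            'message': json.dumps({'attachment': {'type': attachment_type, 'payload': {}}}),
            'filedata': (os.path.basename(attachment_path), open(attachment_path, 'rb')),
        }
        data = MultipartEncoder(fields)
        return requests.post('https://graph.facebook.com/v2.6/me/messages',  # assumed endpoint
                             params={'access_token': 'PAGE_TOKEN'},          # placeholder
                             data=data,
                             headers={'Content-Type': data.content_type}).json()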
def _get_classpath(self, workunit_factory): """Returns the bootstrapped ivy classpath as a list of jar paths. :raises: Bootstrapper.Error if the classpath could not be bootstrapped """ if not self._classpath: self._classpath = self._bootstrap_ivy_classpath(workunit_factory) return self._classpath
def function[_get_classpath, parameter[self, workunit_factory]]: constant[Returns the bootstrapped ivy classpath as a list of jar paths. :raises: Bootstrapper.Error if the classpath could not be bootstrapped ] if <ast.UnaryOp object at 0x7da1b1e5f5e0> begin[:] name[self]._classpath assign[=] call[name[self]._bootstrap_ivy_classpath, parameter[name[workunit_factory]]] return[name[self]._classpath]
keyword[def] identifier[_get_classpath] ( identifier[self] , identifier[workunit_factory] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_classpath] : identifier[self] . identifier[_classpath] = identifier[self] . identifier[_bootstrap_ivy_classpath] ( identifier[workunit_factory] ) keyword[return] identifier[self] . identifier[_classpath]
def _get_classpath(self, workunit_factory): """Returns the bootstrapped ivy classpath as a list of jar paths. :raises: Bootstrapper.Error if the classpath could not be bootstrapped """ if not self._classpath: self._classpath = self._bootstrap_ivy_classpath(workunit_factory) # depends on [control=['if'], data=[]] return self._classpath
def register_handler(self, handler): """ Register a new namespace handler. """ self._handlers[handler.namespace] = handler handler.registered(self)
def function[register_handler, parameter[self, handler]]: constant[ Register a new namespace handler. ] call[name[self]._handlers][name[handler].namespace] assign[=] name[handler] call[name[handler].registered, parameter[name[self]]]
keyword[def] identifier[register_handler] ( identifier[self] , identifier[handler] ): literal[string] identifier[self] . identifier[_handlers] [ identifier[handler] . identifier[namespace] ]= identifier[handler] identifier[handler] . identifier[registered] ( identifier[self] )
def register_handler(self, handler): """ Register a new namespace handler. """ self._handlers[handler.namespace] = handler handler.registered(self)
def nlmsg_find_attr(nlh, hdrlen, attrtype): """Find a specific attribute in a Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L231 Positional arguments: nlh -- Netlink message header (nlmsghdr class instance). hdrlen -- length of family specific header (integer). attrtype -- type of attribute to look for (integer). Returns: The first attribute which matches the specified type (nlattr class instance). """ return nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), attrtype)
def function[nlmsg_find_attr, parameter[nlh, hdrlen, attrtype]]: constant[Find a specific attribute in a Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L231 Positional arguments: nlh -- Netlink message header (nlmsghdr class instance). hdrlen -- length of family specific header (integer). attrtype -- type of attribute to look for (integer). Returns: The first attribute which matches the specified type (nlattr class instance). ] return[call[name[nla_find], parameter[call[name[nlmsg_attrdata], parameter[name[nlh], name[hdrlen]]], call[name[nlmsg_attrlen], parameter[name[nlh], name[hdrlen]]], name[attrtype]]]]
keyword[def] identifier[nlmsg_find_attr] ( identifier[nlh] , identifier[hdrlen] , identifier[attrtype] ): literal[string] keyword[return] identifier[nla_find] ( identifier[nlmsg_attrdata] ( identifier[nlh] , identifier[hdrlen] ), identifier[nlmsg_attrlen] ( identifier[nlh] , identifier[hdrlen] ), identifier[attrtype] )
def nlmsg_find_attr(nlh, hdrlen, attrtype): """Find a specific attribute in a Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L231 Positional arguments: nlh -- Netlink message header (nlmsghdr class instance). hdrlen -- length of family specific header (integer). attrtype -- type of attribute to look for (integer). Returns: The first attribute which matches the specified type (nlattr class instance). """ return nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), attrtype)
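nlmsg_find_attr simply delegates to a linear scan over the message's attribute region. A pure-Python analogue of that lookup contract (this is not the libnl API, only an illustration of first-match-by-type):

def find_attr(attrs, attrtype):
    """Return the first (type, value) attribute matching attrtype, else None."""
    return next((a for a in attrs if a[0] == attrtype), None)

attrs = [(1, 'eth0'), (4, 1500), (4, 9000)]
print(find_attr(attrs, 4))   # -> (4, 1500): first match wins
print(find_attr(attrs, 99))  # -> None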
def _cumcount_array(self, ascending=True): """ Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. Notes ----- this is currently implementing sort=False (though the default is sort=True) for groupby in general """ ids, _, ngroups = self.grouper.group_info sorter = get_group_index_sorter(ids, ngroups) ids, count = ids[sorter], len(ids) if count == 0: return np.empty(0, dtype=np.int64) run = np.r_[True, ids[:-1] != ids[1:]] rep = np.diff(np.r_[np.nonzero(run)[0], count]) out = (~run).cumsum() if ascending: out -= np.repeat(out[run], rep) else: out = np.repeat(out[np.r_[run[1:], True]], rep) - out rev = np.empty(count, dtype=np.intp) rev[sorter] = np.arange(count, dtype=np.intp) return out[rev].astype(np.int64, copy=False)
def function[_cumcount_array, parameter[self, ascending]]: constant[ Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. Notes ----- this is currently implementing sort=False (though the default is sort=True) for groupby in general ] <ast.Tuple object at 0x7da18eb55ff0> assign[=] name[self].grouper.group_info variable[sorter] assign[=] call[name[get_group_index_sorter], parameter[name[ids], name[ngroups]]] <ast.Tuple object at 0x7da18eb562f0> assign[=] tuple[[<ast.Subscript object at 0x7da18eb576a0>, <ast.Call object at 0x7da18eb54c10>]] if compare[name[count] equal[==] constant[0]] begin[:] return[call[name[np].empty, parameter[constant[0]]]] variable[run] assign[=] call[name[np].r_][tuple[[<ast.Constant object at 0x7da18eb57670>, <ast.Compare object at 0x7da18eb55cf0>]]] variable[rep] assign[=] call[name[np].diff, parameter[call[name[np].r_][tuple[[<ast.Subscript object at 0x7da18eb54d60>, <ast.Name object at 0x7da18eb56ce0>]]]]] variable[out] assign[=] call[<ast.UnaryOp object at 0x7da204345030>.cumsum, parameter[]] if name[ascending] begin[:] <ast.AugAssign object at 0x7da204346350> variable[rev] assign[=] call[name[np].empty, parameter[name[count]]] call[name[rev]][name[sorter]] assign[=] call[name[np].arange, parameter[name[count]]] return[call[call[name[out]][name[rev]].astype, parameter[name[np].int64]]]
keyword[def] identifier[_cumcount_array] ( identifier[self] , identifier[ascending] = keyword[True] ): literal[string] identifier[ids] , identifier[_] , identifier[ngroups] = identifier[self] . identifier[grouper] . identifier[group_info] identifier[sorter] = identifier[get_group_index_sorter] ( identifier[ids] , identifier[ngroups] ) identifier[ids] , identifier[count] = identifier[ids] [ identifier[sorter] ], identifier[len] ( identifier[ids] ) keyword[if] identifier[count] == literal[int] : keyword[return] identifier[np] . identifier[empty] ( literal[int] , identifier[dtype] = identifier[np] . identifier[int64] ) identifier[run] = identifier[np] . identifier[r_] [ keyword[True] , identifier[ids] [:- literal[int] ]!= identifier[ids] [ literal[int] :]] identifier[rep] = identifier[np] . identifier[diff] ( identifier[np] . identifier[r_] [ identifier[np] . identifier[nonzero] ( identifier[run] )[ literal[int] ], identifier[count] ]) identifier[out] =(~ identifier[run] ). identifier[cumsum] () keyword[if] identifier[ascending] : identifier[out] -= identifier[np] . identifier[repeat] ( identifier[out] [ identifier[run] ], identifier[rep] ) keyword[else] : identifier[out] = identifier[np] . identifier[repeat] ( identifier[out] [ identifier[np] . identifier[r_] [ identifier[run] [ literal[int] :], keyword[True] ]], identifier[rep] )- identifier[out] identifier[rev] = identifier[np] . identifier[empty] ( identifier[count] , identifier[dtype] = identifier[np] . identifier[intp] ) identifier[rev] [ identifier[sorter] ]= identifier[np] . identifier[arange] ( identifier[count] , identifier[dtype] = identifier[np] . identifier[intp] ) keyword[return] identifier[out] [ identifier[rev] ]. identifier[astype] ( identifier[np] . identifier[int64] , identifier[copy] = keyword[False] )
def _cumcount_array(self, ascending=True): """ Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. Notes ----- this is currently implementing sort=False (though the default is sort=True) for groupby in general """ (ids, _, ngroups) = self.grouper.group_info sorter = get_group_index_sorter(ids, ngroups) (ids, count) = (ids[sorter], len(ids)) if count == 0: return np.empty(0, dtype=np.int64) # depends on [control=['if'], data=[]] run = np.r_[True, ids[:-1] != ids[1:]] rep = np.diff(np.r_[np.nonzero(run)[0], count]) out = (~run).cumsum() if ascending: out -= np.repeat(out[run], rep) # depends on [control=['if'], data=[]] else: out = np.repeat(out[np.r_[run[1:], True]], rep) - out rev = np.empty(count, dtype=np.intp) rev[sorter] = np.arange(count, dtype=np.intp) return out[rev].astype(np.int64, copy=False)
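The ascending branch is easiest to see on a tiny example: stable-sort the ids so groups are contiguous, take a global cumulative count, subtract the running count at each group start, then invert the sort. A self-contained numpy walk-through of exactly that trick:

import numpy as np

ids = np.array([0, 1, 0, 0, 1])              # group id per row
sorter = np.argsort(ids, kind='mergesort')   # stable sort keeps row order within groups
sids = ids[sorter]
count = len(sids)

run = np.r_[True, sids[:-1] != sids[1:]]     # True at the start of each group
rep = np.diff(np.r_[np.nonzero(run)[0], count])  # length of each group run
out = (~run).cumsum()                        # global running count
out -= np.repeat(out[run], rep)              # reset the count at each group start

rev = np.empty(count, dtype=np.intp)         # invert the sort permutation
rev[sorter] = np.arange(count, dtype=np.intp)
print(out[rev])                              # -> [0 0 1 2 1]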
def execute(self, resource, **kw): """ Execute the task and return a TaskOperationPoller. :rtype: TaskOperationPoller """ params = kw.pop('params', {}) json = kw.pop('json', None) task = self.make_request( TaskRunFailed, method='create', params=params, json=json, resource=resource) timeout = kw.pop('timeout', 5) wait_for_finish = kw.pop('wait_for_finish', True) return TaskOperationPoller( task=task, timeout=timeout, wait_for_finish=wait_for_finish, **kw)
def function[execute, parameter[self, resource]]: constant[ Execute the task and return a TaskOperationPoller. :rtype: TaskOperationPoller ] variable[params] assign[=] call[name[kw].pop, parameter[constant[params], dictionary[[], []]]] variable[json] assign[=] call[name[kw].pop, parameter[constant[json], constant[None]]] variable[task] assign[=] call[name[self].make_request, parameter[name[TaskRunFailed]]] variable[timeout] assign[=] call[name[kw].pop, parameter[constant[timeout], constant[5]]] variable[wait_for_finish] assign[=] call[name[kw].pop, parameter[constant[wait_for_finish], constant[True]]] return[call[name[TaskOperationPoller], parameter[]]]
keyword[def] identifier[execute] ( identifier[self] , identifier[resource] ,** identifier[kw] ): literal[string] identifier[params] = identifier[kw] . identifier[pop] ( literal[string] ,{}) identifier[json] = identifier[kw] . identifier[pop] ( literal[string] , keyword[None] ) identifier[task] = identifier[self] . identifier[make_request] ( identifier[TaskRunFailed] , identifier[method] = literal[string] , identifier[params] = identifier[params] , identifier[json] = identifier[json] , identifier[resource] = identifier[resource] ) identifier[timeout] = identifier[kw] . identifier[pop] ( literal[string] , literal[int] ) identifier[wait_for_finish] = identifier[kw] . identifier[pop] ( literal[string] , keyword[True] ) keyword[return] identifier[TaskOperationPoller] ( identifier[task] = identifier[task] , identifier[timeout] = identifier[timeout] , identifier[wait_for_finish] = identifier[wait_for_finish] , ** identifier[kw] )
def execute(self, resource, **kw): """ Execute the task and return a TaskOperationPoller. :rtype: TaskOperationPoller """ params = kw.pop('params', {}) json = kw.pop('json', None) task = self.make_request(TaskRunFailed, method='create', params=params, json=json, resource=resource) timeout = kw.pop('timeout', 5) wait_for_finish = kw.pop('wait_for_finish', True) return TaskOperationPoller(task=task, timeout=timeout, wait_for_finish=wait_for_finish, **kw)
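execute follows the pop-optional-kwargs pattern: named settings are pulled out of kw with defaults, and whatever remains is forwarded downstream untouched. A runnable standalone illustration (names are hypothetical):

def run_task(resource, **kw):
    params = kw.pop('params', {})
    timeout = kw.pop('timeout', 5)
    wait_for_finish = kw.pop('wait_for_finish', True)
    # everything not popped above is passed through untouched
    return {'resource': resource, 'params': params, 'timeout': timeout,
            'wait_for_finish': wait_for_finish, 'forwarded': kw}

print(run_task('engine', params={'filter': 'x'}, owner='me'))
# {'resource': 'engine', 'params': {'filter': 'x'}, 'timeout': 5,
#  'wait_for_finish': True, 'forwarded': {'owner': 'me'}}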
def get_metric_values(self):
    """
    Get the faked metrics, for all metric groups and all resources that
    have been prepared on the manager object of this context object.

    Returns:

      iterable of tuple (group_name, iterable of values): The faked
        metrics, in the order they had been added, where:

        group_name (string): Metric group name.

        values (:class:`~zhmcclient.FakedMetricObjectValues`):
          The metric values for one resource at one point in time.
    """
    group_names = self.properties.get('metric-groups', None)
    if not group_names:
        group_names = self.manager.get_metric_values_group_names()
    ret = []
    for group_name in group_names:
        try:
            mo_val = self.manager.get_metric_values(group_name)
            ret_item = (group_name, mo_val)
            ret.append(ret_item)
        except ValueError:
            pass  # ignore metric groups without metric values
    return ret
def function[get_metric_values, parameter[self]]: constant[ Get the faked metrics, for all metric groups and all resources that have been prepared on the manager object of this context object. Returns: iterable of tuple (group_name, iterable of values): The faked metrics, in the order they had been added, where: group_name (string): Metric group name. values (:class:~zhmcclient.FakedMetricObjectValues`): The metric values for one resource at one point in time. ] variable[group_names] assign[=] call[name[self].properties.get, parameter[constant[metric-groups], constant[None]]] if <ast.UnaryOp object at 0x7da18bc72230> begin[:] variable[group_names] assign[=] call[name[self].manager.get_metric_values_group_names, parameter[]] variable[ret] assign[=] list[[]] for taget[name[group_name]] in starred[name[group_names]] begin[:] <ast.Try object at 0x7da18bc704c0> return[name[ret]]
keyword[def] identifier[get_metric_values] ( identifier[self] ): literal[string] identifier[group_names] = identifier[self] . identifier[properties] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] keyword[not] identifier[group_names] : identifier[group_names] = identifier[self] . identifier[manager] . identifier[get_metric_values_group_names] () identifier[ret] =[] keyword[for] identifier[group_name] keyword[in] identifier[group_names] : keyword[try] : identifier[mo_val] = identifier[self] . identifier[manager] . identifier[get_metric_values] ( identifier[group_name] ) identifier[ret_item] =( identifier[group_name] , identifier[mo_val] ) identifier[ret] . identifier[append] ( identifier[ret_item] ) keyword[except] identifier[ValueError] : keyword[pass] keyword[return] identifier[ret]
def get_metric_values(self): """ Get the faked metrics, for all metric groups and all resources that have been prepared on the manager object of this context object. Returns: iterable of tuple (group_name, iterable of values): The faked metrics, in the order they had been added, where: group_name (string): Metric group name. values (:class:~zhmcclient.FakedMetricObjectValues`): The metric values for one resource at one point in time. """ group_names = self.properties.get('metric-groups', None) if not group_names: group_names = self.manager.get_metric_values_group_names() # depends on [control=['if'], data=[]] ret = [] for group_name in group_names: try: mo_val = self.manager.get_metric_values(group_name) ret_item = (group_name, mo_val) ret.append(ret_item) # depends on [control=['try'], data=[]] except ValueError: pass # ignore metric groups without metric values # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['group_name']] return ret
def set_proxy(self, proxy):
    """Parse given proxy information and store parsed values.
    Note that only http:// proxies are supported, both for ftp://
    and http:// URLs.
    """
    self.proxy = proxy
    self.proxytype = "http"
    self.proxyauth = None
    if not self.proxy:
        return
    proxyurl = urlparse.urlparse(self.proxy)
    self.proxytype = proxyurl.scheme
    if self.proxytype not in ('http', 'https'):
        # Note that invalid proxies might raise TypeError in urllib2,
        # so make sure to stop checking at this point, not later.
        msg = _("Proxy value `%(proxy)s' must start with 'http:' or 'https:'.") \
              % dict(proxy=proxy)
        raise LinkCheckerError(msg)
    if self.ignore_proxy_host():
        # log proxy without auth info
        log.debug(LOG_CHECK, "ignoring proxy %r", self.proxy)
        self.add_info(_("Ignoring proxy setting `%(proxy)s'.") % dict(proxy=proxy))
        self.proxy = None
        return
    log.debug(LOG_CHECK, "using proxy %r", self.proxy)
    self.add_info(_("Using proxy `%(proxy)s'.") % dict(proxy=self.proxy))
    self.proxyhost = proxyurl.hostname
    self.proxyport = proxyurl.port
    if proxyurl.username is not None:
        username = proxyurl.username
        # The parsed URL, not the raw proxy string, carries the password.
        password = proxyurl.password if proxyurl.password is not None else ""
        auth = "%s:%s" % (username, password)
        self.proxyauth = "Basic " + httputil.encode_base64(auth)
def function[set_proxy, parameter[self, proxy]]: constant[Parse given proxy information and store parsed values. Note that only http:// proxies are supported, both for ftp:// and http:// URLs. ] name[self].proxy assign[=] name[proxy] name[self].proxytype assign[=] constant[http] name[self].proxyauth assign[=] constant[None] if <ast.UnaryOp object at 0x7da18dc04850> begin[:] return[None] variable[proxyurl] assign[=] call[name[urlparse].urlparse, parameter[name[self].proxy]] name[self].proxytype assign[=] name[proxyurl].scheme if compare[name[self].proxytype <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20e956500>, <ast.Constant object at 0x7da20e954700>]]] begin[:] variable[msg] assign[=] binary_operation[call[name[_], parameter[constant[Proxy value `%(proxy)s' must start with 'http:' or 'https:'.]]] <ast.Mod object at 0x7da2590d6920> call[name[dict], parameter[]]] <ast.Raise object at 0x7da20e9578b0> if call[name[self].ignore_proxy_host, parameter[]] begin[:] call[name[log].debug, parameter[name[LOG_CHECK], constant[ignoring proxy %r], name[self].proxy]] call[name[self].add_info, parameter[binary_operation[call[name[_], parameter[constant[Ignoring proxy setting `%(proxy)s'.]]] <ast.Mod object at 0x7da2590d6920> call[name[dict], parameter[]]]]] name[self].proxy assign[=] constant[None] return[None] call[name[log].debug, parameter[name[LOG_CHECK], constant[using proxy %r], name[self].proxy]] call[name[self].add_info, parameter[binary_operation[call[name[_], parameter[constant[Using proxy `%(proxy)s'.]]] <ast.Mod object at 0x7da2590d6920> call[name[dict], parameter[]]]]] name[self].proxyhost assign[=] name[proxyurl].hostname name[self].proxyport assign[=] name[proxyurl].port if compare[name[proxyurl].username is_not constant[None]] begin[:] variable[username] assign[=] name[proxyurl].username variable[password] assign[=] <ast.IfExp object at 0x7da18fe905e0> variable[auth] assign[=] binary_operation[constant[%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18fe92bf0>, <ast.Name object at 0x7da18fe927a0>]]] name[self].proxyauth assign[=] binary_operation[constant[Basic ] + call[name[httputil].encode_base64, parameter[name[auth]]]]
keyword[def] identifier[set_proxy] ( identifier[self] , identifier[proxy] ): literal[string] identifier[self] . identifier[proxy] = identifier[proxy] identifier[self] . identifier[proxytype] = literal[string] identifier[self] . identifier[proxyauth] = keyword[None] keyword[if] keyword[not] identifier[self] . identifier[proxy] : keyword[return] identifier[proxyurl] = identifier[urlparse] . identifier[urlparse] ( identifier[self] . identifier[proxy] ) identifier[self] . identifier[proxytype] = identifier[proxyurl] . identifier[scheme] keyword[if] identifier[self] . identifier[proxytype] keyword[not] keyword[in] ( literal[string] , literal[string] ): identifier[msg] = identifier[_] ( literal[string] )% identifier[dict] ( identifier[proxy] = identifier[proxy] ) keyword[raise] identifier[LinkCheckerError] ( identifier[msg] ) keyword[if] identifier[self] . identifier[ignore_proxy_host] (): identifier[log] . identifier[debug] ( identifier[LOG_CHECK] , literal[string] , identifier[self] . identifier[proxy] ) identifier[self] . identifier[add_info] ( identifier[_] ( literal[string] )% identifier[dict] ( identifier[proxy] = identifier[proxy] )) identifier[self] . identifier[proxy] = keyword[None] keyword[return] identifier[log] . identifier[debug] ( identifier[LOG_CHECK] , literal[string] , identifier[self] . identifier[proxy] ) identifier[self] . identifier[add_info] ( identifier[_] ( literal[string] )% identifier[dict] ( identifier[proxy] = identifier[self] . identifier[proxy] )) identifier[self] . identifier[proxyhost] = identifier[proxyurl] . identifier[hostname] identifier[self] . identifier[proxyport] = identifier[proxyurl] . identifier[port] keyword[if] identifier[proxyurl] . identifier[username] keyword[is] keyword[not] keyword[None] : identifier[username] = identifier[proxyurl] . identifier[username] identifier[password] = identifier[proxyurl] . identifier[password] keyword[if] identifier[proxy] . identifier[password] keyword[is] keyword[not] keyword[None] keyword[else] literal[string] identifier[auth] = literal[string] %( identifier[username] , identifier[password] ) identifier[self] . identifier[proxyauth] = literal[string] + identifier[httputil] . identifier[encode_base64] ( identifier[auth] )
def set_proxy(self, proxy):
    """Parse given proxy information and store parsed values.
    Note that only http:// proxies are supported, both for ftp://
    and http:// URLs.
    """
    self.proxy = proxy
    self.proxytype = 'http'
    self.proxyauth = None
    if not self.proxy:
        return # depends on [control=['if'], data=[]]
    proxyurl = urlparse.urlparse(self.proxy)
    self.proxytype = proxyurl.scheme
    if self.proxytype not in ('http', 'https'):
        # Note that invalid proxies might raise TypeError in urllib2,
        # so make sure to stop checking at this point, not later.
        msg = _("Proxy value `%(proxy)s' must start with 'http:' or 'https:'.") % dict(proxy=proxy)
        raise LinkCheckerError(msg) # depends on [control=['if'], data=[]]
    if self.ignore_proxy_host():
        # log proxy without auth info
        log.debug(LOG_CHECK, 'ignoring proxy %r', self.proxy)
        self.add_info(_("Ignoring proxy setting `%(proxy)s'.") % dict(proxy=proxy))
        self.proxy = None
        return # depends on [control=['if'], data=[]]
    log.debug(LOG_CHECK, 'using proxy %r', self.proxy)
    self.add_info(_("Using proxy `%(proxy)s'.") % dict(proxy=self.proxy))
    self.proxyhost = proxyurl.hostname
    self.proxyport = proxyurl.port
    if proxyurl.username is not None:
        username = proxyurl.username
        # The parsed URL, not the raw proxy string, carries the password.
        password = proxyurl.password if proxyurl.password is not None else ''
        auth = '%s:%s' % (username, password)
        self.proxyauth = 'Basic ' + httputil.encode_base64(auth) # depends on [control=['if'], data=[]]
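The parsing above leans on the URL parser exposing scheme, hostname, port, and credentials. A self-contained Python 3 sketch of the same extraction, with the standard-library base64 module standing in for the httputil.encode_base64 helper used above:

import base64
from urllib.parse import urlparse

proxyurl = urlparse('http://alice:secret@proxy.example.com:3128')
print(proxyurl.scheme, proxyurl.hostname, proxyurl.port)
# http proxy.example.com 3128

if proxyurl.username is not None:
    password = proxyurl.password if proxyurl.password is not None else ''
    auth = '%s:%s' % (proxyurl.username, password)
    proxyauth = 'Basic ' + base64.b64encode(auth.encode('utf-8')).decode('ascii')
    print(proxyauth)  # Basic YWxpY2U6c2VjcmV0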
def html_elem(e, ct, withtype=False):
    """
    Format a result element as an HTML table cell.
      @param e (list): a pair \c (value,type)
      @param ct (str): cell type (th or td)
      @param withtype (bool): add an additional cell with the element type
    """
    # Header cell; since e is a (value, type) pair, render only the value
    # when no type cell is requested
    if ct == 'th':
        return '<th>{0}</th><th>{1}</th>'.format(*e) if withtype else '<th>{}</th>'.format(e[0])
    # Content cell
    if e[1] in ('uri', 'URIRef'):
        html = u'<{0} class=val><a href="{1}" target="_other">{2}</a></{0}>'.format(ct, e[0], escape(e[0]))
    else:
        html = u'<{0} class=val>{1}</{0}>'.format(ct, escape(e[0]))
    # Create the optional cell for the type
    if withtype:
        html += u'<{0} class=typ>{1}</{0}>'.format(ct, e[1])
    return html
def function[html_elem, parameter[e, ct, withtype]]: constant[ Format a result element as an HTML table cell. @param e (list): a pair \c (value,type) @param ct (str): cell type (th or td) @param withtype (bool): add an additional cell with the element type ] if compare[name[ct] equal[==] constant[th]] begin[:] return[<ast.IfExp object at 0x7da1b26af5b0>] if compare[call[name[e]][constant[1]] in tuple[[<ast.Constant object at 0x7da18f8134c0>, <ast.Constant object at 0x7da18f8100d0>]]] begin[:] variable[html] assign[=] call[constant[<{0} class=val><a href="{1}" target="_other">{2}</a></{0}>].format, parameter[name[ct], call[name[e]][constant[0]], call[name[escape], parameter[call[name[e]][constant[0]]]]]] if name[withtype] begin[:] <ast.AugAssign object at 0x7da18f812950> return[name[html]]
keyword[def] identifier[html_elem] ( identifier[e] , identifier[ct] , identifier[withtype] = keyword[False] ): literal[string] keyword[if] identifier[ct] == literal[string] : keyword[return] literal[string] . identifier[format] (* identifier[e] ) keyword[if] identifier[withtype] keyword[else] literal[string] . identifier[format] ( identifier[e] ) keyword[if] identifier[e] [ literal[int] ] keyword[in] ( literal[string] , literal[string] ): identifier[html] = literal[string] . identifier[format] ( identifier[ct] , identifier[e] [ literal[int] ], identifier[escape] ( identifier[e] [ literal[int] ])) keyword[else] : identifier[html] = literal[string] . identifier[format] ( identifier[ct] , identifier[escape] ( identifier[e] [ literal[int] ])) keyword[if] identifier[withtype] : identifier[html] += literal[string] . identifier[format] ( identifier[ct] , identifier[e] [ literal[int] ]) keyword[return] identifier[html]
def html_elem(e, ct, withtype=False): """ Format a result element as an HTML table cell. @param e (list): a pair \\c (value,type) @param ct (str): cell type (th or td) @param withtype (bool): add an additional cell with the element type """ # Header cell if ct == 'th': return '<th>{0}</th><th>{1}</th>'.format(*e) if withtype else '<th>{}</th>'.format(e) # depends on [control=['if'], data=[]] # Content cell if e[1] in ('uri', 'URIRef'): html = u'<{0} class=val><a href="{1}" target="_other">{2}</a></{0}>'.format(ct, e[0], escape(e[0])) # depends on [control=['if'], data=[]] else: html = u'<{0} class=val>{1}</{0}>'.format(ct, escape(e[0])) # Create the optional cell for the type if withtype: html += u'<{0} class=typ>{1}</{0}>'.format(ct, e[1]) # depends on [control=['if'], data=[]] return html
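The branch structure above (a link-wrapped cell for URIs, escaped text otherwise) can be tried standalone; html.escape stands in for the escape() helper the function assumes:

from html import escape

def cell(value, vtype, ct='td'):
    if vtype in ('uri', 'URIRef'):
        return '<{0} class=val><a href="{1}" target="_other">{2}</a></{0}>'.format(
            ct, value, escape(value))
    return '<{0} class=val>{1}</{0}>'.format(ct, escape(value))

print(cell('http://example.org/a', 'uri'))
print(cell('<b>literal</b>', 'literal'))
# <td class=val><a href="http://example.org/a" target="_other">http://example.org/a</a></td>
# <td class=val>&lt;b&gt;literal&lt;/b&gt;</td>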
def plot_prior_dates(self, dwidth=30, ax=None): """Plot prior chronology dates in age-depth plot""" if ax is None: ax = plt.gca() depth, probs = self.prior_dates() pat = [] for i, d in enumerate(depth): p = probs[i] z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200) zp = np.interp(x=zy, xp=z[0], fp=z[1]) pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]), np.concatenate([zy, zy[::-1]])]) pat.append(Polygon(pol.T)) p = PatchCollection(pat) p.set_label('Prior dates') ax.add_collection(p) ax.autoscale_view() ax.set_ylabel('Age (cal yr BP)') ax.set_xlabel('Depth (cm)') ax.grid(True) return ax
def function[plot_prior_dates, parameter[self, dwidth, ax]]: constant[Plot prior chronology dates in age-depth plot] if compare[name[ax] is constant[None]] begin[:] variable[ax] assign[=] call[name[plt].gca, parameter[]] <ast.Tuple object at 0x7da2045659c0> assign[=] call[name[self].prior_dates, parameter[]] variable[pat] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da20c795ff0>, <ast.Name object at 0x7da20c7940a0>]]] in starred[call[name[enumerate], parameter[name[depth]]]] begin[:] variable[p] assign[=] call[name[probs]][name[i]] variable[z] assign[=] call[name[np].array, parameter[list[[<ast.Subscript object at 0x7da20c794130>, <ast.BinOp object at 0x7da20c796260>]]]] variable[z] assign[=] call[name[z]][tuple[[<ast.Slice object at 0x7da20c794790>, <ast.Call object at 0x7da20c795570>]]] variable[zy] assign[=] call[name[np].linspace, parameter[call[name[np].min, parameter[call[name[z]][constant[0]]]], call[name[np].max, parameter[call[name[z]][constant[0]]]]]] variable[zp] assign[=] call[name[np].interp, parameter[]] variable[pol] assign[=] call[name[np].vstack, parameter[list[[<ast.Call object at 0x7da20c795900>, <ast.Call object at 0x7da20c794a90>]]]] call[name[pat].append, parameter[call[name[Polygon], parameter[name[pol].T]]]] variable[p] assign[=] call[name[PatchCollection], parameter[name[pat]]] call[name[p].set_label, parameter[constant[Prior dates]]] call[name[ax].add_collection, parameter[name[p]]] call[name[ax].autoscale_view, parameter[]] call[name[ax].set_ylabel, parameter[constant[Age (cal yr BP)]]] call[name[ax].set_xlabel, parameter[constant[Depth (cm)]]] call[name[ax].grid, parameter[constant[True]]] return[name[ax]]
keyword[def] identifier[plot_prior_dates] ( identifier[self] , identifier[dwidth] = literal[int] , identifier[ax] = keyword[None] ): literal[string] keyword[if] identifier[ax] keyword[is] keyword[None] : identifier[ax] = identifier[plt] . identifier[gca] () identifier[depth] , identifier[probs] = identifier[self] . identifier[prior_dates] () identifier[pat] =[] keyword[for] identifier[i] , identifier[d] keyword[in] identifier[enumerate] ( identifier[depth] ): identifier[p] = identifier[probs] [ identifier[i] ] identifier[z] = identifier[np] . identifier[array] ([ identifier[p] [:, literal[int] ], identifier[dwidth] * identifier[p] [:, literal[int] ]/ identifier[np] . identifier[sum] ( identifier[p] [:, literal[int] ])]) identifier[z] = identifier[z] [:, identifier[z] [ literal[int] ]. identifier[argsort] ( identifier[kind] = literal[string] )] identifier[zy] = identifier[np] . identifier[linspace] ( identifier[np] . identifier[min] ( identifier[z] [ literal[int] ]), identifier[np] . identifier[max] ( identifier[z] [ literal[int] ]), identifier[num] = literal[int] ) identifier[zp] = identifier[np] . identifier[interp] ( identifier[x] = identifier[zy] , identifier[xp] = identifier[z] [ literal[int] ], identifier[fp] = identifier[z] [ literal[int] ]) identifier[pol] = identifier[np] . identifier[vstack] ([ identifier[np] . identifier[concatenate] ([ identifier[d] + identifier[zp] , identifier[d] - identifier[zp] [::- literal[int] ]]), identifier[np] . identifier[concatenate] ([ identifier[zy] , identifier[zy] [::- literal[int] ]])]) identifier[pat] . identifier[append] ( identifier[Polygon] ( identifier[pol] . identifier[T] )) identifier[p] = identifier[PatchCollection] ( identifier[pat] ) identifier[p] . identifier[set_label] ( literal[string] ) identifier[ax] . identifier[add_collection] ( identifier[p] ) identifier[ax] . identifier[autoscale_view] () identifier[ax] . identifier[set_ylabel] ( literal[string] ) identifier[ax] . identifier[set_xlabel] ( literal[string] ) identifier[ax] . identifier[grid] ( keyword[True] ) keyword[return] identifier[ax]
def plot_prior_dates(self, dwidth=30, ax=None): """Plot prior chronology dates in age-depth plot""" if ax is None: ax = plt.gca() # depends on [control=['if'], data=['ax']] (depth, probs) = self.prior_dates() pat = [] for (i, d) in enumerate(depth): p = probs[i] z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200) zp = np.interp(x=zy, xp=z[0], fp=z[1]) pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]), np.concatenate([zy, zy[::-1]])]) pat.append(Polygon(pol.T)) # depends on [control=['for'], data=[]] p = PatchCollection(pat) p.set_label('Prior dates') ax.add_collection(p) ax.autoscale_view() ax.set_ylabel('Age (cal yr BP)') ax.set_xlabel('Depth (cm)') ax.grid(True) return ax
def get_content(self, offset, size): """Return the specified number of bytes from the current section.""" return _bfd.section_get_content(self.bfd, self._ptr, offset, size)
def function[get_content, parameter[self, offset, size]]: constant[Return the specified number of bytes from the current section.] return[call[name[_bfd].section_get_content, parameter[name[self].bfd, name[self]._ptr, name[offset], name[size]]]]
keyword[def] identifier[get_content] ( identifier[self] , identifier[offset] , identifier[size] ): literal[string] keyword[return] identifier[_bfd] . identifier[section_get_content] ( identifier[self] . identifier[bfd] , identifier[self] . identifier[_ptr] , identifier[offset] , identifier[size] )
def get_content(self, offset, size): """Return the specified number of bytes from the current section.""" return _bfd.section_get_content(self.bfd, self._ptr, offset, size)
def whisper(self, peer, msg_p): """ Send message to single peer, specified as a UUID string Destroys message after sending """ return lib.zyre_whisper(self._as_parameter_, peer, byref(czmq.zmsg_p.from_param(msg_p)))
def function[whisper, parameter[self, peer, msg_p]]: constant[ Send message to single peer, specified as a UUID string Destroys message after sending ] return[call[name[lib].zyre_whisper, parameter[name[self]._as_parameter_, name[peer], call[name[byref], parameter[call[name[czmq].zmsg_p.from_param, parameter[name[msg_p]]]]]]]]
keyword[def] identifier[whisper] ( identifier[self] , identifier[peer] , identifier[msg_p] ): literal[string] keyword[return] identifier[lib] . identifier[zyre_whisper] ( identifier[self] . identifier[_as_parameter_] , identifier[peer] , identifier[byref] ( identifier[czmq] . identifier[zmsg_p] . identifier[from_param] ( identifier[msg_p] )))
def whisper(self, peer, msg_p): """ Send message to single peer, specified as a UUID string Destroys message after sending """ return lib.zyre_whisper(self._as_parameter_, peer, byref(czmq.zmsg_p.from_param(msg_p)))
def get_m2m_value(session, request, obj): """ Set m2m value for model obj from request params like "group[]" :Parameters: - `session`: SQLAlchemy DBSession - `request`: request as dict - `obj`: model instance """ params = {} ''' m2m_request: {u'company[]': [u'["id", 1]'], u'professions[]': [u'["id", 2]', u'["id", 3]']} ''' m2m_request = {k: v for k, v in list(request.items()) if k.endswith('[]')} for k, v in list(m2m_request.items()): key = k[:-2] relation = getattr(obj.__class__, key, False) if not relation: relation = getattr(obj, key, False) if not relation: continue # pragma: no cover value = get_m2m_objs(session, relation, v) if value is None: continue if relation.property.uselist is False: if value: value = value[0] else: value = None params[key] = value return params
def function[get_m2m_value, parameter[session, request, obj]]: constant[ Set m2m value for model obj from request params like "group[]" :Parameters: - `session`: SQLAlchemy DBSession - `request`: request as dict - `obj`: model instance ] variable[params] assign[=] dictionary[[], []] constant[ m2m_request: {u'company[]': [u'["id", 1]'], u'professions[]': [u'["id", 2]', u'["id", 3]']} ] variable[m2m_request] assign[=] <ast.DictComp object at 0x7da2054a4be0> for taget[tuple[[<ast.Name object at 0x7da20e9b2200>, <ast.Name object at 0x7da20e9b3100>]]] in starred[call[name[list], parameter[call[name[m2m_request].items, parameter[]]]]] begin[:] variable[key] assign[=] call[name[k]][<ast.Slice object at 0x7da20e9b2b60>] variable[relation] assign[=] call[name[getattr], parameter[name[obj].__class__, name[key], constant[False]]] if <ast.UnaryOp object at 0x7da20e9b3c70> begin[:] variable[relation] assign[=] call[name[getattr], parameter[name[obj], name[key], constant[False]]] if <ast.UnaryOp object at 0x7da20e9b2c20> begin[:] continue variable[value] assign[=] call[name[get_m2m_objs], parameter[name[session], name[relation], name[v]]] if compare[name[value] is constant[None]] begin[:] continue if compare[name[relation].property.uselist is constant[False]] begin[:] if name[value] begin[:] variable[value] assign[=] call[name[value]][constant[0]] call[name[params]][name[key]] assign[=] name[value] return[name[params]]
keyword[def] identifier[get_m2m_value] ( identifier[session] , identifier[request] , identifier[obj] ): literal[string] identifier[params] ={} literal[string] identifier[m2m_request] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[list] ( identifier[request] . identifier[items] ()) keyword[if] identifier[k] . identifier[endswith] ( literal[string] )} keyword[for] identifier[k] , identifier[v] keyword[in] identifier[list] ( identifier[m2m_request] . identifier[items] ()): identifier[key] = identifier[k] [:- literal[int] ] identifier[relation] = identifier[getattr] ( identifier[obj] . identifier[__class__] , identifier[key] , keyword[False] ) keyword[if] keyword[not] identifier[relation] : identifier[relation] = identifier[getattr] ( identifier[obj] , identifier[key] , keyword[False] ) keyword[if] keyword[not] identifier[relation] : keyword[continue] identifier[value] = identifier[get_m2m_objs] ( identifier[session] , identifier[relation] , identifier[v] ) keyword[if] identifier[value] keyword[is] keyword[None] : keyword[continue] keyword[if] identifier[relation] . identifier[property] . identifier[uselist] keyword[is] keyword[False] : keyword[if] identifier[value] : identifier[value] = identifier[value] [ literal[int] ] keyword[else] : identifier[value] = keyword[None] identifier[params] [ identifier[key] ]= identifier[value] keyword[return] identifier[params]
def get_m2m_value(session, request, obj): """ Set m2m value for model obj from request params like "group[]" :Parameters: - `session`: SQLAlchemy DBSession - `request`: request as dict - `obj`: model instance """ params = {} ' m2m_request:\n\n {u\'company[]\': [u\'["id", 1]\'],\n u\'professions[]\': [u\'["id", 2]\', u\'["id", 3]\']}\n ' m2m_request = {k: v for (k, v) in list(request.items()) if k.endswith('[]')} for (k, v) in list(m2m_request.items()): key = k[:-2] relation = getattr(obj.__class__, key, False) if not relation: relation = getattr(obj, key, False) # depends on [control=['if'], data=[]] if not relation: continue # pragma: no cover # depends on [control=['if'], data=[]] value = get_m2m_objs(session, relation, v) if value is None: continue # depends on [control=['if'], data=[]] if relation.property.uselist is False: if value: value = value[0] # depends on [control=['if'], data=[]] else: value = None # depends on [control=['if'], data=[]] params[key] = value # depends on [control=['for'], data=[]] return params
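The first step above, collecting the array-style request keys and stripping their [] suffix, is runnable on its own:

request = {'name': 'Acme',
           'company[]': ['["id", 1]'],
           'professions[]': ['["id", 2]', '["id", 3]']}

m2m_request = {k: v for k, v in request.items() if k.endswith('[]')}
for k, v in m2m_request.items():
    key = k[:-2]  # drop the trailing '[]'
    print(key, v)
# company ['["id", 1]']
# professions ['["id", 2]', '["id", 3]']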
def sqrt_rc_imp(Ns,alpha,M=6): """ A truncated square root raised cosine pulse used in digital communications. The pulse shaping factor :math:`0 < \\alpha < 1` is required as well as the truncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`. Parameters ---------- Ns : number of samples per symbol alpha : excess bandwidth factor on (0, 1), e.g., 0.35 M : equals RC one-sided symbol truncation factor Returns ------- b : ndarray containing the pulse shape Notes ----- The pulse shape b is typically used as the FIR filter coefficients when forming a pulse shaped digital communications waveform. When square root raised cosine (SRC) pulse is used to generate Tx signals and at the receiver used as a matched filter (receiver FIR filter), the received signal is now raised cosine shaped, thus having zero intersymbol interference and the optimum removal of additive white noise if present at the receiver input. Examples -------- Ten samples per symbol and :math:`\\alpha = 0.35`. >>> import matplotlib.pyplot as plt >>> from numpy import arange >>> from sk_dsp_comm.digitalcom import sqrt_rc_imp >>> b = sqrt_rc_imp(10,0.35) >>> n = arange(-10*6,10*6+1) >>> plt.stem(n,b) >>> plt.show() """ # Design the filter n = np.arange(-M*Ns,M*Ns+1) b = np.zeros(len(n)) Ns *= 1.0 a = alpha for i in range(len(n)): if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(np.float).eps/2: b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a))) else: b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2)) b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a)) return b
def function[sqrt_rc_imp, parameter[Ns, alpha, M]]: constant[ A truncated square root raised cosine pulse used in digital communications. The pulse shaping factor :math:`0 < \alpha < 1` is required as well as the truncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`. Parameters ---------- Ns : number of samples per symbol alpha : excess bandwidth factor on (0, 1), e.g., 0.35 M : equals RC one-sided symbol truncation factor Returns ------- b : ndarray containing the pulse shape Notes ----- The pulse shape b is typically used as the FIR filter coefficients when forming a pulse shaped digital communications waveform. When square root raised cosine (SRC) pulse is used to generate Tx signals and at the receiver used as a matched filter (receiver FIR filter), the received signal is now raised cosine shaped, thus having zero intersymbol interference and the optimum removal of additive white noise if present at the receiver input. Examples -------- Ten samples per symbol and :math:`\alpha = 0.35`. >>> import matplotlib.pyplot as plt >>> from numpy import arange >>> from sk_dsp_comm.digitalcom import sqrt_rc_imp >>> b = sqrt_rc_imp(10,0.35) >>> n = arange(-10*6,10*6+1) >>> plt.stem(n,b) >>> plt.show() ] variable[n] assign[=] call[name[np].arange, parameter[binary_operation[<ast.UnaryOp object at 0x7da18f58e620> * name[Ns]], binary_operation[binary_operation[name[M] * name[Ns]] + constant[1]]]] variable[b] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[n]]]]] <ast.AugAssign object at 0x7da18f58e1d0> variable[a] assign[=] name[alpha] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[n]]]]]] begin[:] if compare[call[name[abs], parameter[binary_operation[constant[1] - binary_operation[binary_operation[constant[16] * binary_operation[name[a] ** constant[2]]] * binary_operation[binary_operation[call[name[n]][name[i]] / name[Ns]] ** constant[2]]]]]] less_or_equal[<=] binary_operation[call[name[np].finfo, parameter[name[np].float]].eps / constant[2]]] begin[:] call[name[b]][name[i]] assign[=] binary_operation[binary_operation[constant[1] / constant[2.0]] * binary_operation[binary_operation[binary_operation[binary_operation[constant[1] + name[a]] * call[name[np].sin, parameter[binary_operation[binary_operation[binary_operation[constant[1] + name[a]] * name[np].pi] / binary_operation[constant[4.0] * name[a]]]]]] - binary_operation[binary_operation[constant[1] - name[a]] * call[name[np].cos, parameter[binary_operation[binary_operation[binary_operation[constant[1] - name[a]] * name[np].pi] / binary_operation[constant[4.0] * name[a]]]]]]] + binary_operation[binary_operation[binary_operation[constant[4] * name[a]] / name[np].pi] * call[name[np].sin, parameter[binary_operation[binary_operation[binary_operation[constant[1] - name[a]] * name[np].pi] / binary_operation[constant[4.0] * name[a]]]]]]]] return[name[b]]
keyword[def] identifier[sqrt_rc_imp] ( identifier[Ns] , identifier[alpha] , identifier[M] = literal[int] ): literal[string] identifier[n] = identifier[np] . identifier[arange] (- identifier[M] * identifier[Ns] , identifier[M] * identifier[Ns] + literal[int] ) identifier[b] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[n] )) identifier[Ns] *= literal[int] identifier[a] = identifier[alpha] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[n] )): keyword[if] identifier[abs] ( literal[int] - literal[int] * identifier[a] ** literal[int] *( identifier[n] [ identifier[i] ]/ identifier[Ns] )** literal[int] )<= identifier[np] . identifier[finfo] ( identifier[np] . identifier[float] ). identifier[eps] / literal[int] : identifier[b] [ identifier[i] ]= literal[int] / literal[int] *(( literal[int] + identifier[a] )* identifier[np] . identifier[sin] (( literal[int] + identifier[a] )* identifier[np] . identifier[pi] /( literal[int] * identifier[a] ))-( literal[int] - identifier[a] )* identifier[np] . identifier[cos] (( literal[int] - identifier[a] )* identifier[np] . identifier[pi] /( literal[int] * identifier[a] ))+( literal[int] * identifier[a] )/ identifier[np] . identifier[pi] * identifier[np] . identifier[sin] (( literal[int] - identifier[a] )* identifier[np] . identifier[pi] /( literal[int] * identifier[a] ))) keyword[else] : identifier[b] [ identifier[i] ]= literal[int] * identifier[a] /( identifier[np] . identifier[pi] *( literal[int] - literal[int] * identifier[a] ** literal[int] *( identifier[n] [ identifier[i] ]/ identifier[Ns] )** literal[int] )) identifier[b] [ identifier[i] ]= identifier[b] [ identifier[i] ]*( identifier[np] . identifier[cos] (( literal[int] + identifier[a] )* identifier[np] . identifier[pi] * identifier[n] [ identifier[i] ]/ identifier[Ns] )+ identifier[np] . identifier[sinc] (( literal[int] - identifier[a] )* identifier[n] [ identifier[i] ]/ identifier[Ns] )*( literal[int] - identifier[a] )* identifier[np] . identifier[pi] /( literal[int] * identifier[a] )) keyword[return] identifier[b]
def sqrt_rc_imp(Ns, alpha, M=6): """ A truncated square root raised cosine pulse used in digital communications. The pulse shaping factor :math:`0 < \\alpha < 1` is required as well as the truncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`. Parameters ---------- Ns : number of samples per symbol alpha : excess bandwidth factor on (0, 1), e.g., 0.35 M : equals RC one-sided symbol truncation factor Returns ------- b : ndarray containing the pulse shape Notes ----- The pulse shape b is typically used as the FIR filter coefficients when forming a pulse shaped digital communications waveform. When square root raised cosine (SRC) pulse is used to generate Tx signals and at the receiver used as a matched filter (receiver FIR filter), the received signal is now raised cosine shaped, thus having zero intersymbol interference and the optimum removal of additive white noise if present at the receiver input. Examples -------- Ten samples per symbol and :math:`\\alpha = 0.35`. >>> import matplotlib.pyplot as plt >>> from numpy import arange >>> from sk_dsp_comm.digitalcom import sqrt_rc_imp >>> b = sqrt_rc_imp(10,0.35) >>> n = arange(-10*6,10*6+1) >>> plt.stem(n,b) >>> plt.show() """ # Design the filter n = np.arange(-M * Ns, M * Ns + 1) b = np.zeros(len(n)) Ns *= 1.0 a = alpha for i in range(len(n)): if abs(1 - 16 * a ** 2 * (n[i] / Ns) ** 2) <= np.finfo(np.float).eps / 2: b[i] = 1 / 2.0 * ((1 + a) * np.sin((1 + a) * np.pi / (4.0 * a)) - (1 - a) * np.cos((1 - a) * np.pi / (4.0 * a)) + 4 * a / np.pi * np.sin((1 - a) * np.pi / (4.0 * a))) # depends on [control=['if'], data=[]] else: b[i] = 4 * a / (np.pi * (1 - 16 * a ** 2 * (n[i] / Ns) ** 2)) b[i] = b[i] * (np.cos((1 + a) * np.pi * n[i] / Ns) + np.sinc((1 - a) * n[i] / Ns) * (1 - a) * np.pi / (4.0 * a)) # depends on [control=['for'], data=['i']] return b
def _add_validator(fv, validator_instance): """Register new flags validator to be checked. Args: fv: flags.FlagValues, the FlagValues instance to add the validator. validator_instance: validators.Validator, the validator to add. Raises: KeyError: Raised when validators work with a non-existing flag. """ for flag_name in validator_instance.get_flags_names(): fv[flag_name].validators.append(validator_instance)
def function[_add_validator, parameter[fv, validator_instance]]: constant[Register new flags validator to be checked. Args: fv: flags.FlagValues, the FlagValues instance to add the validator. validator_instance: validators.Validator, the validator to add. Raises: KeyError: Raised when validators work with a non-existing flag. ] for taget[name[flag_name]] in starred[call[name[validator_instance].get_flags_names, parameter[]]] begin[:] call[call[name[fv]][name[flag_name]].validators.append, parameter[name[validator_instance]]]
keyword[def] identifier[_add_validator] ( identifier[fv] , identifier[validator_instance] ): literal[string] keyword[for] identifier[flag_name] keyword[in] identifier[validator_instance] . identifier[get_flags_names] (): identifier[fv] [ identifier[flag_name] ]. identifier[validators] . identifier[append] ( identifier[validator_instance] )
def _add_validator(fv, validator_instance): """Register new flags validator to be checked. Args: fv: flags.FlagValues, the FlagValues instance to add the validator. validator_instance: validators.Validator, the validator to add. Raises: KeyError: Raised when validators work with a non-existing flag. """ for flag_name in validator_instance.get_flags_names(): fv[flag_name].validators.append(validator_instance) # depends on [control=['for'], data=['flag_name']]
def run(args):
    """Start an oct project

    :param Namespace args: the command-line arguments
    """
    kwargs = vars(args)
    if 'func' in kwargs:
        del kwargs['func']
    project_path = kwargs.pop('project_path')
    config = configure(project_path, kwargs.get('config_file'))
    output_dir = kwargs.pop('output_dir', None) or generate_output_path(args, project_path)
    stats_handler.init_stats(output_dir, config)
    topic = args.publisher_channel or uuid.uuid4().hex
    print("External publishing topic is %s" % topic)
    start_hq(output_dir, config, topic, **kwargs)
    if not args.no_results:
        process_results(output_dir, config)
    copy_config(project_path, output_dir)
    print('done.\n')
def function[run, parameter[args]]: constant[Start an oct project :param Namespace args: the commande-line arguments ] variable[kwargs] assign[=] call[name[vars], parameter[name[args]]] if compare[constant[func] in name[kwargs]] begin[:] <ast.Delete object at 0x7da18bcc8f40> variable[project_path] assign[=] call[name[kwargs].pop, parameter[constant[project_path]]] variable[config] assign[=] call[name[configure], parameter[name[project_path], call[name[kwargs].get, parameter[constant[config_file]]]]] variable[output_dir] assign[=] <ast.BoolOp object at 0x7da18bccb190> call[name[stats_handler].init_stats, parameter[name[output_dir], name[config]]] variable[topic] assign[=] <ast.BoolOp object at 0x7da18bccb520> call[name[print], parameter[binary_operation[constant[External publishing topic is %s] <ast.Mod object at 0x7da2590d6920> name[topic]]]] call[name[start_hq], parameter[name[output_dir], name[config], name[topic]]] if <ast.UnaryOp object at 0x7da18bcc9f60> begin[:] call[name[process_results], parameter[name[output_dir], name[config]]] call[name[copy_config], parameter[name[project_path], name[output_dir]]] call[name[print], parameter[constant[done. ]]]
keyword[def] identifier[run] ( identifier[args] ): literal[string] identifier[kwargs] = identifier[vars] ( identifier[args] ) keyword[if] literal[string] keyword[in] identifier[kwargs] : keyword[del] identifier[kwargs] [ literal[string] ] identifier[project_path] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[config] = identifier[configure] ( identifier[project_path] , identifier[kwargs] . identifier[get] ( literal[string] )) identifier[output_dir] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] ) keyword[or] identifier[generate_output_path] ( identifier[args] , identifier[project_path] ) identifier[stats_handler] . identifier[init_stats] ( identifier[output_dir] , identifier[config] ) identifier[topic] = identifier[args] . identifier[publisher_channel] keyword[or] identifier[uuid] . identifier[uuid4] (). identifier[hex] identifier[print] ( literal[string] % identifier[topic] ) identifier[start_hq] ( identifier[output_dir] , identifier[config] , identifier[topic] ,** identifier[kwargs] ) keyword[if] keyword[not] identifier[args] . identifier[no_results] : identifier[process_results] ( identifier[output_dir] , identifier[config] ) identifier[copy_config] ( identifier[project_path] , identifier[output_dir] ) identifier[print] ( literal[string] )
def run(args):
    """Start an oct project

    :param Namespace args: the command-line arguments
    """
    kwargs = vars(args)
    if 'func' in kwargs:
        del kwargs['func'] # depends on [control=['if'], data=['kwargs']]
    project_path = kwargs.pop('project_path')
    config = configure(project_path, kwargs.get('config_file'))
    output_dir = kwargs.pop('output_dir', None) or generate_output_path(args, project_path)
    stats_handler.init_stats(output_dir, config)
    topic = args.publisher_channel or uuid.uuid4().hex
    print('External publishing topic is %s' % topic)
    start_hq(output_dir, config, topic, **kwargs)
    if not args.no_results:
        process_results(output_dir, config) # depends on [control=['if'], data=[]]
    copy_config(project_path, output_dir)
    print('done.\n')
def user_post_save_handler(**kwargs): """Sends a metric to InfluxDB when a new User object is created.""" if kwargs.get('created'): total = get_user_model().objects.all().count() data = [{ 'measurement': 'django_auth_user_create', 'tags': {'host': settings.INFLUXDB_TAGS_HOST, }, 'fields': {'value': 1, }, 'time': timezone.now().isoformat(), }] write_points(data) data = [{ 'measurement': 'django_auth_user_count', 'tags': {'host': settings.INFLUXDB_TAGS_HOST, }, 'fields': {'value': total, }, 'time': timezone.now().isoformat(), }] write_points(data)
def function[user_post_save_handler, parameter[]]: constant[Sends a metric to InfluxDB when a new User object is created.] if call[name[kwargs].get, parameter[constant[created]]] begin[:] variable[total] assign[=] call[call[call[name[get_user_model], parameter[]].objects.all, parameter[]].count, parameter[]] variable[data] assign[=] list[[<ast.Dict object at 0x7da1b0698430>]] call[name[write_points], parameter[name[data]]] variable[data] assign[=] list[[<ast.Dict object at 0x7da18ede4d30>]] call[name[write_points], parameter[name[data]]]
keyword[def] identifier[user_post_save_handler] (** identifier[kwargs] ): literal[string] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): identifier[total] = identifier[get_user_model] (). identifier[objects] . identifier[all] (). identifier[count] () identifier[data] =[{ literal[string] : literal[string] , literal[string] :{ literal[string] : identifier[settings] . identifier[INFLUXDB_TAGS_HOST] ,}, literal[string] :{ literal[string] : literal[int] ,}, literal[string] : identifier[timezone] . identifier[now] (). identifier[isoformat] (), }] identifier[write_points] ( identifier[data] ) identifier[data] =[{ literal[string] : literal[string] , literal[string] :{ literal[string] : identifier[settings] . identifier[INFLUXDB_TAGS_HOST] ,}, literal[string] :{ literal[string] : identifier[total] ,}, literal[string] : identifier[timezone] . identifier[now] (). identifier[isoformat] (), }] identifier[write_points] ( identifier[data] )
def user_post_save_handler(**kwargs): """Sends a metric to InfluxDB when a new User object is created.""" if kwargs.get('created'): total = get_user_model().objects.all().count() data = [{'measurement': 'django_auth_user_create', 'tags': {'host': settings.INFLUXDB_TAGS_HOST}, 'fields': {'value': 1}, 'time': timezone.now().isoformat()}] write_points(data) data = [{'measurement': 'django_auth_user_count', 'tags': {'host': settings.INFLUXDB_TAGS_HOST}, 'fields': {'value': total}, 'time': timezone.now().isoformat()}] write_points(data) # depends on [control=['if'], data=[]]
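For this handler to fire it must be connected to Django's post_save signal for the user model, typically from an app's ready() hook. A hedged sketch; the app label and import path below are hypothetical, while the signal API is standard Django:

from django.apps import AppConfig
from django.db.models.signals import post_save

class MetricsConfig(AppConfig):
    name = 'metrics'  # hypothetical app label

    def ready(self):
        from django.contrib.auth import get_user_model
        from .handlers import user_post_save_handler  # hypothetical module
        # dispatch_uid prevents double-registration if ready() runs twice
        post_save.connect(user_post_save_handler,
                          sender=get_user_model(),
                          dispatch_uid='influxdb_user_post_save')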
def setup_pcap_inputs(self, input_data): ''' Write the PCAPs to disk for Bro to process and return the pcap filenames ''' # Setup the pcap in the input data for processing by Bro. The input # may be either an individual sample or a sample set. file_list = [] if 'sample' in input_data: raw_bytes = input_data['sample']['raw_bytes'] filename = os.path.basename(input_data['sample']['filename']) file_list.append({'filename': filename, 'bytes': raw_bytes}) else: for md5 in input_data['sample_set']['md5_list']: sample = self.workbench.get_sample(md5)['sample'] raw_bytes = sample['raw_bytes'] filename = os.path.basename(sample['filename']) file_list.append({'filename': filename, 'bytes': raw_bytes}) # Write the pcaps to disk and keep the filenames for Bro to process for file_info in file_list: with open(file_info['filename'], 'wb') as pcap_file: pcap_file.write(file_info['bytes']) # Return filenames return [file_info['filename'] for file_info in file_list]
def function[setup_pcap_inputs, parameter[self, input_data]]: constant[ Write the PCAPs to disk for Bro to process and return the pcap filenames ] variable[file_list] assign[=] list[[]] if compare[constant[sample] in name[input_data]] begin[:] variable[raw_bytes] assign[=] call[call[name[input_data]][constant[sample]]][constant[raw_bytes]] variable[filename] assign[=] call[name[os].path.basename, parameter[call[call[name[input_data]][constant[sample]]][constant[filename]]]] call[name[file_list].append, parameter[dictionary[[<ast.Constant object at 0x7da18bc70d30>, <ast.Constant object at 0x7da18bc729b0>], [<ast.Name object at 0x7da18bc73a90>, <ast.Name object at 0x7da18bc70700>]]]] for taget[name[file_info]] in starred[name[file_list]] begin[:] with call[name[open], parameter[call[name[file_info]][constant[filename]], constant[wb]]] begin[:] call[name[pcap_file].write, parameter[call[name[file_info]][constant[bytes]]]] return[<ast.ListComp object at 0x7da18bc728c0>]
keyword[def] identifier[setup_pcap_inputs] ( identifier[self] , identifier[input_data] ): literal[string] identifier[file_list] =[] keyword[if] literal[string] keyword[in] identifier[input_data] : identifier[raw_bytes] = identifier[input_data] [ literal[string] ][ literal[string] ] identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[input_data] [ literal[string] ][ literal[string] ]) identifier[file_list] . identifier[append] ({ literal[string] : identifier[filename] , literal[string] : identifier[raw_bytes] }) keyword[else] : keyword[for] identifier[md5] keyword[in] identifier[input_data] [ literal[string] ][ literal[string] ]: identifier[sample] = identifier[self] . identifier[workbench] . identifier[get_sample] ( identifier[md5] )[ literal[string] ] identifier[raw_bytes] = identifier[sample] [ literal[string] ] identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[sample] [ literal[string] ]) identifier[file_list] . identifier[append] ({ literal[string] : identifier[filename] , literal[string] : identifier[raw_bytes] }) keyword[for] identifier[file_info] keyword[in] identifier[file_list] : keyword[with] identifier[open] ( identifier[file_info] [ literal[string] ], literal[string] ) keyword[as] identifier[pcap_file] : identifier[pcap_file] . identifier[write] ( identifier[file_info] [ literal[string] ]) keyword[return] [ identifier[file_info] [ literal[string] ] keyword[for] identifier[file_info] keyword[in] identifier[file_list] ]
def setup_pcap_inputs(self, input_data): """ Write the PCAPs to disk for Bro to process and return the pcap filenames """ # Setup the pcap in the input data for processing by Bro. The input # may be either an individual sample or a sample set. file_list = [] if 'sample' in input_data: raw_bytes = input_data['sample']['raw_bytes'] filename = os.path.basename(input_data['sample']['filename']) file_list.append({'filename': filename, 'bytes': raw_bytes}) # depends on [control=['if'], data=['input_data']] else: for md5 in input_data['sample_set']['md5_list']: sample = self.workbench.get_sample(md5)['sample'] raw_bytes = sample['raw_bytes'] filename = os.path.basename(sample['filename']) file_list.append({'filename': filename, 'bytes': raw_bytes}) # depends on [control=['for'], data=['md5']] # Write the pcaps to disk and keep the filenames for Bro to process for file_info in file_list: with open(file_info['filename'], 'wb') as pcap_file: pcap_file.write(file_info['bytes']) # depends on [control=['with'], data=['pcap_file']] # depends on [control=['for'], data=['file_info']] # Return filenames return [file_info['filename'] for file_info in file_list]
def _handle_paper(article):
    """
    Returns a :class:`.Paper` from an article ET node.

    Parameters
    ----------
    article : Element
        ElementTree Element 'article'.

    Returns
    -------
    paper : :class:`.Paper`
    """
    paper = Paper()
    pdata = dict_from_node(article)

    for key, value in pdata.iteritems():
        datum = pdata[key]
        if type(datum) is str:
            datum = unicode(datum)
        if type(datum) is unicode:
            datum = unidecode(datum).upper()
        paper[key] = datum

    # Handle author names.
    adata = _handle_authors(pdata['author'])
    paper.authors_init = zip(adata[0], adata[1])

    # Handle pubdate.
    paper['date'] = _handle_pubdate(pdata['pubdate'])

    # Handle pagerange.
    paper['spage'], paper['epage'] = _handle_pagerange(pdata['pagerange'])
    return paper
def function[_handle_paper, parameter[article]]: constant[ Yields a :class:`.Paper` from an article ET node. Parameters ---------- article : Element ElementTree Element 'article'. Returns ------- paper : :class:`.Paper` ] variable[paper] assign[=] call[name[Paper], parameter[]] variable[pdata] assign[=] call[name[dict_from_node], parameter[name[article]]] for taget[tuple[[<ast.Name object at 0x7da1b11f8fd0>, <ast.Name object at 0x7da1b11f9000>]]] in starred[call[name[pdata].iteritems, parameter[]]] begin[:] variable[datum] assign[=] call[name[pdata]][name[key]] if compare[call[name[type], parameter[name[datum]]] is name[str]] begin[:] variable[datum] assign[=] call[name[unicode], parameter[name[datum]]] if compare[call[name[type], parameter[name[datum]]] is name[unicode]] begin[:] variable[datum] assign[=] call[call[name[unidecode], parameter[name[datum]]].upper, parameter[]] call[name[paper]][name[key]] assign[=] name[datum] variable[adata] assign[=] call[name[_handle_authors], parameter[call[name[pdata]][constant[author]]]] name[paper].authors_init assign[=] call[name[zip], parameter[call[name[adata]][constant[0]], call[name[adata]][constant[1]]]] call[name[paper]][constant[date]] assign[=] call[name[_handle_pubdate], parameter[call[name[pdata]][constant[pubdate]]]] <ast.Tuple object at 0x7da1b11fa260> assign[=] call[name[_handle_pagerange], parameter[call[name[pdata]][constant[pagerange]]]] return[name[paper]]
keyword[def] identifier[_handle_paper] ( identifier[article] ): literal[string] identifier[paper] = identifier[Paper] () identifier[pdata] = identifier[dict_from_node] ( identifier[article] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[pdata] . identifier[iteritems] (): identifier[datum] = identifier[pdata] [ identifier[key] ] keyword[if] identifier[type] ( identifier[datum] ) keyword[is] identifier[str] : identifier[datum] = identifier[unicode] ( identifier[datum] ) keyword[if] identifier[type] ( identifier[datum] ) keyword[is] identifier[unicode] : identifier[datum] = identifier[unidecode] ( identifier[datum] ). identifier[upper] () identifier[paper] [ identifier[key] ]= identifier[datum] identifier[adata] = identifier[_handle_authors] ( identifier[pdata] [ literal[string] ]) identifier[paper] . identifier[authors_init] = identifier[zip] ( identifier[adata] [ literal[int] ], identifier[adata] [ literal[int] ]) identifier[paper] [ literal[string] ]= identifier[_handle_pubdate] ( identifier[pdata] [ literal[string] ]) identifier[paper] [ literal[string] ], identifier[paper] [ literal[string] ]= identifier[_handle_pagerange] ( identifier[pdata] [ literal[string] ]) keyword[return] identifier[paper]
def _handle_paper(article):
    """
    Returns a :class:`.Paper` from an article ET node.

    Parameters
    ----------
    article : Element
        ElementTree Element 'article'.

    Returns
    -------
    paper : :class:`.Paper`
    """
    paper = Paper()
    pdata = dict_from_node(article)
    for (key, value) in pdata.iteritems():
        datum = pdata[key]
        if type(datum) is str:
            datum = unicode(datum) # depends on [control=['if'], data=[]]
        if type(datum) is unicode:
            datum = unidecode(datum).upper() # depends on [control=['if'], data=[]]
        paper[key] = datum # depends on [control=['for'], data=[]]
    # Handle author names.
    adata = _handle_authors(pdata['author'])
    paper.authors_init = zip(adata[0], adata[1])
    # Handle pubdate.
    paper['date'] = _handle_pubdate(pdata['pubdate'])
    # Handle pagerange.
    (paper['spage'], paper['epage']) = _handle_pagerange(pdata['pagerange'])
    return paper
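The per-field normalization above (coerce to text, transliterate, uppercase) can be tried standalone. This sketch assumes the third-party unidecode package and uses Python 3 str, whereas the snippet itself targets Python 2 (iteritems, unicode); the field values are made-up examples.

from unidecode import unidecode  # third-party: pip install unidecode

pdata = {'title': u'Étude de cas', 'journal': u'Revue Générale'}
normalized = {k: unidecode(v).upper() for k, v in pdata.items()}
print(normalized)  # {'title': 'ETUDE DE CAS', 'journal': 'REVUE GENERALE'}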
def map(self, mapper):
    """
    Map values using input correspondence (dict, Series, or function).

    Parameters
    ----------
    mapper : dict, Series, callable
        The correspondence from old values to new.

    Returns
    -------
    SparseArray
        The output array will have the same density as the input.
        The output fill value will be the result of applying the
        mapping to ``self.fill_value``

    Examples
    --------
    >>> arr = pd.SparseArray([0, 1, 2])
    >>> arr.map(lambda x: x + 10)
    [10, 11, 12]
    Fill: 10
    IntIndex
    Indices: array([1, 2], dtype=int32)

    >>> arr.map({0: 10, 1: 11, 2: 12})
    [10, 11, 12]
    Fill: 10
    IntIndex
    Indices: array([1, 2], dtype=int32)

    >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
    [10, 11, 12]
    Fill: 10
    IntIndex
    Indices: array([1, 2], dtype=int32)
    """
    # this is used in apply.
    # We get hit since we're an "is_extension_type" but regular extension
    # types are not hit. This may be worth adding to the interface.
    if isinstance(mapper, ABCSeries):
        mapper = mapper.to_dict()

    if isinstance(mapper, abc.Mapping):
        fill_value = mapper.get(self.fill_value, self.fill_value)
        sp_values = [mapper.get(x, None) for x in self.sp_values]
    else:
        fill_value = mapper(self.fill_value)
        sp_values = [mapper(x) for x in self.sp_values]

    return type(self)(sp_values, sparse_index=self.sp_index,
                      fill_value=fill_value)
def function[map, parameter[self, mapper]]: constant[ Map categories using input correspondence (dict, Series, or function). Parameters ---------- mapper : dict, Series, callable The correspondence from old values to new. Returns ------- SparseArray The output array will have the same density as the input. The output fill value will be the result of applying the mapping to ``self.fill_value`` Examples -------- >>> arr = pd.SparseArray([0, 1, 2]) >>> arr.apply(lambda x: x + 10) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.apply({0: 10, 1: 11, 2: 12}) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.apply(pd.Series([10, 11, 12], index=[0, 1, 2])) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) ] if call[name[isinstance], parameter[name[mapper], name[ABCSeries]]] begin[:] variable[mapper] assign[=] call[name[mapper].to_dict, parameter[]] if call[name[isinstance], parameter[name[mapper], name[abc].Mapping]] begin[:] variable[fill_value] assign[=] call[name[mapper].get, parameter[name[self].fill_value, name[self].fill_value]] variable[sp_values] assign[=] <ast.ListComp object at 0x7da20e9b26e0> return[call[call[name[type], parameter[name[self]]], parameter[name[sp_values]]]]
keyword[def] identifier[map] ( identifier[self] , identifier[mapper] ): literal[string] keyword[if] identifier[isinstance] ( identifier[mapper] , identifier[ABCSeries] ): identifier[mapper] = identifier[mapper] . identifier[to_dict] () keyword[if] identifier[isinstance] ( identifier[mapper] , identifier[abc] . identifier[Mapping] ): identifier[fill_value] = identifier[mapper] . identifier[get] ( identifier[self] . identifier[fill_value] , identifier[self] . identifier[fill_value] ) identifier[sp_values] =[ identifier[mapper] . identifier[get] ( identifier[x] , keyword[None] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[sp_values] ] keyword[else] : identifier[fill_value] = identifier[mapper] ( identifier[self] . identifier[fill_value] ) identifier[sp_values] =[ identifier[mapper] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[sp_values] ] keyword[return] identifier[type] ( identifier[self] )( identifier[sp_values] , identifier[sparse_index] = identifier[self] . identifier[sp_index] , identifier[fill_value] = identifier[fill_value] )
def map(self, mapper): """ Map values using input correspondence (dict, Series, or function). Parameters ---------- mapper : dict, Series, callable The correspondence from old values to new. Returns ------- SparseArray The output array will have the same density as the input. The output fill value will be the result of applying the mapping to ``self.fill_value`` Examples -------- >>> arr = pd.SparseArray([0, 1, 2]) >>> arr.map(lambda x: x + 10) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.map({0: 10, 1: 11, 2: 12}) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2])) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) """ # this is used in apply. # We get hit since we're an "is_extension_type" but regular extension # types are not hit. This may be worth adding to the interface. if isinstance(mapper, ABCSeries): mapper = mapper.to_dict() # depends on [control=['if'], data=[]] if isinstance(mapper, abc.Mapping): fill_value = mapper.get(self.fill_value, self.fill_value) sp_values = [mapper.get(x, None) for x in self.sp_values] # depends on [control=['if'], data=[]] else: fill_value = mapper(self.fill_value) sp_values = [mapper(x) for x in self.sp_values] return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_value)
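Since this is the pandas SparseArray method, the mapping behavior can be checked directly; note the fill value is mapped along with the stored values. Depending on the pandas version the class is exposed as pd.SparseArray or pd.arrays.SparseArray.

import pandas as pd

arr = pd.arrays.SparseArray([0, 1, 2])    # fill_value defaults to 0
mapped = arr.map(lambda x: x + 10)
print(mapped.fill_value)                  # 10 -- the fill value is mapped too
print(list(mapped))                       # [10, 11, 12]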
def libvlc_media_list_player_play_item(p_mlp, p_md): '''Play the given media item. @param p_mlp: media list player instance. @param p_md: the media instance. @return: 0 upon success, -1 if the media is not part of the media list. ''' f = _Cfunctions.get('libvlc_media_list_player_play_item', None) or \ _Cfunction('libvlc_media_list_player_play_item', ((1,), (1,),), None, ctypes.c_int, MediaListPlayer, Media) return f(p_mlp, p_md)
def function[libvlc_media_list_player_play_item, parameter[p_mlp, p_md]]: constant[Play the given media item. @param p_mlp: media list player instance. @param p_md: the media instance. @return: 0 upon success, -1 if the media is not part of the media list. ] variable[f] assign[=] <ast.BoolOp object at 0x7da1b26afb20> return[call[name[f], parameter[name[p_mlp], name[p_md]]]]
keyword[def] identifier[libvlc_media_list_player_play_item] ( identifier[p_mlp] , identifier[p_md] ): literal[string] identifier[f] = identifier[_Cfunctions] . identifier[get] ( literal[string] , keyword[None] ) keyword[or] identifier[_Cfunction] ( literal[string] ,(( literal[int] ,),( literal[int] ,),), keyword[None] , identifier[ctypes] . identifier[c_int] , identifier[MediaListPlayer] , identifier[Media] ) keyword[return] identifier[f] ( identifier[p_mlp] , identifier[p_md] )
def libvlc_media_list_player_play_item(p_mlp, p_md): """Play the given media item. @param p_mlp: media list player instance. @param p_md: the media instance. @return: 0 upon success, -1 if the media is not part of the media list. """ f = _Cfunctions.get('libvlc_media_list_player_play_item', None) or _Cfunction('libvlc_media_list_player_play_item', ((1,), (1,)), None, ctypes.c_int, MediaListPlayer, Media) return f(p_mlp, p_md)
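In the generated python-vlc bindings this ctypes wrapper backs MediaListPlayer.play_item(). A hedged usage sketch: 'example.mp4' is a placeholder, and a local libVLC installation plus the usual generated binding names are assumed.

import vlc  # python-vlc bindings; requires libVLC installed locally

inst = vlc.Instance()
media = inst.media_new('example.mp4')    # placeholder local file
mlist = inst.media_list_new()
mlist.add_media(media)                   # the media must be part of the list

player = inst.media_list_player_new()
player.set_media_list(mlist)
rc = player.play_item(media)             # -> libvlc_media_list_player_play_item
print(rc)                                # 0 on success, -1 if not in the list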
def parse_operand(self, buf): """ Parses an operand from buf :param buf: a buffer :type buf: iterator/generator/string """ buf = iter(buf) try: operand = 0 for _ in range(self.operand_size): operand <<= 8 operand |= next(buf) self._operand = operand except StopIteration: raise ParseError("Not enough data for decoding")
def function[parse_operand, parameter[self, buf]]: constant[ Parses an operand from buf :param buf: a buffer :type buf: iterator/generator/string ] variable[buf] assign[=] call[name[iter], parameter[name[buf]]] <ast.Try object at 0x7da1b00542b0>
keyword[def] identifier[parse_operand] ( identifier[self] , identifier[buf] ): literal[string] identifier[buf] = identifier[iter] ( identifier[buf] ) keyword[try] : identifier[operand] = literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[self] . identifier[operand_size] ): identifier[operand] <<= literal[int] identifier[operand] |= identifier[next] ( identifier[buf] ) identifier[self] . identifier[_operand] = identifier[operand] keyword[except] identifier[StopIteration] : keyword[raise] identifier[ParseError] ( literal[string] )
def parse_operand(self, buf): """ Parses an operand from buf :param buf: a buffer :type buf: iterator/generator/string """ buf = iter(buf) try: operand = 0 for _ in range(self.operand_size): operand <<= 8 operand |= next(buf) # depends on [control=['for'], data=[]] self._operand = operand # depends on [control=['try'], data=[]] except StopIteration: raise ParseError('Not enough data for decoding') # depends on [control=['except'], data=[]]
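The loop above is plain big-endian byte accumulation. A standalone sketch with operand_size fixed at 4 for illustration; it matches int.from_bytes over the consumed slice, and the trailing byte shows that only operand_size bytes are drawn from the iterator.

operand_size = 4
buf = iter(b'\x00\x00\x01\x02\xff')   # trailing byte is left unconsumed

operand = 0
for _ in range(operand_size):
    operand = (operand << 8) | next(buf)

print(operand)                                     # 258
print(int.from_bytes(b'\x00\x00\x01\x02', 'big'))  # 258 -- same value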
def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
    """Preprocess a plain text file for use with the ISI reader.

    Preprocessing results in a new text file with one sentence
    per line.

    Parameters
    ----------
    filename : str
        The name of the plain text file
    pmid : str
        The PMID from which it comes, or None if not specified
    extra_annotations : dict
        Extra annotations to be added to each statement, possibly including
        metadata about the source (annotations with the key "interaction"
        will be overridden)
    """
    with codecs.open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    self.preprocess_plain_text_string(content, pmid,
                                      extra_annotations)
def function[preprocess_plain_text_file, parameter[self, filename, pmid, extra_annotations]]: constant[Preprocess a plain text file for use with ISI reder. Preprocessing results in a new text file with one sentence per line. Parameters ---------- filename : str The name of the plain text file pmid : str The PMID from which it comes, or None if not specified extra_annotations : dict Extra annotations to be added to each statement, possibly including metadata about the source (annotations with the key "interaction" will be overridden) ] with call[name[codecs].open, parameter[name[filename], constant[r]]] begin[:] variable[content] assign[=] call[name[f].read, parameter[]] call[name[self].preprocess_plain_text_string, parameter[name[content], name[pmid], name[extra_annotations]]]
keyword[def] identifier[preprocess_plain_text_file] ( identifier[self] , identifier[filename] , identifier[pmid] , identifier[extra_annotations] ): literal[string] keyword[with] identifier[codecs] . identifier[open] ( identifier[filename] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] : identifier[content] = identifier[f] . identifier[read] () identifier[self] . identifier[preprocess_plain_text_string] ( identifier[content] , identifier[pmid] , identifier[extra_annotations] )
def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
    """Preprocess a plain text file for use with the ISI reader.

    Preprocessing results in a new text file with one sentence
    per line.

    Parameters
    ----------
    filename : str
        The name of the plain text file
    pmid : str
        The PMID from which it comes, or None if not specified
    extra_annotations : dict
        Extra annotations to be added to each statement, possibly including
        metadata about the source (annotations with the key "interaction"
        will be overridden)
    """
    with codecs.open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
        self.preprocess_plain_text_string(content, pmid, extra_annotations) # depends on [control=['with'], data=['f']]
def import_obj(clsname, default_module=None): """ Import the object given by clsname. If default_module is specified, import from this module. """ if default_module is not None: if not clsname.startswith(default_module + '.'): clsname = '{0}.{1}'.format(default_module, clsname) mod, clsname = clsname.rsplit('.', 1) mod = importlib.import_module(mod) try: obj = getattr(mod, clsname) except AttributeError: raise ImportError('Cannot import {0} from {1}'.format(clsname, mod)) return obj
def function[import_obj, parameter[clsname, default_module]]: constant[ Import the object given by clsname. If default_module is specified, import from this module. ] if compare[name[default_module] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da207f02aa0> begin[:] variable[clsname] assign[=] call[constant[{0}.{1}].format, parameter[name[default_module], name[clsname]]] <ast.Tuple object at 0x7da2044c01c0> assign[=] call[name[clsname].rsplit, parameter[constant[.], constant[1]]] variable[mod] assign[=] call[name[importlib].import_module, parameter[name[mod]]] <ast.Try object at 0x7da2041dabf0> return[name[obj]]
keyword[def] identifier[import_obj] ( identifier[clsname] , identifier[default_module] = keyword[None] ): literal[string] keyword[if] identifier[default_module] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[clsname] . identifier[startswith] ( identifier[default_module] + literal[string] ): identifier[clsname] = literal[string] . identifier[format] ( identifier[default_module] , identifier[clsname] ) identifier[mod] , identifier[clsname] = identifier[clsname] . identifier[rsplit] ( literal[string] , literal[int] ) identifier[mod] = identifier[importlib] . identifier[import_module] ( identifier[mod] ) keyword[try] : identifier[obj] = identifier[getattr] ( identifier[mod] , identifier[clsname] ) keyword[except] identifier[AttributeError] : keyword[raise] identifier[ImportError] ( literal[string] . identifier[format] ( identifier[clsname] , identifier[mod] )) keyword[return] identifier[obj]
def import_obj(clsname, default_module=None): """ Import the object given by clsname. If default_module is specified, import from this module. """ if default_module is not None: if not clsname.startswith(default_module + '.'): clsname = '{0}.{1}'.format(default_module, clsname) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['default_module']] (mod, clsname) = clsname.rsplit('.', 1) mod = importlib.import_module(mod) try: obj = getattr(mod, clsname) # depends on [control=['try'], data=[]] except AttributeError: raise ImportError('Cannot import {0} from {1}'.format(clsname, mod)) # depends on [control=['except'], data=[]] return obj
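Example usage of the helper above against standard-library targets (the snippet assumes importlib is already imported in its module). Both spellings resolve to the same object:

fn = import_obj('os.path.join')
same = import_obj('join', default_module='os.path')
assert fn is same
print(fn('a', 'b'))   # 'a/b' on POSIX, 'a\\b' on Windows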
def filter_values(d, vals=None, list_of_dicts=False, deepcopy=True): """ filters leaf nodes of nested dictionary Parameters ---------- d : dict vals : list values to filter by list_of_dicts: bool treat list of dicts as additional branches deepcopy: bool deepcopy values Examples -------- >>> d = {1:{"a":"A"},2:{"b":"B"},4:{5:{6:'a'}}} >>> filter_values(d,['a']) {4: {5: {6: 'a'}}} """ vals = [] if vals is None else vals list_of_dicts = '__list__' if list_of_dicts else None flatd = flatten(d, list_of_dicts=list_of_dicts) def is_in(a, b): try: return a in b except Exception: return False flatd = {k: v for k, v in flatd.items() if is_in(v, vals)} return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
def function[filter_values, parameter[d, vals, list_of_dicts, deepcopy]]: constant[ filters leaf nodes of nested dictionary Parameters ---------- d : dict vals : list values to filter by list_of_dicts: bool treat list of dicts as additional branches deepcopy: bool deepcopy values Examples -------- >>> d = {1:{"a":"A"},2:{"b":"B"},4:{5:{6:'a'}}} >>> filter_values(d,['a']) {4: {5: {6: 'a'}}} ] variable[vals] assign[=] <ast.IfExp object at 0x7da18eb55bd0> variable[list_of_dicts] assign[=] <ast.IfExp object at 0x7da18eb56530> variable[flatd] assign[=] call[name[flatten], parameter[name[d]]] def function[is_in, parameter[a, b]]: <ast.Try object at 0x7da18eb54910> variable[flatd] assign[=] <ast.DictComp object at 0x7da18eb55870> return[call[name[unflatten], parameter[name[flatd]]]]
keyword[def] identifier[filter_values] ( identifier[d] , identifier[vals] = keyword[None] , identifier[list_of_dicts] = keyword[False] , identifier[deepcopy] = keyword[True] ): literal[string] identifier[vals] =[] keyword[if] identifier[vals] keyword[is] keyword[None] keyword[else] identifier[vals] identifier[list_of_dicts] = literal[string] keyword[if] identifier[list_of_dicts] keyword[else] keyword[None] identifier[flatd] = identifier[flatten] ( identifier[d] , identifier[list_of_dicts] = identifier[list_of_dicts] ) keyword[def] identifier[is_in] ( identifier[a] , identifier[b] ): keyword[try] : keyword[return] identifier[a] keyword[in] identifier[b] keyword[except] identifier[Exception] : keyword[return] keyword[False] identifier[flatd] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[flatd] . identifier[items] () keyword[if] identifier[is_in] ( identifier[v] , identifier[vals] )} keyword[return] identifier[unflatten] ( identifier[flatd] , identifier[list_of_dicts] = identifier[list_of_dicts] , identifier[deepcopy] = identifier[deepcopy] )
def filter_values(d, vals=None, list_of_dicts=False, deepcopy=True): """ filters leaf nodes of nested dictionary Parameters ---------- d : dict vals : list values to filter by list_of_dicts: bool treat list of dicts as additional branches deepcopy: bool deepcopy values Examples -------- >>> d = {1:{"a":"A"},2:{"b":"B"},4:{5:{6:'a'}}} >>> filter_values(d,['a']) {4: {5: {6: 'a'}}} """ vals = [] if vals is None else vals list_of_dicts = '__list__' if list_of_dicts else None flatd = flatten(d, list_of_dicts=list_of_dicts) def is_in(a, b): try: return a in b # depends on [control=['try'], data=[]] except Exception: return False # depends on [control=['except'], data=[]] flatd = {k: v for (k, v) in flatd.items() if is_in(v, vals)} return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
def site_config_dir(self): """Return ``site_config_dir``.""" directory = appdirs.site_config_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath) if self.create: self._ensure_directory_exists(directory) return directory
def function[site_config_dir, parameter[self]]: constant[Return ``site_config_dir``.] variable[directory] assign[=] call[name[appdirs].site_config_dir, parameter[name[self].appname, name[self].appauthor]] if name[self].create begin[:] call[name[self]._ensure_directory_exists, parameter[name[directory]]] return[name[directory]]
keyword[def] identifier[site_config_dir] ( identifier[self] ): literal[string] identifier[directory] = identifier[appdirs] . identifier[site_config_dir] ( identifier[self] . identifier[appname] , identifier[self] . identifier[appauthor] , identifier[version] = identifier[self] . identifier[version] , identifier[multipath] = identifier[self] . identifier[multipath] ) keyword[if] identifier[self] . identifier[create] : identifier[self] . identifier[_ensure_directory_exists] ( identifier[directory] ) keyword[return] identifier[directory]
def site_config_dir(self): """Return ``site_config_dir``.""" directory = appdirs.site_config_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath) if self.create: self._ensure_directory_exists(directory) # depends on [control=['if'], data=[]] return directory
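The property is a thin wrapper over appdirs plus optional directory creation, so the underlying call can be exercised directly. The app name and author below are placeholders, and the result is platform-dependent:

import appdirs  # third-party: pip install appdirs

print(appdirs.site_config_dir('MyApp', 'MyCompany'))
# e.g. '/etc/xdg/MyApp' on Linux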
def field_cache_to_index_pattern(self, field_cache): """Return a .kibana index-pattern doc_type""" mapping_dict = {} mapping_dict['customFormats'] = "{}" mapping_dict['title'] = self.index_pattern # now post the data into .kibana mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':')) # in order to post, we need to create the post string mapping_str = json.dumps(mapping_dict, separators=(',', ':')) return mapping_str
def function[field_cache_to_index_pattern, parameter[self, field_cache]]: constant[Return a .kibana index-pattern doc_type] variable[mapping_dict] assign[=] dictionary[[], []] call[name[mapping_dict]][constant[customFormats]] assign[=] constant[{}] call[name[mapping_dict]][constant[title]] assign[=] name[self].index_pattern call[name[mapping_dict]][constant[fields]] assign[=] call[name[json].dumps, parameter[name[field_cache]]] variable[mapping_str] assign[=] call[name[json].dumps, parameter[name[mapping_dict]]] return[name[mapping_str]]
keyword[def] identifier[field_cache_to_index_pattern] ( identifier[self] , identifier[field_cache] ): literal[string] identifier[mapping_dict] ={} identifier[mapping_dict] [ literal[string] ]= literal[string] identifier[mapping_dict] [ literal[string] ]= identifier[self] . identifier[index_pattern] identifier[mapping_dict] [ literal[string] ]= identifier[json] . identifier[dumps] ( identifier[field_cache] , identifier[separators] =( literal[string] , literal[string] )) identifier[mapping_str] = identifier[json] . identifier[dumps] ( identifier[mapping_dict] , identifier[separators] =( literal[string] , literal[string] )) keyword[return] identifier[mapping_str]
def field_cache_to_index_pattern(self, field_cache): """Return a .kibana index-pattern doc_type""" mapping_dict = {} mapping_dict['customFormats'] = '{}' mapping_dict['title'] = self.index_pattern # now post the data into .kibana mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':')) # in order to post, we need to create the post string mapping_str = json.dumps(mapping_dict, separators=(',', ':')) return mapping_str
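The double encoding above (the field cache serialized to a JSON string, then embedded in the outer document) is what the .kibana index-pattern doc_type expects. A standalone sketch with a made-up field cache and index pattern:

import json

field_cache = [{'name': '@timestamp', 'type': 'date', 'indexed': True}]
mapping_dict = {
    'customFormats': '{}',
    'title': 'logstash-*',                                     # placeholder pattern
    'fields': json.dumps(field_cache, separators=(',', ':')),  # a string, not a list
}
print(json.dumps(mapping_dict, separators=(',', ':')))
# note that "fields" arrives as an escaped JSON string inside the outer JSON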
def from_keyed_iterable(iterable, key, filter_func=None): """Construct a dictionary out of an iterable, using an attribute name as the key. Optionally provide a filter function, to determine what should be kept in the dictionary.""" generated = {} for element in iterable: try: k = getattr(element, key) except AttributeError: raise RuntimeError("{} does not have the keyed attribute: {}".format( element, key )) if filter_func is None or filter_func(element): if k in generated: generated[k] += [element] else: generated[k] = [element] return generated
def function[from_keyed_iterable, parameter[iterable, key, filter_func]]: constant[Construct a dictionary out of an iterable, using an attribute name as the key. Optionally provide a filter function, to determine what should be kept in the dictionary.] variable[generated] assign[=] dictionary[[], []] for taget[name[element]] in starred[name[iterable]] begin[:] <ast.Try object at 0x7da1b287c250> if <ast.BoolOp object at 0x7da1b28f1090> begin[:] if compare[name[k] in name[generated]] begin[:] <ast.AugAssign object at 0x7da1b28f2650> return[name[generated]]
keyword[def] identifier[from_keyed_iterable] ( identifier[iterable] , identifier[key] , identifier[filter_func] = keyword[None] ): literal[string] identifier[generated] ={} keyword[for] identifier[element] keyword[in] identifier[iterable] : keyword[try] : identifier[k] = identifier[getattr] ( identifier[element] , identifier[key] ) keyword[except] identifier[AttributeError] : keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[element] , identifier[key] )) keyword[if] identifier[filter_func] keyword[is] keyword[None] keyword[or] identifier[filter_func] ( identifier[element] ): keyword[if] identifier[k] keyword[in] identifier[generated] : identifier[generated] [ identifier[k] ]+=[ identifier[element] ] keyword[else] : identifier[generated] [ identifier[k] ]=[ identifier[element] ] keyword[return] identifier[generated]
def from_keyed_iterable(iterable, key, filter_func=None): """Construct a dictionary out of an iterable, using an attribute name as the key. Optionally provide a filter function, to determine what should be kept in the dictionary.""" generated = {} for element in iterable: try: k = getattr(element, key) # depends on [control=['try'], data=[]] except AttributeError: raise RuntimeError('{} does not have the keyed attribute: {}'.format(element, key)) # depends on [control=['except'], data=[]] if filter_func is None or filter_func(element): if k in generated: generated[k] += [element] # depends on [control=['if'], data=['k', 'generated']] else: generated[k] = [element] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['element']] return generated
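A quick grouping example for the helper above; namedtuples are used so the getattr lookup works:

from collections import namedtuple

Person = namedtuple('Person', ['name', 'team'])
people = [Person('ann', 'red'), Person('bob', 'blue'), Person('cyd', 'red')]

by_team = from_keyed_iterable(people, 'team')
print(sorted(by_team))                     # ['blue', 'red']
print([p.name for p in by_team['red']])    # ['ann', 'cyd']

filtered = from_keyed_iterable(people, 'team',
                               filter_func=lambda p: p.name != 'bob')
print('blue' in filtered)                  # False -- bob was filtered out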
def print_row(self, row, rstrip=True): """ Format and print the pre-rendered data to the output device. """ line = ''.join(map(str, row)) print(line.rstrip() if rstrip else line, file=self.table.file)
def function[print_row, parameter[self, row, rstrip]]: constant[ Format and print the pre-rendered data to the output device. ] variable[line] assign[=] call[constant[].join, parameter[call[name[map], parameter[name[str], name[row]]]]] call[name[print], parameter[<ast.IfExp object at 0x7da2041dabf0>]]
keyword[def] identifier[print_row] ( identifier[self] , identifier[row] , identifier[rstrip] = keyword[True] ): literal[string] identifier[line] = literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[row] )) identifier[print] ( identifier[line] . identifier[rstrip] () keyword[if] identifier[rstrip] keyword[else] identifier[line] , identifier[file] = identifier[self] . identifier[table] . identifier[file] )
def print_row(self, row, rstrip=True): """ Format and print the pre-rendered data to the output device. """ line = ''.join(map(str, row)) print(line.rstrip() if rstrip else line, file=self.table.file)
def com_google_fonts_check_metadata_copyright(family_metadata): """METADATA.pb: Copyright notice is the same in all fonts?""" copyright = None fail = False for f in family_metadata.fonts: if copyright and f.copyright != copyright: fail = True copyright = f.copyright if fail: yield FAIL, ("METADATA.pb: Copyright field value" " is inconsistent across family") else: yield PASS, "Copyright is consistent across family"
def function[com_google_fonts_check_metadata_copyright, parameter[family_metadata]]: constant[METADATA.pb: Copyright notice is the same in all fonts?] variable[copyright] assign[=] constant[None] variable[fail] assign[=] constant[False] for taget[name[f]] in starred[name[family_metadata].fonts] begin[:] if <ast.BoolOp object at 0x7da1b12f3700> begin[:] variable[fail] assign[=] constant[True] variable[copyright] assign[=] name[f].copyright if name[fail] begin[:] <ast.Yield object at 0x7da1b12f12d0>
keyword[def] identifier[com_google_fonts_check_metadata_copyright] ( identifier[family_metadata] ): literal[string] identifier[copyright] = keyword[None] identifier[fail] = keyword[False] keyword[for] identifier[f] keyword[in] identifier[family_metadata] . identifier[fonts] : keyword[if] identifier[copyright] keyword[and] identifier[f] . identifier[copyright] != identifier[copyright] : identifier[fail] = keyword[True] identifier[copyright] = identifier[f] . identifier[copyright] keyword[if] identifier[fail] : keyword[yield] identifier[FAIL] ,( literal[string] literal[string] ) keyword[else] : keyword[yield] identifier[PASS] , literal[string]
def com_google_fonts_check_metadata_copyright(family_metadata): """METADATA.pb: Copyright notice is the same in all fonts?""" copyright = None fail = False for f in family_metadata.fonts: if copyright and f.copyright != copyright: fail = True # depends on [control=['if'], data=[]] copyright = f.copyright # depends on [control=['for'], data=['f']] if fail: yield (FAIL, 'METADATA.pb: Copyright field value is inconsistent across family') # depends on [control=['if'], data=[]] else: yield (PASS, 'Copyright is consistent across family')
def profile_loglike(self, x):
    """Profile log-likelihood.

    Returns ``L_prof(x,y=y_min|z')`` : where y_min is the value of y
    that minimizes L for a given x.

    This will use the cached '~fermipy.castro.Interpolator' object
    if possible, and construct it if needed.
    """
    if self._prof_interp is None:
        # This calculates values and caches the spline
        return self._profile_loglike(x)[1]

    x = np.array(x, ndmin=1)
    return self._prof_interp(x)
def function[profile_loglike, parameter[self, x]]: constant[Profile log-likelihood. Returns ``L_prof(x,y=y_min|z')`` : where y_min is the value of y that minimizes L for a given x. This will used the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed. ] if compare[name[self]._prof_interp is constant[None]] begin[:] return[call[call[name[self]._profile_loglike, parameter[name[x]]]][constant[1]]] variable[x] assign[=] call[name[np].array, parameter[name[x]]] return[call[name[self]._prof_interp, parameter[name[x]]]]
keyword[def] identifier[profile_loglike] ( identifier[self] , identifier[x] ): literal[string] keyword[if] identifier[self] . identifier[_prof_interp] keyword[is] keyword[None] : keyword[return] identifier[self] . identifier[_profile_loglike] ( identifier[x] )[ literal[int] ] identifier[x] = identifier[np] . identifier[array] ( identifier[x] , identifier[ndmin] = literal[int] ) keyword[return] identifier[self] . identifier[_prof_interp] ( identifier[x] )
def profile_loglike(self, x):
    """Profile log-likelihood.

    Returns ``L_prof(x,y=y_min|z')`` : where y_min is the value of y
    that minimizes L for a given x.

    This will use the cached '~fermipy.castro.Interpolator' object
    if possible, and construct it if needed.
    """
    if self._prof_interp is None:
        # This calculates values and caches the spline
        return self._profile_loglike(x)[1] # depends on [control=['if'], data=[]]
    x = np.array(x, ndmin=1)
    return self._prof_interp(x)
def is_pg_at_least_nine_two(self):
    """
    Some queries have different syntax depending on what version of postgres
    we are querying against.

    :returns: boolean
    """
    if self._is_pg_at_least_nine_two is None:
        results = self.version()

        regex = re.compile(r"PostgreSQL (\d+\.\d+\.\d+) on")
        matches = regex.match(results[0].version)
        version = matches.groups()[0]

        # Compare version components numerically; a plain string comparison
        # would misorder e.g. '10.1.0' against '9.2.0'.
        if tuple(int(p) for p in version.split('.')) > (9, 2, 0):
            self._is_pg_at_least_nine_two = True
        else:
            self._is_pg_at_least_nine_two = False

    return self._is_pg_at_least_nine_two
def function[is_pg_at_least_nine_two, parameter[self]]: constant[ Some queries have different syntax depending what version of postgres we are querying against. :returns: boolean ] if compare[name[self]._is_pg_at_least_nine_two is constant[None]] begin[:] variable[results] assign[=] call[name[self].version, parameter[]] variable[regex] assign[=] call[name[re].compile, parameter[constant[PostgreSQL (\d+\.\d+\.\d+) on]]] variable[matches] assign[=] call[name[regex].match, parameter[call[name[results]][constant[0]].version]] variable[version] assign[=] call[call[name[matches].groups, parameter[]]][constant[0]] if compare[name[version] greater[>] constant[9.2.0]] begin[:] name[self]._is_pg_at_least_nine_two assign[=] constant[True] return[name[self]._is_pg_at_least_nine_two]
keyword[def] identifier[is_pg_at_least_nine_two] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_is_pg_at_least_nine_two] keyword[is] keyword[None] : identifier[results] = identifier[self] . identifier[version] () identifier[regex] = identifier[re] . identifier[compile] ( literal[string] ) identifier[matches] = identifier[regex] . identifier[match] ( identifier[results] [ literal[int] ]. identifier[version] ) identifier[version] = identifier[matches] . identifier[groups] ()[ literal[int] ] keyword[if] identifier[version] > literal[string] : identifier[self] . identifier[_is_pg_at_least_nine_two] = keyword[True] keyword[else] : identifier[self] . identifier[_is_pg_at_least_nine_two] = keyword[False] keyword[return] identifier[self] . identifier[_is_pg_at_least_nine_two]
def is_pg_at_least_nine_two(self):
    """
    Some queries have different syntax depending on what version of postgres
    we are querying against.

    :returns: boolean
    """
    if self._is_pg_at_least_nine_two is None:
        results = self.version()
        regex = re.compile('PostgreSQL (\\d+\\.\\d+\\.\\d+) on')
        matches = regex.match(results[0].version)
        version = matches.groups()[0]
        # compare numerically; string comparison misorders '10.x' vs '9.x'
        if tuple(int(p) for p in version.split('.')) > (9, 2, 0):
            self._is_pg_at_least_nine_two = True # depends on [control=['if'], data=[]]
        else:
            self._is_pg_at_least_nine_two = False # depends on [control=['if'], data=[]]
    return self._is_pg_at_least_nine_two
def dump(self, obj): """ Dumps the given object in the Java serialization format """ self.references = [] self.object_obj = obj self.object_stream = BytesIO() self._writeStreamHeader() self.writeObject(obj) return self.object_stream.getvalue()
def function[dump, parameter[self, obj]]: constant[ Dumps the given object in the Java serialization format ] name[self].references assign[=] list[[]] name[self].object_obj assign[=] name[obj] name[self].object_stream assign[=] call[name[BytesIO], parameter[]] call[name[self]._writeStreamHeader, parameter[]] call[name[self].writeObject, parameter[name[obj]]] return[call[name[self].object_stream.getvalue, parameter[]]]
keyword[def] identifier[dump] ( identifier[self] , identifier[obj] ): literal[string] identifier[self] . identifier[references] =[] identifier[self] . identifier[object_obj] = identifier[obj] identifier[self] . identifier[object_stream] = identifier[BytesIO] () identifier[self] . identifier[_writeStreamHeader] () identifier[self] . identifier[writeObject] ( identifier[obj] ) keyword[return] identifier[self] . identifier[object_stream] . identifier[getvalue] ()
def dump(self, obj): """ Dumps the given object in the Java serialization format """ self.references = [] self.object_obj = obj self.object_stream = BytesIO() self._writeStreamHeader() self.writeObject(obj) return self.object_stream.getvalue()
def GetCompressedStreamTypeIndicators(cls, path_spec, resolver_context=None):
    """Determines if a file contains supported compressed stream types.

    Args:
      path_spec (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built-in context which is not multi process safe.

    Returns:
      list[str]: supported format type indicators.
    """
    if (cls._compressed_stream_remainder_list is None or
        cls._compressed_stream_store is None):
      specification_store, remainder_list = cls._GetSpecificationStore(
          definitions.FORMAT_CATEGORY_COMPRESSED_STREAM)
      cls._compressed_stream_remainder_list = remainder_list
      cls._compressed_stream_store = specification_store

    if cls._compressed_stream_scanner is None:
      cls._compressed_stream_scanner = cls._GetSignatureScanner(
          cls._compressed_stream_store)

    return cls._GetTypeIndicators(
        cls._compressed_stream_scanner, cls._compressed_stream_store,
        cls._compressed_stream_remainder_list, path_spec,
        resolver_context=resolver_context)
def function[GetCompressedStreamTypeIndicators, parameter[cls, path_spec, resolver_context]]: constant[Determines if a file contains a supported compressed stream types. Args: path_spec (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built-in context which is not multi process safe. Returns: list[str]: supported format type indicators. ] if <ast.BoolOp object at 0x7da1b07a2fb0> begin[:] <ast.Tuple object at 0x7da1b07a16c0> assign[=] call[name[cls]._GetSpecificationStore, parameter[name[definitions].FORMAT_CATEGORY_COMPRESSED_STREAM]] name[cls]._compressed_stream_remainder_list assign[=] name[remainder_list] name[cls]._compressed_stream_store assign[=] name[specification_store] if compare[name[cls]._compressed_stream_scanner is constant[None]] begin[:] name[cls]._compressed_stream_scanner assign[=] call[name[cls]._GetSignatureScanner, parameter[name[cls]._compressed_stream_store]] return[call[name[cls]._GetTypeIndicators, parameter[name[cls]._compressed_stream_scanner, name[cls]._compressed_stream_store, name[cls]._compressed_stream_remainder_list, name[path_spec]]]]
keyword[def] identifier[GetCompressedStreamTypeIndicators] ( identifier[cls] , identifier[path_spec] , identifier[resolver_context] = keyword[None] ): literal[string] keyword[if] ( identifier[cls] . identifier[_compressed_stream_remainder_list] keyword[is] keyword[None] keyword[or] identifier[cls] . identifier[_compressed_stream_store] keyword[is] keyword[None] ): identifier[specification_store] , identifier[remainder_list] = identifier[cls] . identifier[_GetSpecificationStore] ( identifier[definitions] . identifier[FORMAT_CATEGORY_COMPRESSED_STREAM] ) identifier[cls] . identifier[_compressed_stream_remainder_list] = identifier[remainder_list] identifier[cls] . identifier[_compressed_stream_store] = identifier[specification_store] keyword[if] identifier[cls] . identifier[_compressed_stream_scanner] keyword[is] keyword[None] : identifier[cls] . identifier[_compressed_stream_scanner] = identifier[cls] . identifier[_GetSignatureScanner] ( identifier[cls] . identifier[_compressed_stream_store] ) keyword[return] identifier[cls] . identifier[_GetTypeIndicators] ( identifier[cls] . identifier[_compressed_stream_scanner] , identifier[cls] . identifier[_compressed_stream_store] , identifier[cls] . identifier[_compressed_stream_remainder_list] , identifier[path_spec] , identifier[resolver_context] = identifier[resolver_context] )
def GetCompressedStreamTypeIndicators(cls, path_spec, resolver_context=None):
    """Determines if a file contains supported compressed stream types.

    Args:
      path_spec (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built-in context which is not multi process safe.

    Returns:
      list[str]: supported format type indicators.
    """
    if cls._compressed_stream_remainder_list is None or cls._compressed_stream_store is None:
        (specification_store, remainder_list) = cls._GetSpecificationStore(definitions.FORMAT_CATEGORY_COMPRESSED_STREAM)
        cls._compressed_stream_remainder_list = remainder_list
        cls._compressed_stream_store = specification_store # depends on [control=['if'], data=[]]
    if cls._compressed_stream_scanner is None:
        cls._compressed_stream_scanner = cls._GetSignatureScanner(cls._compressed_stream_store) # depends on [control=['if'], data=[]]
    return cls._GetTypeIndicators(cls._compressed_stream_scanner, cls._compressed_stream_store, cls._compressed_stream_remainder_list, path_spec, resolver_context=resolver_context)
def _load_from_file(self, filename): """Find filename in tar, and load it""" if filename in self.fdata: return self.fdata[filename] else: filepath = find_in_tarball(self.tarloc, filename) return read_from_tarball(self.tarloc, filepath)
def function[_load_from_file, parameter[self, filename]]: constant[Find filename in tar, and load it] if compare[name[filename] in name[self].fdata] begin[:] return[call[name[self].fdata][name[filename]]]
keyword[def] identifier[_load_from_file] ( identifier[self] , identifier[filename] ): literal[string] keyword[if] identifier[filename] keyword[in] identifier[self] . identifier[fdata] : keyword[return] identifier[self] . identifier[fdata] [ identifier[filename] ] keyword[else] : identifier[filepath] = identifier[find_in_tarball] ( identifier[self] . identifier[tarloc] , identifier[filename] ) keyword[return] identifier[read_from_tarball] ( identifier[self] . identifier[tarloc] , identifier[filepath] )
def _load_from_file(self, filename): """Find filename in tar, and load it""" if filename in self.fdata: return self.fdata[filename] # depends on [control=['if'], data=['filename']] else: filepath = find_in_tarball(self.tarloc, filename) return read_from_tarball(self.tarloc, filepath)
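find_in_tarball and read_from_tarball are not shown in this snippet. A plausible minimal sketch of both on top of the standard tarfile module follows; the names and the basename-matching semantics are assumptions:

import os
import tarfile

def find_in_tarball(tarloc, filename):
    # Return the path of the first member whose basename matches.
    with tarfile.open(tarloc) as tar:
        for member in tar.getmembers():
            if os.path.basename(member.name) == filename:
                return member.name
    raise KeyError(filename)

def read_from_tarball(tarloc, filepath):
    # Read one member's bytes without unpacking the archive to disk.
    with tarfile.open(tarloc) as tar:
        return tar.extractfile(filepath).read()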
def _assert_valid_permission(self, perm_str): """Raise D1 exception if ``perm_str`` is not a valid permission.""" if perm_str not in ORDERED_PERM_LIST: raise d1_common.types.exceptions.InvalidRequest( 0, 'Permission must be one of {}. perm_str="{}"'.format( ', '.join(ORDERED_PERM_LIST), perm_str ), )
def function[_assert_valid_permission, parameter[self, perm_str]]: constant[Raise D1 exception if ``perm_str`` is not a valid permission.] if compare[name[perm_str] <ast.NotIn object at 0x7da2590d7190> name[ORDERED_PERM_LIST]] begin[:] <ast.Raise object at 0x7da18dc04580>
keyword[def] identifier[_assert_valid_permission] ( identifier[self] , identifier[perm_str] ): literal[string] keyword[if] identifier[perm_str] keyword[not] keyword[in] identifier[ORDERED_PERM_LIST] : keyword[raise] identifier[d1_common] . identifier[types] . identifier[exceptions] . identifier[InvalidRequest] ( literal[int] , literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[ORDERED_PERM_LIST] ), identifier[perm_str] ), )
def _assert_valid_permission(self, perm_str): """Raise D1 exception if ``perm_str`` is not a valid permission.""" if perm_str not in ORDERED_PERM_LIST: raise d1_common.types.exceptions.InvalidRequest(0, 'Permission must be one of {}. perm_str="{}"'.format(', '.join(ORDERED_PERM_LIST), perm_str)) # depends on [control=['if'], data=['perm_str', 'ORDERED_PERM_LIST']]
def update_channels(self): """Update the channels list when a new group is selected.""" group_dict = {k['name']: i for i, k in enumerate(self.groups)} group_index = group_dict[self.idx_group.currentText()] self.one_grp = self.groups[group_index] self.idx_chan.clear() self.idx_chan.setSelectionMode(QAbstractItemView.ExtendedSelection) for chan in self.one_grp['chan_to_plot']: name = chan + '—(' + '+'.join(self.one_grp['ref_chan']) + ')' item = QListWidgetItem(name) self.idx_chan.addItem(item)
def function[update_channels, parameter[self]]: constant[Update the channels list when a new group is selected.] variable[group_dict] assign[=] <ast.DictComp object at 0x7da1b26afd00> variable[group_index] assign[=] call[name[group_dict]][call[name[self].idx_group.currentText, parameter[]]] name[self].one_grp assign[=] call[name[self].groups][name[group_index]] call[name[self].idx_chan.clear, parameter[]] call[name[self].idx_chan.setSelectionMode, parameter[name[QAbstractItemView].ExtendedSelection]] for taget[name[chan]] in starred[call[name[self].one_grp][constant[chan_to_plot]]] begin[:] variable[name] assign[=] binary_operation[binary_operation[binary_operation[name[chan] + constant[—(]] + call[constant[+].join, parameter[call[name[self].one_grp][constant[ref_chan]]]]] + constant[)]] variable[item] assign[=] call[name[QListWidgetItem], parameter[name[name]]] call[name[self].idx_chan.addItem, parameter[name[item]]]
keyword[def] identifier[update_channels] ( identifier[self] ): literal[string] identifier[group_dict] ={ identifier[k] [ literal[string] ]: identifier[i] keyword[for] identifier[i] , identifier[k] keyword[in] identifier[enumerate] ( identifier[self] . identifier[groups] )} identifier[group_index] = identifier[group_dict] [ identifier[self] . identifier[idx_group] . identifier[currentText] ()] identifier[self] . identifier[one_grp] = identifier[self] . identifier[groups] [ identifier[group_index] ] identifier[self] . identifier[idx_chan] . identifier[clear] () identifier[self] . identifier[idx_chan] . identifier[setSelectionMode] ( identifier[QAbstractItemView] . identifier[ExtendedSelection] ) keyword[for] identifier[chan] keyword[in] identifier[self] . identifier[one_grp] [ literal[string] ]: identifier[name] = identifier[chan] + literal[string] + literal[string] . identifier[join] ( identifier[self] . identifier[one_grp] [ literal[string] ])+ literal[string] identifier[item] = identifier[QListWidgetItem] ( identifier[name] ) identifier[self] . identifier[idx_chan] . identifier[addItem] ( identifier[item] )
def update_channels(self): """Update the channels list when a new group is selected.""" group_dict = {k['name']: i for (i, k) in enumerate(self.groups)} group_index = group_dict[self.idx_group.currentText()] self.one_grp = self.groups[group_index] self.idx_chan.clear() self.idx_chan.setSelectionMode(QAbstractItemView.ExtendedSelection) for chan in self.one_grp['chan_to_plot']: name = chan + '—(' + '+'.join(self.one_grp['ref_chan']) + ')' item = QListWidgetItem(name) self.idx_chan.addItem(item) # depends on [control=['for'], data=['chan']]
def proj_l1(x, radius=1, out=None):
    r"""Projection onto l1-ball.

    Projection onto::

        ``{ x \in X | ||x||_1 \leq r}``

    with ``r`` being the radius.

    Parameters
    ----------
    x : `LinearSpaceElement`
        Element to project.
    radius : positive float, optional
        Radius ``r`` of the ball.
    out : `LinearSpaceElement`, optional
        Element to which the result is written.

    Returns
    -------
    out : `LinearSpaceElement`
        Projection of ``x`` onto the l1-ball. If ``out`` was given, the
        returned object is a reference to it.

    Notes
    -----
    The projection onto an l1-ball can be computed by projection onto a
    simplex, see [D+2008] for details.

    References
    ----------
    [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T.
    *Efficient Projections onto the L1-ball for Learning in High
    dimensions*. ICML 2008, pp. 272-279.
    http://doi.org/10.1145/1390156.1390191

    See Also
    --------
    proximal_linfty : proximal for l-infinity norm
    proj_simplex : projection onto simplex
    """
    if out is None:
        out = x.space.element()

    u = x.ufuncs.absolute()
    v = x.ufuncs.sign()

    proj_simplex(u, radius, out)
    out *= v

    return out
def function[proj_l1, parameter[x, radius, out]]: constant[Projection onto l1-ball. Projection onto:: ``{ x \in X | ||x||_1 \leq r}`` with ``r`` being the radius. Parameters ---------- space : `LinearSpace` Space / domain ``X``. radius : positive float, optional Radius ``r`` of the ball. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- The projection onto an l1-ball can be computed by projection onto a simplex, see [D+2008] for details. References ---------- [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T. *Efficient Projections onto the L1-ball for Learning in High dimensions*. ICML 2008, pp. 272-279. http://doi.org/10.1145/1390156.1390191 See Also -------- proximal_linfty : proximal for l-infinity norm proj_simplex : projection onto simplex ] if compare[name[out] is constant[None]] begin[:] variable[out] assign[=] call[name[x].space.element, parameter[]] variable[u] assign[=] call[name[x].ufuncs.absolute, parameter[]] variable[v] assign[=] call[name[x].ufuncs.sign, parameter[]] call[name[proj_simplex], parameter[name[u], name[radius], name[out]]] <ast.AugAssign object at 0x7da1b1e45db0> return[name[out]]
keyword[def] identifier[proj_l1] ( identifier[x] , identifier[radius] = literal[int] , identifier[out] = keyword[None] ): literal[string] keyword[if] identifier[out] keyword[is] keyword[None] : identifier[out] = identifier[x] . identifier[space] . identifier[element] () identifier[u] = identifier[x] . identifier[ufuncs] . identifier[absolute] () identifier[v] = identifier[x] . identifier[ufuncs] . identifier[sign] () identifier[proj_simplex] ( identifier[u] , identifier[radius] , identifier[out] ) identifier[out] *= identifier[v] keyword[return] identifier[out]
def proj_l1(x, radius=1, out=None):
    """Projection onto l1-ball.

    Projection onto::

        ``{ x \\in X | ||x||_1 \\leq r}``

    with ``r`` being the radius.

    Parameters
    ----------
    x : `LinearSpaceElement`
        Element to project.
    radius : positive float, optional
        Radius ``r`` of the ball.
    out : `LinearSpaceElement`, optional
        Element to which the result is written.

    Returns
    -------
    out : `LinearSpaceElement`
        Projection of ``x`` onto the l1-ball. If ``out`` was given, the
        returned object is a reference to it.

    Notes
    -----
    The projection onto an l1-ball can be computed by projection onto a
    simplex, see [D+2008] for details.

    References
    ----------
    [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T.
    *Efficient Projections onto the L1-ball for Learning in High
    dimensions*. ICML 2008, pp. 272-279.
    http://doi.org/10.1145/1390156.1390191

    See Also
    --------
    proximal_linfty : proximal for l-infinity norm
    proj_simplex : projection onto simplex
    """
    if out is None:
        out = x.space.element() # depends on [control=['if'], data=['out']]
    u = x.ufuncs.absolute()
    v = x.ufuncs.sign()
    proj_simplex(u, radius, out)
    out *= v
    return out
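For readers without ODL, the simplex-based l1 projection cited as [D+2008] is easy to sketch in plain NumPy. This is an independent illustration of the algorithm, not the ODL implementation above:

import numpy as np

def proj_l1_numpy(x, radius=1.0):
    # Euclidean projection of x onto {z : ||z||_1 <= radius}
    # via the sort-and-threshold scheme of Duchi et al. (2008).
    x = np.asarray(x, dtype=float)
    if np.abs(x).sum() <= radius:
        return x.copy()                   # already inside the ball
    u = np.sort(np.abs(x))[::-1]          # magnitudes, descending
    css = np.cumsum(u)
    k = np.arange(1, x.size + 1)
    rho = np.nonzero(u * k > css - radius)[0][-1]
    theta = (css[rho] - radius) / (rho + 1.0)
    return np.sign(x) * np.maximum(np.abs(x) - theta, 0.0)

print(proj_l1_numpy([3.0, -1.0]))   # [ 1. -0.] -- the l1 norm is now exactly 1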
def calculate_tip_probe_hotspots( tip_length: float, tip_probe_settings: tip_probe_config)\ -> List[HotSpot]: """ Generate a list of tuples describing motions for doing the xy part of tip probe based on the config's description of the tip probe box. """ # probe_dimensions is the external bounding box of the probe unit size_x, size_y, size_z = tip_probe_settings.dimensions rel_x_start = (size_x / 2) + tip_probe_settings.switch_clearance rel_y_start = (size_y / 2) + tip_probe_settings.switch_clearance # Ensure that the nozzle will clear the probe unit and tip will clear deck nozzle_safe_z = round((size_z - tip_length) + tip_probe_settings.z_clearance.normal, 3) z_start = max(tip_probe_settings.z_clearance.deck, nozzle_safe_z) switch_offset = tip_probe_settings.switch_offset # Each list item defines axis we are probing for, starting position vector # and travel distance neg_x = HotSpot('x', -rel_x_start, switch_offset[0], z_start, size_x) pos_x = HotSpot('x', rel_x_start, switch_offset[0], z_start, -size_x) neg_y = HotSpot('y', switch_offset[1], -rel_y_start, z_start, size_y) pos_y = HotSpot('y', switch_offset[1], rel_y_start, z_start, -size_y) z = HotSpot( 'z', 0, switch_offset[2], tip_probe_settings.center[2] + tip_probe_settings.z_clearance.start, -size_z) return [ neg_x, pos_x, neg_y, pos_y, z ]
def function[calculate_tip_probe_hotspots, parameter[tip_length, tip_probe_settings]]: constant[ Generate a list of tuples describing motions for doing the xy part of tip probe based on the config's description of the tip probe box. ] <ast.Tuple object at 0x7da20e9b2f50> assign[=] name[tip_probe_settings].dimensions variable[rel_x_start] assign[=] binary_operation[binary_operation[name[size_x] / constant[2]] + name[tip_probe_settings].switch_clearance] variable[rel_y_start] assign[=] binary_operation[binary_operation[name[size_y] / constant[2]] + name[tip_probe_settings].switch_clearance] variable[nozzle_safe_z] assign[=] call[name[round], parameter[binary_operation[binary_operation[name[size_z] - name[tip_length]] + name[tip_probe_settings].z_clearance.normal], constant[3]]] variable[z_start] assign[=] call[name[max], parameter[name[tip_probe_settings].z_clearance.deck, name[nozzle_safe_z]]] variable[switch_offset] assign[=] name[tip_probe_settings].switch_offset variable[neg_x] assign[=] call[name[HotSpot], parameter[constant[x], <ast.UnaryOp object at 0x7da20e9b2890>, call[name[switch_offset]][constant[0]], name[z_start], name[size_x]]] variable[pos_x] assign[=] call[name[HotSpot], parameter[constant[x], name[rel_x_start], call[name[switch_offset]][constant[0]], name[z_start], <ast.UnaryOp object at 0x7da20e9b33a0>]] variable[neg_y] assign[=] call[name[HotSpot], parameter[constant[y], call[name[switch_offset]][constant[1]], <ast.UnaryOp object at 0x7da18eb54250>, name[z_start], name[size_y]]] variable[pos_y] assign[=] call[name[HotSpot], parameter[constant[y], call[name[switch_offset]][constant[1]], name[rel_y_start], name[z_start], <ast.UnaryOp object at 0x7da18eb54610>]] variable[z] assign[=] call[name[HotSpot], parameter[constant[z], constant[0], call[name[switch_offset]][constant[2]], binary_operation[call[name[tip_probe_settings].center][constant[2]] + name[tip_probe_settings].z_clearance.start], <ast.UnaryOp object at 0x7da18eb54970>]] return[list[[<ast.Name object at 0x7da18eb57880>, <ast.Name object at 0x7da18eb57940>, <ast.Name object at 0x7da18eb548e0>, <ast.Name object at 0x7da18eb57cd0>, <ast.Name object at 0x7da18eb56170>]]]
keyword[def] identifier[calculate_tip_probe_hotspots] ( identifier[tip_length] : identifier[float] , identifier[tip_probe_settings] : identifier[tip_probe_config] )-> identifier[List] [ identifier[HotSpot] ]: literal[string] identifier[size_x] , identifier[size_y] , identifier[size_z] = identifier[tip_probe_settings] . identifier[dimensions] identifier[rel_x_start] =( identifier[size_x] / literal[int] )+ identifier[tip_probe_settings] . identifier[switch_clearance] identifier[rel_y_start] =( identifier[size_y] / literal[int] )+ identifier[tip_probe_settings] . identifier[switch_clearance] identifier[nozzle_safe_z] = identifier[round] (( identifier[size_z] - identifier[tip_length] ) + identifier[tip_probe_settings] . identifier[z_clearance] . identifier[normal] , literal[int] ) identifier[z_start] = identifier[max] ( identifier[tip_probe_settings] . identifier[z_clearance] . identifier[deck] , identifier[nozzle_safe_z] ) identifier[switch_offset] = identifier[tip_probe_settings] . identifier[switch_offset] identifier[neg_x] = identifier[HotSpot] ( literal[string] , - identifier[rel_x_start] , identifier[switch_offset] [ literal[int] ], identifier[z_start] , identifier[size_x] ) identifier[pos_x] = identifier[HotSpot] ( literal[string] , identifier[rel_x_start] , identifier[switch_offset] [ literal[int] ], identifier[z_start] , - identifier[size_x] ) identifier[neg_y] = identifier[HotSpot] ( literal[string] , identifier[switch_offset] [ literal[int] ], - identifier[rel_y_start] , identifier[z_start] , identifier[size_y] ) identifier[pos_y] = identifier[HotSpot] ( literal[string] , identifier[switch_offset] [ literal[int] ], identifier[rel_y_start] , identifier[z_start] , - identifier[size_y] ) identifier[z] = identifier[HotSpot] ( literal[string] , literal[int] , identifier[switch_offset] [ literal[int] ], identifier[tip_probe_settings] . identifier[center] [ literal[int] ]+ identifier[tip_probe_settings] . identifier[z_clearance] . identifier[start] , - identifier[size_z] ) keyword[return] [ identifier[neg_x] , identifier[pos_x] , identifier[neg_y] , identifier[pos_y] , identifier[z] ]
def calculate_tip_probe_hotspots(tip_length: float, tip_probe_settings: tip_probe_config) -> List[HotSpot]: """ Generate a list of tuples describing motions for doing the xy part of tip probe based on the config's description of the tip probe box. """ # probe_dimensions is the external bounding box of the probe unit (size_x, size_y, size_z) = tip_probe_settings.dimensions rel_x_start = size_x / 2 + tip_probe_settings.switch_clearance rel_y_start = size_y / 2 + tip_probe_settings.switch_clearance # Ensure that the nozzle will clear the probe unit and tip will clear deck nozzle_safe_z = round(size_z - tip_length + tip_probe_settings.z_clearance.normal, 3) z_start = max(tip_probe_settings.z_clearance.deck, nozzle_safe_z) switch_offset = tip_probe_settings.switch_offset # Each list item defines axis we are probing for, starting position vector # and travel distance neg_x = HotSpot('x', -rel_x_start, switch_offset[0], z_start, size_x) pos_x = HotSpot('x', rel_x_start, switch_offset[0], z_start, -size_x) neg_y = HotSpot('y', switch_offset[1], -rel_y_start, z_start, size_y) pos_y = HotSpot('y', switch_offset[1], rel_y_start, z_start, -size_y) z = HotSpot('z', 0, switch_offset[2], tip_probe_settings.center[2] + tip_probe_settings.z_clearance.start, -size_z) return [neg_x, pos_x, neg_y, pos_y, z]
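A hedged driver for the geometry above. HotSpot and tip_probe_config are stand-in namedtuples inferred from positional usage (the real Opentrons types differ), and all numbers are illustrative; define the stand-ins before the function so its annotations resolve.

from collections import namedtuple

# Stand-ins inferred from positional usage; not the real Opentrons types.
HotSpot = namedtuple('HotSpot', ['axis', 'x', 'y', 'z', 'distance'])
ZClearance = namedtuple('ZClearance', ['normal', 'deck', 'start'])
tip_probe_config = namedtuple('tip_probe_config',
                              ['dimensions', 'switch_clearance',
                               'switch_offset', 'z_clearance', 'center'])

cfg = tip_probe_config(dimensions=(30.0, 30.0, 25.5),
                       switch_clearance=7.5,
                       switch_offset=(2.0, 5.0, -5.0),
                       z_clearance=ZClearance(normal=5.0, deck=5.0, start=20.0),
                       center=(293.0, 301.0, 74.3))

for hotspot in calculate_tip_probe_hotspots(51.7, cfg):
    print(hotspot)   # five probing moves: -x, +x, -y, +y, z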
def load_network_model(model): ''' Loads metabolic network models in metabolitics. :param str model: model name ''' if type(model) == str: if model in ['ecoli', 'textbook', 'salmonella']: return cb.test.create_test_model(model) elif model == 'recon2': return cb.io.load_json_model('%s/network_models/%s.json' % (DATASET_PATH, model)) if type(model) == cb.Model: return model
def function[load_network_model, parameter[model]]: constant[ Loads metabolic network models in metabolitics. :param str model: model name ] if compare[call[name[type], parameter[name[model]]] equal[==] name[str]] begin[:] if compare[name[model] in list[[<ast.Constant object at 0x7da1b0a1f370>, <ast.Constant object at 0x7da1b0a1ec50>, <ast.Constant object at 0x7da1b0a1f010>]]] begin[:] return[call[name[cb].test.create_test_model, parameter[name[model]]]] if compare[call[name[type], parameter[name[model]]] equal[==] name[cb].Model] begin[:] return[name[model]]
keyword[def] identifier[load_network_model] ( identifier[model] ): literal[string] keyword[if] identifier[type] ( identifier[model] )== identifier[str] : keyword[if] identifier[model] keyword[in] [ literal[string] , literal[string] , literal[string] ]: keyword[return] identifier[cb] . identifier[test] . identifier[create_test_model] ( identifier[model] ) keyword[elif] identifier[model] == literal[string] : keyword[return] identifier[cb] . identifier[io] . identifier[load_json_model] ( literal[string] % ( identifier[DATASET_PATH] , identifier[model] )) keyword[if] identifier[type] ( identifier[model] )== identifier[cb] . identifier[Model] : keyword[return] identifier[model]
def load_network_model(model): """ Loads metabolic network models in metabolitics. :param str model: model name """ if type(model) == str: if model in ['ecoli', 'textbook', 'salmonella']: return cb.test.create_test_model(model) # depends on [control=['if'], data=['model']] elif model == 'recon2': return cb.io.load_json_model('%s/network_models/%s.json' % (DATASET_PATH, model)) # depends on [control=['if'], data=['model']] # depends on [control=['if'], data=[]] if type(model) == cb.Model: return model # depends on [control=['if'], data=[]]
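A minimal usage sketch, assuming cobrapy is installed (the 'textbook' model ships with cobra's test data) and DATASET_PATH points at a directory holding network_models/recon2.json:

model = load_network_model('textbook')
print(len(model.reactions), len(model.metabolites))  # e.g. 95 72 for the textbook model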
def deserialize_from_headers(headers): """Deserialize a DataONE Exception that is stored in a map of HTTP headers (used in responses to HTTP HEAD requests).""" return create_exception_by_name( _get_header(headers, 'DataONE-Exception-Name'), _get_header(headers, 'DataONE-Exception-DetailCode'), _get_header(headers, 'DataONE-Exception-Description'), _get_header(headers, 'DataONE-Exception-TraceInformation'), _get_header(headers, 'DataONE-Exception-Identifier'), _get_header(headers, 'DataONE-Exception-NodeId'), )
def function[deserialize_from_headers, parameter[headers]]: constant[Deserialize a DataONE Exception that is stored in a map of HTTP headers (used in responses to HTTP HEAD requests).] return[call[name[create_exception_by_name], parameter[call[name[_get_header], parameter[name[headers], constant[DataONE-Exception-Name]]], call[name[_get_header], parameter[name[headers], constant[DataONE-Exception-DetailCode]]], call[name[_get_header], parameter[name[headers], constant[DataONE-Exception-Description]]], call[name[_get_header], parameter[name[headers], constant[DataONE-Exception-TraceInformation]]], call[name[_get_header], parameter[name[headers], constant[DataONE-Exception-Identifier]]], call[name[_get_header], parameter[name[headers], constant[DataONE-Exception-NodeId]]]]]]
keyword[def] identifier[deserialize_from_headers] ( identifier[headers] ): literal[string] keyword[return] identifier[create_exception_by_name] ( identifier[_get_header] ( identifier[headers] , literal[string] ), identifier[_get_header] ( identifier[headers] , literal[string] ), identifier[_get_header] ( identifier[headers] , literal[string] ), identifier[_get_header] ( identifier[headers] , literal[string] ), identifier[_get_header] ( identifier[headers] , literal[string] ), identifier[_get_header] ( identifier[headers] , literal[string] ), )
def deserialize_from_headers(headers): """Deserialize a DataONE Exception that is stored in a map of HTTP headers (used in responses to HTTP HEAD requests).""" return create_exception_by_name(_get_header(headers, 'DataONE-Exception-Name'), _get_header(headers, 'DataONE-Exception-DetailCode'), _get_header(headers, 'DataONE-Exception-Description'), _get_header(headers, 'DataONE-Exception-TraceInformation'), _get_header(headers, 'DataONE-Exception-Identifier'), _get_header(headers, 'DataONE-Exception-NodeId'))
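A hedged round-trip sketch; the header names follow the convention read above, and create_exception_by_name is assumed to be the DataONE exception factory defined alongside this helper:

headers = {
    'DataONE-Exception-Name': 'NotFound',
    'DataONE-Exception-DetailCode': '1800',
    'DataONE-Exception-Description': 'No object with the given identifier',
    'DataONE-Exception-TraceInformation': '',
    'DataONE-Exception-Identifier': 'pid-123',
    'DataONE-Exception-NodeId': 'urn:node:Demo',
}
exc = deserialize_from_headers(headers)
print(type(exc).__name__)  # NotFound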
def time_slices_to_layers(graphs, interslice_weight=1, slice_attr='slice', vertex_id_attr='id', edge_type_attr='type', weight_attr='weight'): """ Convert time slices to layer graphs. Each graph is considered to represent a time slice. This function simply connects all the consecutive slices (i.e. the slice graph) with an ``interslice_weight``. The further conversion is then delegated to :func:`slices_to_layers`, which also provides further details. See Also -------- :func:`find_partition_temporal` :func:`slices_to_layers` """ G_slices = _ig.Graph.Tree(len(graphs), 1, mode=_ig.TREE_UNDIRECTED) G_slices.es[weight_attr] = interslice_weight G_slices.vs[slice_attr] = graphs return slices_to_layers(G_slices, slice_attr, vertex_id_attr, edge_type_attr, weight_attr)
def function[time_slices_to_layers, parameter[graphs, interslice_weight, slice_attr, vertex_id_attr, edge_type_attr, weight_attr]]: constant[ Convert time slices to layer graphs. Each graph is considered to represent a time slice. This function simply connects all the consecutive slices (i.e. the slice graph) with an ``interslice_weight``. The further conversion is then delegated to :func:`slices_to_layers`, which also provides further details. See Also -------- :func:`find_partition_temporal` :func:`slices_to_layers` ] variable[G_slices] assign[=] call[name[_ig].Graph.Tree, parameter[call[name[len], parameter[name[graphs]]], constant[1]]] call[name[G_slices].es][name[weight_attr]] assign[=] name[interslice_weight] call[name[G_slices].vs][name[slice_attr]] assign[=] name[graphs] return[call[name[slices_to_layers], parameter[name[G_slices], name[slice_attr], name[vertex_id_attr], name[edge_type_attr], name[weight_attr]]]]
keyword[def] identifier[time_slices_to_layers] ( identifier[graphs] , identifier[interslice_weight] = literal[int] , identifier[slice_attr] = literal[string] , identifier[vertex_id_attr] = literal[string] , identifier[edge_type_attr] = literal[string] , identifier[weight_attr] = literal[string] ): literal[string] identifier[G_slices] = identifier[_ig] . identifier[Graph] . identifier[Tree] ( identifier[len] ( identifier[graphs] ), literal[int] , identifier[mode] = identifier[_ig] . identifier[TREE_UNDIRECTED] ) identifier[G_slices] . identifier[es] [ identifier[weight_attr] ]= identifier[interslice_weight] identifier[G_slices] . identifier[vs] [ identifier[slice_attr] ]= identifier[graphs] keyword[return] identifier[slices_to_layers] ( identifier[G_slices] , identifier[slice_attr] , identifier[vertex_id_attr] , identifier[edge_type_attr] , identifier[weight_attr] )
def time_slices_to_layers(graphs, interslice_weight=1, slice_attr='slice', vertex_id_attr='id', edge_type_attr='type', weight_attr='weight'): """ Convert time slices to layer graphs. Each graph is considered to represent a time slice. This function simply connects all the consecutive slices (i.e. the slice graph) with an ``interslice_weight``. The further conversion is then delegated to :func:`slices_to_layers`, which also provides further details. See Also -------- :func:`find_partition_temporal` :func:`slices_to_layers` """ G_slices = _ig.Graph.Tree(len(graphs), 1, mode=_ig.TREE_UNDIRECTED) G_slices.es[weight_attr] = interslice_weight G_slices.vs[slice_attr] = graphs return slices_to_layers(G_slices, slice_attr, vertex_id_attr, edge_type_attr, weight_attr)
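A hedged usage sketch with python-igraph. Each slice graph needs an 'id' vertex attribute so the same node can be matched across slices, and the return shape is assumed to be the (layer graphs, interslice graph, union graph) triple that slices_to_layers produces in leidenalg:

import igraph as ig

slices = []
for _ in range(3):
    g = ig.Graph.Erdos_Renyi(n=50, p=0.1)
    g.vs['id'] = list(range(50))  # stable node identity across slices
    slices.append(g)

layers, interslice_layer, G_union = time_slices_to_layers(slices, interslice_weight=0.5)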
def main(): """Entry point when running as script from commandline.""" from docopt import docopt args = docopt(__doc__) infile = args['INFILE'] outfile = args['OUTFILE'] i3extract(infile, outfile)
def function[main, parameter[]]: constant[Entry point when running as script from commandline.] from relative_module[docopt] import module[docopt] variable[args] assign[=] call[name[docopt], parameter[name[__doc__]]] variable[infile] assign[=] call[name[args]][constant[INFILE]] variable[outfile] assign[=] call[name[args]][constant[OUTFILE]] call[name[i3extract], parameter[name[infile], name[outfile]]]
keyword[def] identifier[main] (): literal[string] keyword[from] identifier[docopt] keyword[import] identifier[docopt] identifier[args] = identifier[docopt] ( identifier[__doc__] ) identifier[infile] = identifier[args] [ literal[string] ] identifier[outfile] = identifier[args] [ literal[string] ] identifier[i3extract] ( identifier[infile] , identifier[outfile] )
def main(): """Entry point when running as script from commandline.""" from docopt import docopt args = docopt(__doc__) infile = args['INFILE'] outfile = args['OUTFILE'] i3extract(infile, outfile)
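docopt builds the args dict from the module-level usage string, which is not shown in this snippet; a hedged sketch of what that docstring presumably looks like for these keys:

# Hypothetical module docstring consumed by docopt(__doc__):
"""I3 extractor.

Usage:
    i3extract INFILE OUTFILE
"""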
def domains(db, domain=None, top=False):
    """List the domains available in the registry.

    The function will return the list of domains. Setting the top flag,
    it will look for those domains that are top domains. If domain parameter
    is set, it will only return the information about that domain.

    When both parameters are set, it will first search for the given domain.
    If it is not found, it will look for its top domains. If the domain
    neither exists nor has top domains, a 'NotFoundError' exception will be
    raised.

    :param db: database manager
    :param domain: name of the domain
    :param top: filter by top domains

    :returns: a list of domains

    :raises NotFoundError: raised when the given domain is not found in the
        registry
    """
    doms = []

    with db.connect() as session:
        if domain:
            dom = find_domain(session, domain)

            if not dom:
                if not top:
                    raise NotFoundError(entity=domain)
                else:
                    # Adds a dot to the beginning of the domain.
                    # Useful to compare domains like example.com and
                    # myexample.com
                    add_dot = lambda d: '.' + d if not d.startswith('.') else d

                    d = add_dot(domain)

                    tops = session.query(Domain).\
                        filter(Domain.is_top_domain).order_by(Domain.domain).all()

                    doms = [t for t in tops if d.endswith(add_dot(t.domain))]

                    if not doms:
                        raise NotFoundError(entity=domain)
            else:
                doms = [dom]
        else:
            query = session.query(Domain)

            if top:
                query = query.filter(Domain.is_top_domain)

            doms = query.order_by(Domain.domain).all()

        # Detach objects from the session
        session.expunge_all()

    return doms
def function[domains, parameter[db, domain, top]]: constant[List the domains available in the registry. The function will return the list of domains. Setting the top flag, it will look for those domains that are top domains. If domain parameter is set, it will only return the information about that domain. When both parameters are set, it will first search for the given domain. If it is not found, it will look for its top domains. If the domain neither exists nor has top domains, a 'NotFoundError' exception will be raised. :param db: database manager :param domain: name of the domain :param top: filter by top domains :returns: a list of domains :raises NotFoundError: raised when the given domain is not found in the registry ] variable[doms] assign[=] list[[]] with call[name[db].connect, parameter[]] begin[:] if name[domain] begin[:] variable[dom] assign[=] call[name[find_domain], parameter[name[session], name[domain]]] if <ast.UnaryOp object at 0x7da1b0e678e0> begin[:] if <ast.UnaryOp object at 0x7da1b0e66440> begin[:] <ast.Raise object at 0x7da1b0e657b0> call[name[session].expunge_all, parameter[]] return[name[doms]]
keyword[def] identifier[domains] ( identifier[db] , identifier[domain] = keyword[None] , identifier[top] = keyword[False] ): literal[string] identifier[doms] =[] keyword[with] identifier[db] . identifier[connect] () keyword[as] identifier[session] : keyword[if] identifier[domain] : identifier[dom] = identifier[find_domain] ( identifier[session] , identifier[domain] ) keyword[if] keyword[not] identifier[dom] : keyword[if] keyword[not] identifier[top] : keyword[raise] identifier[NotFoundError] ( identifier[entity] = identifier[domain] ) keyword[else] : identifier[add_dot] = keyword[lambda] identifier[d] : literal[string] + identifier[d] keyword[if] keyword[not] identifier[d] . identifier[startswith] ( literal[string] ) keyword[else] identifier[d] identifier[d] = identifier[add_dot] ( identifier[domain] ) identifier[tops] = identifier[session] . identifier[query] ( identifier[Domain] ). identifier[filter] ( identifier[Domain] . identifier[is_top_domain] ). identifier[order_by] ( identifier[Domain] . identifier[domain] ). identifier[all] () identifier[doms] =[ identifier[t] keyword[for] identifier[t] keyword[in] identifier[tops] keyword[if] identifier[d] . identifier[endswith] ( identifier[add_dot] ( identifier[t] . identifier[domain] ))] keyword[if] keyword[not] identifier[doms] : keyword[raise] identifier[NotFoundError] ( identifier[entity] = identifier[domain] ) keyword[else] : identifier[doms] =[ identifier[dom] ] keyword[else] : identifier[query] = identifier[session] . identifier[query] ( identifier[Domain] ) keyword[if] identifier[top] : identifier[query] = identifier[query] . identifier[filter] ( identifier[Domain] . identifier[is_top_domain] ) identifier[doms] = identifier[query] . identifier[order_by] ( identifier[Domain] . identifier[domain] ). identifier[all] () identifier[session] . identifier[expunge_all] () keyword[return] identifier[doms]
def domains(db, domain=None, top=False): """List the domains available in the registry. The function will return the list of domains. Setting the top flag, it will look for those domains that are top domains. If domain parameter is set, it will only return the information about that domain. When both parameters are set, it will first search for the given domain. If it is not found, it will look for its top domains. If the domain neither exists nor has top domains, a 'NotFoundError' exception will be raised. :param db: database manager :param domain: name of the domain :param top: filter by top domains :returns: a list of domains :raises NotFoundError: raised when the given domain is not found in the registry """ doms = [] with db.connect() as session: if domain: dom = find_domain(session, domain) if not dom: if not top: raise NotFoundError(entity=domain) # depends on [control=['if'], data=[]] else: # Adds a dot to the beginning of the domain. # Useful to compare domains like example.com and # myexample.com add_dot = lambda d: '.' + d if not d.startswith('.') else d d = add_dot(domain) tops = session.query(Domain).filter(Domain.is_top_domain).order_by(Domain.domain).all() doms = [t for t in tops if d.endswith(add_dot(t.domain))] if not doms: raise NotFoundError(entity=domain) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: doms = [dom] # depends on [control=['if'], data=[]] else: query = session.query(Domain) if top: query = query.filter(Domain.is_top_domain) # depends on [control=['if'], data=[]] doms = query.order_by(Domain.domain).all() # Detach objects from the session session.expunge_all() # depends on [control=['with'], data=['session']] return doms
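The dot-prefix comparison above is what stops 'myexample.com' from matching the top domain 'example.com'; a self-contained illustration:

add_dot = lambda d: '.' + d if not d.startswith('.') else d

print(add_dot('myexample.com').endswith(add_dot('example.com')))     # False
print(add_dot('mail.example.com').endswith(add_dot('example.com')))  # True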
def _wait_job_completion(self): """Wait for the cache to be empty before resizing the pool.""" # Issue a warning to the user about the bad effect of this usage. if len(self._pending_work_items) > 0: warnings.warn("Trying to resize an executor with running jobs: " "waiting for jobs completion before resizing.", UserWarning) mp.util.debug("Executor {} waiting for jobs completion before" " resizing".format(self.executor_id)) # Wait for the completion of the jobs while len(self._pending_work_items) > 0: time.sleep(1e-3)
def function[_wait_job_completion, parameter[self]]: constant[Wait for the cache to be empty before resizing the pool.] if compare[call[name[len], parameter[name[self]._pending_work_items]] greater[>] constant[0]] begin[:] call[name[warnings].warn, parameter[constant[Trying to resize an executor with running jobs: waiting for jobs completion before resizing.], name[UserWarning]]] call[name[mp].util.debug, parameter[call[constant[Executor {} waiting for jobs completion before resizing].format, parameter[name[self].executor_id]]]] while compare[call[name[len], parameter[name[self]._pending_work_items]] greater[>] constant[0]] begin[:] call[name[time].sleep, parameter[constant[0.001]]]
keyword[def] identifier[_wait_job_completion] ( identifier[self] ): literal[string] keyword[if] identifier[len] ( identifier[self] . identifier[_pending_work_items] )> literal[int] : identifier[warnings] . identifier[warn] ( literal[string] literal[string] , identifier[UserWarning] ) identifier[mp] . identifier[util] . identifier[debug] ( literal[string] literal[string] . identifier[format] ( identifier[self] . identifier[executor_id] )) keyword[while] identifier[len] ( identifier[self] . identifier[_pending_work_items] )> literal[int] : identifier[time] . identifier[sleep] ( literal[int] )
def _wait_job_completion(self): """Wait for the cache to be empty before resizing the pool.""" # Issue a warning to the user about the bad effect of this usage. if len(self._pending_work_items) > 0: warnings.warn('Trying to resize an executor with running jobs: waiting for jobs completion before resizing.', UserWarning) mp.util.debug('Executor {} waiting for jobs completion before resizing'.format(self.executor_id)) # depends on [control=['if'], data=[]] # Wait for the completion of the jobs while len(self._pending_work_items) > 0: time.sleep(0.001) # depends on [control=['while'], data=[]]
def _render_item(self, depth, key, value = None, **settings): """ Format single list item. """ strptrn = self.INDENT * depth lchar = self.lchar(settings[self.SETTING_LIST_STYLE]) s = self._es_text(settings, settings[self.SETTING_LIST_FORMATING]) lchar = self.fmt_text(lchar, **s) strptrn = "{}" if value is not None: strptrn += ": {}" s = self._es_text(settings, settings[self.SETTING_TEXT_FORMATING]) strptrn = self.fmt_text(strptrn.format(key, value), **s) return '{} {} {}'.format(self.INDENT * depth, lchar, strptrn)
def function[_render_item, parameter[self, depth, key, value]]: constant[ Format single list item. ] variable[strptrn] assign[=] binary_operation[name[self].INDENT * name[depth]] variable[lchar] assign[=] call[name[self].lchar, parameter[call[name[settings]][name[self].SETTING_LIST_STYLE]]] variable[s] assign[=] call[name[self]._es_text, parameter[name[settings], call[name[settings]][name[self].SETTING_LIST_FORMATING]]] variable[lchar] assign[=] call[name[self].fmt_text, parameter[name[lchar]]] variable[strptrn] assign[=] constant[{}] if compare[name[value] is_not constant[None]] begin[:] <ast.AugAssign object at 0x7da1b2347d30> variable[s] assign[=] call[name[self]._es_text, parameter[name[settings], call[name[settings]][name[self].SETTING_TEXT_FORMATING]]] variable[strptrn] assign[=] call[name[self].fmt_text, parameter[call[name[strptrn].format, parameter[name[key], name[value]]]]] return[call[constant[{} {} {}].format, parameter[binary_operation[name[self].INDENT * name[depth]], name[lchar], name[strptrn]]]]
keyword[def] identifier[_render_item] ( identifier[self] , identifier[depth] , identifier[key] , identifier[value] = keyword[None] ,** identifier[settings] ): literal[string] identifier[strptrn] = identifier[self] . identifier[INDENT] * identifier[depth] identifier[lchar] = identifier[self] . identifier[lchar] ( identifier[settings] [ identifier[self] . identifier[SETTING_LIST_STYLE] ]) identifier[s] = identifier[self] . identifier[_es_text] ( identifier[settings] , identifier[settings] [ identifier[self] . identifier[SETTING_LIST_FORMATING] ]) identifier[lchar] = identifier[self] . identifier[fmt_text] ( identifier[lchar] ,** identifier[s] ) identifier[strptrn] = literal[string] keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] : identifier[strptrn] += literal[string] identifier[s] = identifier[self] . identifier[_es_text] ( identifier[settings] , identifier[settings] [ identifier[self] . identifier[SETTING_TEXT_FORMATING] ]) identifier[strptrn] = identifier[self] . identifier[fmt_text] ( identifier[strptrn] . identifier[format] ( identifier[key] , identifier[value] ),** identifier[s] ) keyword[return] literal[string] . identifier[format] ( identifier[self] . identifier[INDENT] * identifier[depth] , identifier[lchar] , identifier[strptrn] )
def _render_item(self, depth, key, value=None, **settings): """ Format single list item. """ strptrn = self.INDENT * depth lchar = self.lchar(settings[self.SETTING_LIST_STYLE]) s = self._es_text(settings, settings[self.SETTING_LIST_FORMATING]) lchar = self.fmt_text(lchar, **s) strptrn = '{}' if value is not None: strptrn += ': {}' # depends on [control=['if'], data=[]] s = self._es_text(settings, settings[self.SETTING_TEXT_FORMATING]) strptrn = self.fmt_text(strptrn.format(key, value), **s) return '{} {} {}'.format(self.INDENT * depth, lchar, strptrn)
def nvmlDeviceGetMemoryErrorCounter(handle, errorType, counterType, locationType):
    r"""
    /**
     * Retrieves the requested memory error counter for the device.
     *
     * For Fermi &tm; or newer fully supported devices.
     * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts.
     * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts.
     *
     * Only applicable to devices with ECC.
     *
     * Requires ECC Mode to be enabled.
     *
     * See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n
     * See \ref nvmlEccCounterType_t for a description of available counter types.\n
     * See \ref nvmlMemoryLocation_t for a description of available counter locations.\n
     *
     * @param device The identifier of the target device
     * @param errorType Flag that specifies the type of error.
     * @param counterType Flag that specifies the counter-type of the errors.
     * @param locationType Specifies the location of the counter.
     * @param count Reference in which to return the ECC counter
     *
     * @return
     * - \ref NVML_SUCCESS if \a count has been populated
     * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
     * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a bitType, \a counterType or \a locationType is
     * invalid, or \a count is NULL
     * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory
     * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
     * - \ref NVML_ERROR_UNKNOWN on any unexpected error
     */
    nvmlReturn_t DECLDIR nvmlDeviceGetMemoryErrorCounter
    """
    c_count = c_ulonglong()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryErrorCounter")
    ret = fn(handle,
             _nvmlMemoryErrorType_t(errorType),
             _nvmlEccCounterType_t(counterType),
             _nvmlMemoryLocation_t(locationType),
             byref(c_count))
    _nvmlCheckReturn(ret)
    return bytes_to_str(c_count.value)
def function[nvmlDeviceGetMemoryErrorCounter, parameter[handle, errorType, counterType, locationType]]: constant[ /** * Retrieves the requested memory error counter for the device. * * For Fermi &tm; or newer fully supported devices. * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts. * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts. * * Only applicable to devices with ECC. * * Requires ECC Mode to be enabled. * * See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n * See \ref nvmlEccCounterType_t for a description of available counter types.\n * See \ref nvmlMemoryLocation_t for a description of available counter locations.\n * * @param device The identifier of the target device * @param errorType Flag that specifies the type of error. * @param counterType Flag that specifies the counter-type of the errors. * @param locationType Specifies the location of the counter. * @param count Reference in which to return the ECC counter * * @return * - \ref NVML_SUCCESS if \a count has been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a bitType, \a counterType or \a locationType is * invalid, or \a count is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetMemoryErrorCounter ] variable[c_count] assign[=] call[name[c_ulonglong], parameter[]] variable[fn] assign[=] call[name[_nvmlGetFunctionPointer], parameter[constant[nvmlDeviceGetMemoryErrorCounter]]] variable[ret] assign[=] call[name[fn], parameter[name[handle], call[name[_nvmlMemoryErrorType_t], parameter[name[errorType]]], call[name[_nvmlEccCounterType_t], parameter[name[counterType]]], call[name[_nvmlMemoryLocation_t], parameter[name[locationType]]], call[name[byref], parameter[name[c_count]]]]] call[name[_nvmlCheckReturn], parameter[name[ret]]] return[call[name[bytes_to_str], parameter[name[c_count].value]]]
keyword[def] identifier[nvmlDeviceGetMemoryErrorCounter] ( identifier[handle] , identifier[errorType] , identifier[counterType] , identifier[locationType] ): literal[string] identifier[c_count] = identifier[c_ulonglong] () identifier[fn] = identifier[_nvmlGetFunctionPointer] ( literal[string] ) identifier[ret] = identifier[fn] ( identifier[handle] , identifier[_nvmlMemoryErrorType_t] ( identifier[errorType] ), identifier[_nvmlEccCounterType_t] ( identifier[counterType] ), identifier[_nvmlMemoryLocation_t] ( identifier[locationType] ), identifier[byref] ( identifier[c_count] )) identifier[_nvmlCheckReturn] ( identifier[ret] ) keyword[return] identifier[bytes_to_str] ( identifier[c_count] . identifier[value] )
def nvmlDeviceGetMemoryErrorCounter(handle, errorType, counterType, locationType):
    """
    /**
     * Retrieves the requested memory error counter for the device.
     *
     * For Fermi &tm; or newer fully supported devices.
     * Requires \\a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts.
     * Requires \\a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts.
     *
     * Only applicable to devices with ECC.
     *
     * Requires ECC Mode to be enabled.
     *
     * See \\ref nvmlMemoryErrorType_t for a description of available memory error types.\\n
     * See \\ref nvmlEccCounterType_t for a description of available counter types.\\n
     * See \\ref nvmlMemoryLocation_t for a description of available counter locations.\\n
     *
     * @param device The identifier of the target device
     * @param errorType Flag that specifies the type of error.
     * @param counterType Flag that specifies the counter-type of the errors.
     * @param locationType Specifies the location of the counter.
     * @param count Reference in which to return the ECC counter
     *
     * @return
     * - \\ref NVML_SUCCESS if \\a count has been populated
     * - \\ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
     * - \\ref NVML_ERROR_INVALID_ARGUMENT if \\a device, \\a bitType, \\a counterType or \\a locationType is
     * invalid, or \\a count is NULL
     * - \\ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory
     * - \\ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
     * - \\ref NVML_ERROR_UNKNOWN on any unexpected error
     */
    nvmlReturn_t DECLDIR nvmlDeviceGetMemoryErrorCounter
    """
    c_count = c_ulonglong()
    fn = _nvmlGetFunctionPointer('nvmlDeviceGetMemoryErrorCounter')
    ret = fn(handle, _nvmlMemoryErrorType_t(errorType), _nvmlEccCounterType_t(counterType), _nvmlMemoryLocation_t(locationType), byref(c_count))
    _nvmlCheckReturn(ret)
    return bytes_to_str(c_count.value)
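A hedged usage sketch with pynvml constants (requires an NVIDIA driver and an ECC-capable GPU; on other hardware the call raises an NVMLError for the not-supported case):

from pynvml import *

nvmlInit()
handle = nvmlDeviceGetHandleByIndex(0)
count = nvmlDeviceGetMemoryErrorCounter(
    handle,
    NVML_MEMORY_ERROR_TYPE_CORRECTED,
    NVML_VOLATILE_ECC,
    NVML_MEMORY_LOCATION_DEVICE_MEMORY,
)
print(count)
nvmlShutdown()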
def pauseProducing(self): """ Pause the reception of messages by canceling all existing consumers. This does not disconnect from the server. Message reception can be resumed with :meth:`resumeProducing`. Returns: Deferred: fired when the production is paused. """ if not self._running: return # Exit the read loop and cancel the consumer on the server. self._running = False for consumer in self._consumers.values(): yield consumer.channel.basic_cancel(consumer_tag=consumer.tag) _legacy_twisted_log.msg("Paused retrieval of messages for the server queue")
def function[pauseProducing, parameter[self]]: constant[ Pause the reception of messages by canceling all existing consumers. This does not disconnect from the server. Message reception can be resumed with :meth:`resumeProducing`. Returns: Deferred: fired when the production is paused. ] if <ast.UnaryOp object at 0x7da1b056a380> begin[:] return[None] name[self]._running assign[=] constant[False] for taget[name[consumer]] in starred[call[name[self]._consumers.values, parameter[]]] begin[:] <ast.Yield object at 0x7da1b056bc10> call[name[_legacy_twisted_log].msg, parameter[constant[Paused retrieval of messages for the server queue]]]
keyword[def] identifier[pauseProducing] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_running] : keyword[return] identifier[self] . identifier[_running] = keyword[False] keyword[for] identifier[consumer] keyword[in] identifier[self] . identifier[_consumers] . identifier[values] (): keyword[yield] identifier[consumer] . identifier[channel] . identifier[basic_cancel] ( identifier[consumer_tag] = identifier[consumer] . identifier[tag] ) identifier[_legacy_twisted_log] . identifier[msg] ( literal[string] )
def pauseProducing(self): """ Pause the reception of messages by canceling all existing consumers. This does not disconnect from the server. Message reception can be resumed with :meth:`resumeProducing`. Returns: Deferred: fired when the production is paused. """ if not self._running: return # depends on [control=['if'], data=[]] # Exit the read loop and cancel the consumer on the server. self._running = False for consumer in self._consumers.values(): yield consumer.channel.basic_cancel(consumer_tag=consumer.tag) # depends on [control=['for'], data=['consumer']] _legacy_twisted_log.msg('Paused retrieval of messages for the server queue')
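The bare yield above means this method is a generator; in Twisted it is presumably run under defer.inlineCallbacks where the class is defined. A hedged caller sketch:

from twisted.internet import defer

@defer.inlineCallbacks
def drain_and_pause(protocol):
    # Cancels every consumer on the server; messages stop arriving until
    # resumeProducing() is called.
    yield protocol.pauseProducing()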
def touch(fpath, mode=0o666, dir_fd=None, verbose=0, **kwargs): """ change file timestamps Works like the touch unix utility Args: fpath (PathLike): name of the file mode (int): file permissions (python3 and unix only) dir_fd (file): optional directory file descriptor. If specified, fpath is interpreted as relative to this descriptor (python 3 only). verbose (int): verbosity **kwargs : extra args passed to `os.utime` (python 3 only). Returns: PathLike: path to the file References: https://stackoverflow.com/questions/1158076/implement-touch-using-python Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> fpath = join(dpath, 'touch_file') >>> assert not exists(fpath) >>> ub.touch(fpath) >>> assert exists(fpath) >>> os.unlink(fpath) """ if verbose: print('Touching file {}'.format(fpath)) if six.PY2: # nocover with open(fpath, 'a'): os.utime(fpath, None) else: flags = os.O_CREAT | os.O_APPEND with os.fdopen(os.open(fpath, flags=flags, mode=mode, dir_fd=dir_fd)) as f: os.utime(f.fileno() if os.utime in os.supports_fd else fpath, dir_fd=None if os.supports_fd else dir_fd, **kwargs) return fpath
def function[touch, parameter[fpath, mode, dir_fd, verbose]]: constant[ change file timestamps Works like the touch unix utility Args: fpath (PathLike): name of the file mode (int): file permissions (python3 and unix only) dir_fd (file): optional directory file descriptor. If specified, fpath is interpreted as relative to this descriptor (python 3 only). verbose (int): verbosity **kwargs : extra args passed to `os.utime` (python 3 only). Returns: PathLike: path to the file References: https://stackoverflow.com/questions/1158076/implement-touch-using-python Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> fpath = join(dpath, 'touch_file') >>> assert not exists(fpath) >>> ub.touch(fpath) >>> assert exists(fpath) >>> os.unlink(fpath) ] if name[verbose] begin[:] call[name[print], parameter[call[constant[Touching file {}].format, parameter[name[fpath]]]]] if name[six].PY2 begin[:] with call[name[open], parameter[name[fpath], constant[a]]] begin[:] call[name[os].utime, parameter[name[fpath], constant[None]]] return[name[fpath]]
keyword[def] identifier[touch] ( identifier[fpath] , identifier[mode] = literal[int] , identifier[dir_fd] = keyword[None] , identifier[verbose] = literal[int] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[verbose] : identifier[print] ( literal[string] . identifier[format] ( identifier[fpath] )) keyword[if] identifier[six] . identifier[PY2] : keyword[with] identifier[open] ( identifier[fpath] , literal[string] ): identifier[os] . identifier[utime] ( identifier[fpath] , keyword[None] ) keyword[else] : identifier[flags] = identifier[os] . identifier[O_CREAT] | identifier[os] . identifier[O_APPEND] keyword[with] identifier[os] . identifier[fdopen] ( identifier[os] . identifier[open] ( identifier[fpath] , identifier[flags] = identifier[flags] , identifier[mode] = identifier[mode] , identifier[dir_fd] = identifier[dir_fd] )) keyword[as] identifier[f] : identifier[os] . identifier[utime] ( identifier[f] . identifier[fileno] () keyword[if] identifier[os] . identifier[utime] keyword[in] identifier[os] . identifier[supports_fd] keyword[else] identifier[fpath] , identifier[dir_fd] = keyword[None] keyword[if] identifier[os] . identifier[supports_fd] keyword[else] identifier[dir_fd] ,** identifier[kwargs] ) keyword[return] identifier[fpath]
def touch(fpath, mode=438, dir_fd=None, verbose=0, **kwargs): """ change file timestamps Works like the touch unix utility Args: fpath (PathLike): name of the file mode (int): file permissions (python3 and unix only) dir_fd (file): optional directory file descriptor. If specified, fpath is interpreted as relative to this descriptor (python 3 only). verbose (int): verbosity **kwargs : extra args passed to `os.utime` (python 3 only). Returns: PathLike: path to the file References: https://stackoverflow.com/questions/1158076/implement-touch-using-python Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> fpath = join(dpath, 'touch_file') >>> assert not exists(fpath) >>> ub.touch(fpath) >>> assert exists(fpath) >>> os.unlink(fpath) """ if verbose: print('Touching file {}'.format(fpath)) # depends on [control=['if'], data=[]] if six.PY2: # nocover with open(fpath, 'a'): os.utime(fpath, None) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] else: flags = os.O_CREAT | os.O_APPEND with os.fdopen(os.open(fpath, flags=flags, mode=mode, dir_fd=dir_fd)) as f: os.utime(f.fileno() if os.utime in os.supports_fd else fpath, dir_fd=None if os.supports_fd else dir_fd, **kwargs) # depends on [control=['with'], data=['f']] return fpath
def cdf(self, y, f, var): r""" Cumulative density function of the likelihood. Parameters ---------- y: ndarray query quantiles, i.e.\ :math:`P(Y \leq y)`. f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) var: float, ndarray, optional The variance of the distribution, if not input, the initial value of variance is used. Returns ------- cdf: ndarray Cumulative density function evaluated at y. """ var = self._check_param(var) return norm.cdf(y, loc=f, scale=np.sqrt(var))
def function[cdf, parameter[self, y, f, var]]: constant[ Cumulative density function of the likelihood. Parameters ---------- y: ndarray query quantiles, i.e.\ :math:`P(Y \leq y)`. f: ndarray latent function from the GLM prior (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`) var: float, ndarray, optional The variance of the distribution, if not input, the initial value of variance is used. Returns ------- cdf: ndarray Cumulative density function evaluated at y. ] variable[var] assign[=] call[name[self]._check_param, parameter[name[var]]] return[call[name[norm].cdf, parameter[name[y]]]]
keyword[def] identifier[cdf] ( identifier[self] , identifier[y] , identifier[f] , identifier[var] ): literal[string] identifier[var] = identifier[self] . identifier[_check_param] ( identifier[var] ) keyword[return] identifier[norm] . identifier[cdf] ( identifier[y] , identifier[loc] = identifier[f] , identifier[scale] = identifier[np] . identifier[sqrt] ( identifier[var] ))
def cdf(self, y, f, var): """ Cumulative density function of the likelihood. Parameters ---------- y: ndarray query quantiles, i.e.\\ :math:`P(Y \\leq y)`. f: ndarray latent function from the GLM prior (:math:`\\mathbf{f} = \\boldsymbol\\Phi \\mathbf{w}`) var: float, ndarray, optional The variance of the distribution, if not input, the initial value of variance is used. Returns ------- cdf: ndarray Cumulative density function evaluated at y. """ var = self._check_param(var) return norm.cdf(y, loc=f, scale=np.sqrt(var))
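A quick numeric sanity check of the Gaussian CDF this wraps, independent of the surrounding likelihood class:

import numpy as np
from scipy.stats import norm

print(norm.cdf(0.0, loc=0.0, scale=np.sqrt(1.0)))  # 0.5: half the mass lies below the mean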
def get_as_integer_with_default(self, index, default_value):
        """
        Converts array element into an integer or returns default value if conversion is not possible.

        :param index: an index of element to get.

        :param default_value: the default value

        :return: integer value of the element or default value if conversion is not supported.
        """
        value = self[index]
        return IntegerConverter.to_integer_with_default(value, default_value)
def function[get_as_integer_with_default, parameter[self, index, default_value]]: constant[ Converts array element into an integer or returns default value if conversion is not possible. :param index: an index of element to get. :param default_value: the default value :return: integer value of the element or default value if conversion is not supported. ] variable[value] assign[=] call[name[self]][name[index]] return[call[name[IntegerConverter].to_integer_with_default, parameter[name[value], name[default_value]]]]
keyword[def] identifier[get_as_integer_with_default] ( identifier[self] , identifier[index] , identifier[default_value] ): literal[string] identifier[value] = identifier[self] [ identifier[index] ] keyword[return] identifier[IntegerConverter] . identifier[to_integer_with_default] ( identifier[value] , identifier[default_value] )
def get_as_integer_with_default(self, index, default_value): """ Converts array element into an integer or returns default value if conversion is not possible. :param index: an index of element to get. :param default_value: the default value :return: integer value of the element or default value if conversion is not supported. """ value = self[index] return IntegerConverter.to_integer_with_default(value, default_value)
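A hedged usage sketch, assuming the method belongs to a pip-services-style AnyValueArray (the class name and constructor are inferred from the converter call, not shown in the snippet):

values = AnyValueArray(['123', 'ABC'])
print(values.get_as_integer_with_default(0, -1))  # 123: the string parses as an integer
print(values.get_as_integer_with_default(1, -1))  # -1: no conversion, default returned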
def get_string_at_rva(self, rva): """Get an ASCII string located at the given address.""" s = self.get_section_by_rva(rva) if not s: return self.get_string_from_data(0, self.__data__[rva:rva+MAX_STRING_LENGTH]) return self.get_string_from_data( 0, s.get_data(rva, length=MAX_STRING_LENGTH) )
def function[get_string_at_rva, parameter[self, rva]]: constant[Get an ASCII string located at the given address.] variable[s] assign[=] call[name[self].get_section_by_rva, parameter[name[rva]]] if <ast.UnaryOp object at 0x7da1b0e440d0> begin[:] return[call[name[self].get_string_from_data, parameter[constant[0], call[name[self].__data__][<ast.Slice object at 0x7da1b0c50e80>]]]] return[call[name[self].get_string_from_data, parameter[constant[0], call[name[s].get_data, parameter[name[rva]]]]]]
keyword[def] identifier[get_string_at_rva] ( identifier[self] , identifier[rva] ): literal[string] identifier[s] = identifier[self] . identifier[get_section_by_rva] ( identifier[rva] ) keyword[if] keyword[not] identifier[s] : keyword[return] identifier[self] . identifier[get_string_from_data] ( literal[int] , identifier[self] . identifier[__data__] [ identifier[rva] : identifier[rva] + identifier[MAX_STRING_LENGTH] ]) keyword[return] identifier[self] . identifier[get_string_from_data] ( literal[int] , identifier[s] . identifier[get_data] ( identifier[rva] , identifier[length] = identifier[MAX_STRING_LENGTH] ))
def get_string_at_rva(self, rva): """Get an ASCII string located at the given address.""" s = self.get_section_by_rva(rva) if not s: return self.get_string_from_data(0, self.__data__[rva:rva + MAX_STRING_LENGTH]) # depends on [control=['if'], data=[]] return self.get_string_from_data(0, s.get_data(rva, length=MAX_STRING_LENGTH))
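A hedged usage sketch with pefile, where this method lives on the PE object (the file path is illustrative, and the binary is assumed to have an import table):

import pefile

pe = pefile.PE('example.exe')
name_rva = pe.DIRECTORY_ENTRY_IMPORT[0].struct.Name  # RVA of the first imported DLL's name
print(pe.get_string_at_rva(name_rva))                # e.g. b'KERNEL32.dll'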
def insert_text(self, text, at_end=False, error=False, prompt=False): """ Insert text at the current cursor position or at the end of the command line """ if at_end: # Insert text at the end of the command line self.append_text_to_shell(text, error, prompt) else: # Insert text at current cursor position ConsoleBaseWidget.insert_text(self, text)
def function[insert_text, parameter[self, text, at_end, error, prompt]]: constant[ Insert text at the current cursor position or at the end of the command line ] if name[at_end] begin[:] call[name[self].append_text_to_shell, parameter[name[text], name[error], name[prompt]]]
keyword[def] identifier[insert_text] ( identifier[self] , identifier[text] , identifier[at_end] = keyword[False] , identifier[error] = keyword[False] , identifier[prompt] = keyword[False] ): literal[string] keyword[if] identifier[at_end] : identifier[self] . identifier[append_text_to_shell] ( identifier[text] , identifier[error] , identifier[prompt] ) keyword[else] : identifier[ConsoleBaseWidget] . identifier[insert_text] ( identifier[self] , identifier[text] )
def insert_text(self, text, at_end=False, error=False, prompt=False): """ Insert text at the current cursor position or at the end of the command line """ if at_end: # Insert text at the end of the command line self.append_text_to_shell(text, error, prompt) # depends on [control=['if'], data=[]] else: # Insert text at current cursor position ConsoleBaseWidget.insert_text(self, text)
def switch_to_plugin(self):
        """Switch to plugin."""
        # Unmaximize the currently maximized plugin
        if (self.main.last_plugin is not None and
                self.main.last_plugin.ismaximized and
                self.main.last_plugin is not self):
            self.main.maximize_dockwidget()

        # Show plugin only if it was already visible
        if self.get_option('visible_if_project_open'):
            if not self.toggle_view_action.isChecked():
                self.toggle_view_action.setChecked(True)
            self.visibility_changed(True)
def function[switch_to_plugin, parameter[self]]: constant[Switch to plugin.] if <ast.BoolOp object at 0x7da18dc076d0> begin[:] call[name[self].main.maximize_dockwidget, parameter[]] if call[name[self].get_option, parameter[constant[visible_if_project_open]]] begin[:] if <ast.UnaryOp object at 0x7da18dc073d0> begin[:] call[name[self].toggle_view_action.setChecked, parameter[constant[True]]] call[name[self].visibility_changed, parameter[constant[True]]]
keyword[def] identifier[switch_to_plugin] ( identifier[self] ): literal[string] keyword[if] ( identifier[self] . identifier[main] . identifier[last_plugin] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[main] . identifier[last_plugin] . identifier[ismaximized] keyword[and] identifier[self] . identifier[main] . identifier[last_plugin] keyword[is] keyword[not] identifier[self] ): identifier[self] . identifier[main] . identifier[maximize_dockwidget] () keyword[if] identifier[self] . identifier[get_option] ( literal[string] ): keyword[if] keyword[not] identifier[self] . identifier[toggle_view_action] . identifier[isChecked] (): identifier[self] . identifier[toggle_view_action] . identifier[setChecked] ( keyword[True] ) identifier[self] . identifier[visibility_changed] ( keyword[True] )
def switch_to_plugin(self):
    """Switch to plugin.""" # Unmaximize the currently maximized plugin
    if self.main.last_plugin is not None and self.main.last_plugin.ismaximized and (self.main.last_plugin is not self):
        self.main.maximize_dockwidget() # depends on [control=['if'], data=[]] # Show plugin only if it was already visible
    if self.get_option('visible_if_project_open'):
        if not self.toggle_view_action.isChecked():
            self.toggle_view_action.setChecked(True) # depends on [control=['if'], data=[]]
        self.visibility_changed(True) # depends on [control=['if'], data=[]]
def save(self, clean=True): """Serialize into raw representation. Clears the dirty bit by default. Args: clean (bool): Whether to clear the dirty bit. Returns: dict: Raw. """ ret = {} if clean: self._dirty = False else: ret['_dirty'] = self._dirty return ret
def function[save, parameter[self, clean]]: constant[Serialize into raw representation. Clears the dirty bit by default. Args: clean (bool): Whether to clear the dirty bit. Returns: dict: Raw. ] variable[ret] assign[=] dictionary[[], []] if name[clean] begin[:] name[self]._dirty assign[=] constant[False] return[name[ret]]
keyword[def] identifier[save] ( identifier[self] , identifier[clean] = keyword[True] ): literal[string] identifier[ret] ={} keyword[if] identifier[clean] : identifier[self] . identifier[_dirty] = keyword[False] keyword[else] : identifier[ret] [ literal[string] ]= identifier[self] . identifier[_dirty] keyword[return] identifier[ret]
def save(self, clean=True): """Serialize into raw representation. Clears the dirty bit by default. Args: clean (bool): Whether to clear the dirty bit. Returns: dict: Raw. """ ret = {} if clean: self._dirty = False # depends on [control=['if'], data=[]] else: ret['_dirty'] = self._dirty return ret
def subscribe(self, user, verbose=None): """Returns a response after attempting to subscribe a member to the list. """ if not self.email_enabled: raise EmailNotEnabledError("See settings.EMAIL_ENABLED") if not user.email: raise UserEmailError(f"User {user}'s email address is not defined.") response = requests.post( f"{self.api_url}/{self.address}/members", auth=("api", self.api_key), data={ "subscribed": True, "address": user.email, "name": f"{user.first_name} {user.last_name}", "description": f'{user.userprofile.job_title or ""}', "upsert": "yes", }, ) if verbose: sys.stdout.write( f"Subscribing {user.email} to {self.address}. " f"Got response={response.status_code}.\n" ) return response
def function[subscribe, parameter[self, user, verbose]]: constant[Returns a response after attempting to subscribe a member to the list. ] if <ast.UnaryOp object at 0x7da2046216c0> begin[:] <ast.Raise object at 0x7da204622c50> if <ast.UnaryOp object at 0x7da2046223e0> begin[:] <ast.Raise object at 0x7da204620550> variable[response] assign[=] call[name[requests].post, parameter[<ast.JoinedStr object at 0x7da204622b30>]] if name[verbose] begin[:] call[name[sys].stdout.write, parameter[<ast.JoinedStr object at 0x7da204620e80>]] return[name[response]]
keyword[def] identifier[subscribe] ( identifier[self] , identifier[user] , identifier[verbose] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[email_enabled] : keyword[raise] identifier[EmailNotEnabledError] ( literal[string] ) keyword[if] keyword[not] identifier[user] . identifier[email] : keyword[raise] identifier[UserEmailError] ( literal[string] ) identifier[response] = identifier[requests] . identifier[post] ( literal[string] , identifier[auth] =( literal[string] , identifier[self] . identifier[api_key] ), identifier[data] ={ literal[string] : keyword[True] , literal[string] : identifier[user] . identifier[email] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }, ) keyword[if] identifier[verbose] : identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] literal[string] ) keyword[return] identifier[response]
def subscribe(self, user, verbose=None): """Returns a response after attempting to subscribe a member to the list. """ if not self.email_enabled: raise EmailNotEnabledError('See settings.EMAIL_ENABLED') # depends on [control=['if'], data=[]] if not user.email: raise UserEmailError(f"User {user}'s email address is not defined.") # depends on [control=['if'], data=[]] response = requests.post(f'{self.api_url}/{self.address}/members', auth=('api', self.api_key), data={'subscribed': True, 'address': user.email, 'name': f'{user.first_name} {user.last_name}', 'description': f"{user.userprofile.job_title or ''}", 'upsert': 'yes'}) if verbose: sys.stdout.write(f'Subscribing {user.email} to {self.address}. Got response={response.status_code}.\n') # depends on [control=['if'], data=[]] return response
def WriteBuffer(self, responses): """Write the hash received to the blob image.""" index = responses.request_data["index"] if index not in self.state.pending_files: return # Failed to read the file - ignore it. if not responses.success: self._FileFetchFailed(index, responses.request.request.name) return response = responses.First() file_tracker = self.state.pending_files.get(index) if file_tracker: blob_dict = file_tracker.setdefault("blobs", {}) blob_index = responses.request_data["blob_index"] blob_dict[blob_index] = (response.data, response.length) if len(blob_dict) == len(file_tracker["hash_list"]): # Write the file to the data store. stat_entry = file_tracker["stat_entry"] urn = stat_entry.pathspec.AFF4Path(self.client_urn) if data_store.AFF4Enabled(): with aff4.FACTORY.Create( urn, aff4_grr.VFSBlobImage, mode="w", token=self.token) as fd: fd.SetChunksize(self.CHUNK_SIZE) fd.Set(fd.Schema.STAT(stat_entry)) fd.Set(fd.Schema.PATHSPEC(stat_entry.pathspec)) fd.Set(fd.Schema.CONTENT_LAST(rdfvalue.RDFDatetime().Now())) for index in sorted(blob_dict): digest, length = blob_dict[index] fd.AddBlob(rdf_objects.BlobID.FromBytes(digest), length) if data_store.RelationalDBEnabled(): path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry) # Adding files to filestore requires reading data from RELDB, # thus protecting this code with a filestore-read-enabled check. if data_store.RelationalDBEnabled(): blob_refs = [] offset = 0 for index in sorted(blob_dict): digest, size = blob_dict[index] blob_refs.append( rdf_objects.BlobReference( offset=offset, size=size, blob_id=rdf_objects.BlobID.FromBytes(digest))) offset += size hash_obj = file_tracker["hash_obj"] client_path = db.ClientPath.FromPathInfo(self.client_id, path_info) hash_id = file_store.AddFileWithUnknownHash( client_path, blob_refs, use_external_stores=self.state.use_external_stores) # If the hash that we've calculated matches what we got from the # client, then simply store the full hash entry. # Otherwise store just the hash that we've calculated. if hash_id.AsBytes() == hash_obj.sha256: path_info.hash_entry = hash_obj else: path_info.hash_entry.sha256 = hash_id.AsBytes() data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) if (not data_store.RelationalDBEnabled() and self.state.use_external_stores): # Publish the new file event to cause the file to be added to the # filestore. events.Events.PublishEvent( "LegacyFileStore.AddFileToStore", urn, token=self.token) # Save some space. del file_tracker["blobs"] del file_tracker["hash_list"] # File done, remove from the store and close it. self._ReceiveFetchedFile(file_tracker) self.state.files_fetched += 1 if not self.state.files_fetched % 100: self.Log("Fetched %d of %d files.", self.state.files_fetched, self.state.files_to_fetch)
def function[WriteBuffer, parameter[self, responses]]: constant[Write the hash received to the blob image.] variable[index] assign[=] call[name[responses].request_data][constant[index]] if compare[name[index] <ast.NotIn object at 0x7da2590d7190> name[self].state.pending_files] begin[:] return[None] if <ast.UnaryOp object at 0x7da1b1b14490> begin[:] call[name[self]._FileFetchFailed, parameter[name[index], name[responses].request.request.name]] return[None] variable[response] assign[=] call[name[responses].First, parameter[]] variable[file_tracker] assign[=] call[name[self].state.pending_files.get, parameter[name[index]]] if name[file_tracker] begin[:] variable[blob_dict] assign[=] call[name[file_tracker].setdefault, parameter[constant[blobs], dictionary[[], []]]] variable[blob_index] assign[=] call[name[responses].request_data][constant[blob_index]] call[name[blob_dict]][name[blob_index]] assign[=] tuple[[<ast.Attribute object at 0x7da1b1ce6aa0>, <ast.Attribute object at 0x7da1b1ce4940>]] if compare[call[name[len], parameter[name[blob_dict]]] equal[==] call[name[len], parameter[call[name[file_tracker]][constant[hash_list]]]]] begin[:] variable[stat_entry] assign[=] call[name[file_tracker]][constant[stat_entry]] variable[urn] assign[=] call[name[stat_entry].pathspec.AFF4Path, parameter[name[self].client_urn]] if call[name[data_store].AFF4Enabled, parameter[]] begin[:] with call[name[aff4].FACTORY.Create, parameter[name[urn], name[aff4_grr].VFSBlobImage]] begin[:] call[name[fd].SetChunksize, parameter[name[self].CHUNK_SIZE]] call[name[fd].Set, parameter[call[name[fd].Schema.STAT, parameter[name[stat_entry]]]]] call[name[fd].Set, parameter[call[name[fd].Schema.PATHSPEC, parameter[name[stat_entry].pathspec]]]] call[name[fd].Set, parameter[call[name[fd].Schema.CONTENT_LAST, parameter[call[call[name[rdfvalue].RDFDatetime, parameter[]].Now, parameter[]]]]]] for taget[name[index]] in starred[call[name[sorted], parameter[name[blob_dict]]]] begin[:] <ast.Tuple object at 0x7da1b1b159f0> assign[=] call[name[blob_dict]][name[index]] call[name[fd].AddBlob, parameter[call[name[rdf_objects].BlobID.FromBytes, parameter[name[digest]]], name[length]]] if call[name[data_store].RelationalDBEnabled, parameter[]] begin[:] variable[path_info] assign[=] call[name[rdf_objects].PathInfo.FromStatEntry, parameter[name[stat_entry]]] if call[name[data_store].RelationalDBEnabled, parameter[]] begin[:] variable[blob_refs] assign[=] list[[]] variable[offset] assign[=] constant[0] for taget[name[index]] in starred[call[name[sorted], parameter[name[blob_dict]]]] begin[:] <ast.Tuple object at 0x7da1b1b16230> assign[=] call[name[blob_dict]][name[index]] call[name[blob_refs].append, parameter[call[name[rdf_objects].BlobReference, parameter[]]]] <ast.AugAssign object at 0x7da1b1b166b0> variable[hash_obj] assign[=] call[name[file_tracker]][constant[hash_obj]] variable[client_path] assign[=] call[name[db].ClientPath.FromPathInfo, parameter[name[self].client_id, name[path_info]]] variable[hash_id] assign[=] call[name[file_store].AddFileWithUnknownHash, parameter[name[client_path], name[blob_refs]]] if compare[call[name[hash_id].AsBytes, parameter[]] equal[==] name[hash_obj].sha256] begin[:] name[path_info].hash_entry assign[=] name[hash_obj] call[name[data_store].REL_DB.WritePathInfos, parameter[name[self].client_id, list[[<ast.Name object at 0x7da1b1b170d0>]]]] if <ast.BoolOp object at 0x7da1b1b17160> begin[:] call[name[events].Events.PublishEvent, parameter[constant[LegacyFileStore.AddFileToStore], name[urn]]] <ast.Delete object 
at 0x7da1b1b6f820> <ast.Delete object at 0x7da1b1b6f730> call[name[self]._ReceiveFetchedFile, parameter[name[file_tracker]]] <ast.AugAssign object at 0x7da1b1b6f520> if <ast.UnaryOp object at 0x7da1b1b6f400> begin[:] call[name[self].Log, parameter[constant[Fetched %d of %d files.], name[self].state.files_fetched, name[self].state.files_to_fetch]]
keyword[def] identifier[WriteBuffer] ( identifier[self] , identifier[responses] ): literal[string] identifier[index] = identifier[responses] . identifier[request_data] [ literal[string] ] keyword[if] identifier[index] keyword[not] keyword[in] identifier[self] . identifier[state] . identifier[pending_files] : keyword[return] keyword[if] keyword[not] identifier[responses] . identifier[success] : identifier[self] . identifier[_FileFetchFailed] ( identifier[index] , identifier[responses] . identifier[request] . identifier[request] . identifier[name] ) keyword[return] identifier[response] = identifier[responses] . identifier[First] () identifier[file_tracker] = identifier[self] . identifier[state] . identifier[pending_files] . identifier[get] ( identifier[index] ) keyword[if] identifier[file_tracker] : identifier[blob_dict] = identifier[file_tracker] . identifier[setdefault] ( literal[string] ,{}) identifier[blob_index] = identifier[responses] . identifier[request_data] [ literal[string] ] identifier[blob_dict] [ identifier[blob_index] ]=( identifier[response] . identifier[data] , identifier[response] . identifier[length] ) keyword[if] identifier[len] ( identifier[blob_dict] )== identifier[len] ( identifier[file_tracker] [ literal[string] ]): identifier[stat_entry] = identifier[file_tracker] [ literal[string] ] identifier[urn] = identifier[stat_entry] . identifier[pathspec] . identifier[AFF4Path] ( identifier[self] . identifier[client_urn] ) keyword[if] identifier[data_store] . identifier[AFF4Enabled] (): keyword[with] identifier[aff4] . identifier[FACTORY] . identifier[Create] ( identifier[urn] , identifier[aff4_grr] . identifier[VFSBlobImage] , identifier[mode] = literal[string] , identifier[token] = identifier[self] . identifier[token] ) keyword[as] identifier[fd] : identifier[fd] . identifier[SetChunksize] ( identifier[self] . identifier[CHUNK_SIZE] ) identifier[fd] . identifier[Set] ( identifier[fd] . identifier[Schema] . identifier[STAT] ( identifier[stat_entry] )) identifier[fd] . identifier[Set] ( identifier[fd] . identifier[Schema] . identifier[PATHSPEC] ( identifier[stat_entry] . identifier[pathspec] )) identifier[fd] . identifier[Set] ( identifier[fd] . identifier[Schema] . identifier[CONTENT_LAST] ( identifier[rdfvalue] . identifier[RDFDatetime] (). identifier[Now] ())) keyword[for] identifier[index] keyword[in] identifier[sorted] ( identifier[blob_dict] ): identifier[digest] , identifier[length] = identifier[blob_dict] [ identifier[index] ] identifier[fd] . identifier[AddBlob] ( identifier[rdf_objects] . identifier[BlobID] . identifier[FromBytes] ( identifier[digest] ), identifier[length] ) keyword[if] identifier[data_store] . identifier[RelationalDBEnabled] (): identifier[path_info] = identifier[rdf_objects] . identifier[PathInfo] . identifier[FromStatEntry] ( identifier[stat_entry] ) keyword[if] identifier[data_store] . identifier[RelationalDBEnabled] (): identifier[blob_refs] =[] identifier[offset] = literal[int] keyword[for] identifier[index] keyword[in] identifier[sorted] ( identifier[blob_dict] ): identifier[digest] , identifier[size] = identifier[blob_dict] [ identifier[index] ] identifier[blob_refs] . identifier[append] ( identifier[rdf_objects] . identifier[BlobReference] ( identifier[offset] = identifier[offset] , identifier[size] = identifier[size] , identifier[blob_id] = identifier[rdf_objects] . identifier[BlobID] . 
identifier[FromBytes] ( identifier[digest] ))) identifier[offset] += identifier[size] identifier[hash_obj] = identifier[file_tracker] [ literal[string] ] identifier[client_path] = identifier[db] . identifier[ClientPath] . identifier[FromPathInfo] ( identifier[self] . identifier[client_id] , identifier[path_info] ) identifier[hash_id] = identifier[file_store] . identifier[AddFileWithUnknownHash] ( identifier[client_path] , identifier[blob_refs] , identifier[use_external_stores] = identifier[self] . identifier[state] . identifier[use_external_stores] ) keyword[if] identifier[hash_id] . identifier[AsBytes] ()== identifier[hash_obj] . identifier[sha256] : identifier[path_info] . identifier[hash_entry] = identifier[hash_obj] keyword[else] : identifier[path_info] . identifier[hash_entry] . identifier[sha256] = identifier[hash_id] . identifier[AsBytes] () identifier[data_store] . identifier[REL_DB] . identifier[WritePathInfos] ( identifier[self] . identifier[client_id] ,[ identifier[path_info] ]) keyword[if] ( keyword[not] identifier[data_store] . identifier[RelationalDBEnabled] () keyword[and] identifier[self] . identifier[state] . identifier[use_external_stores] ): identifier[events] . identifier[Events] . identifier[PublishEvent] ( literal[string] , identifier[urn] , identifier[token] = identifier[self] . identifier[token] ) keyword[del] identifier[file_tracker] [ literal[string] ] keyword[del] identifier[file_tracker] [ literal[string] ] identifier[self] . identifier[_ReceiveFetchedFile] ( identifier[file_tracker] ) identifier[self] . identifier[state] . identifier[files_fetched] += literal[int] keyword[if] keyword[not] identifier[self] . identifier[state] . identifier[files_fetched] % literal[int] : identifier[self] . identifier[Log] ( literal[string] , identifier[self] . identifier[state] . identifier[files_fetched] , identifier[self] . identifier[state] . identifier[files_to_fetch] )
def WriteBuffer(self, responses): """Write the hash received to the blob image.""" index = responses.request_data['index'] if index not in self.state.pending_files: return # depends on [control=['if'], data=[]] # Failed to read the file - ignore it. if not responses.success: self._FileFetchFailed(index, responses.request.request.name) return # depends on [control=['if'], data=[]] response = responses.First() file_tracker = self.state.pending_files.get(index) if file_tracker: blob_dict = file_tracker.setdefault('blobs', {}) blob_index = responses.request_data['blob_index'] blob_dict[blob_index] = (response.data, response.length) if len(blob_dict) == len(file_tracker['hash_list']): # Write the file to the data store. stat_entry = file_tracker['stat_entry'] urn = stat_entry.pathspec.AFF4Path(self.client_urn) if data_store.AFF4Enabled(): with aff4.FACTORY.Create(urn, aff4_grr.VFSBlobImage, mode='w', token=self.token) as fd: fd.SetChunksize(self.CHUNK_SIZE) fd.Set(fd.Schema.STAT(stat_entry)) fd.Set(fd.Schema.PATHSPEC(stat_entry.pathspec)) fd.Set(fd.Schema.CONTENT_LAST(rdfvalue.RDFDatetime().Now())) for index in sorted(blob_dict): (digest, length) = blob_dict[index] fd.AddBlob(rdf_objects.BlobID.FromBytes(digest), length) # depends on [control=['for'], data=['index']] # depends on [control=['with'], data=['fd']] # depends on [control=['if'], data=[]] if data_store.RelationalDBEnabled(): path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry) # Adding files to filestore requires reading data from RELDB, # thus protecting this code with a filestore-read-enabled check. if data_store.RelationalDBEnabled(): blob_refs = [] offset = 0 for index in sorted(blob_dict): (digest, size) = blob_dict[index] blob_refs.append(rdf_objects.BlobReference(offset=offset, size=size, blob_id=rdf_objects.BlobID.FromBytes(digest))) offset += size # depends on [control=['for'], data=['index']] hash_obj = file_tracker['hash_obj'] client_path = db.ClientPath.FromPathInfo(self.client_id, path_info) hash_id = file_store.AddFileWithUnknownHash(client_path, blob_refs, use_external_stores=self.state.use_external_stores) # If the hash that we've calculated matches what we got from the # client, then simply store the full hash entry. # Otherwise store just the hash that we've calculated. if hash_id.AsBytes() == hash_obj.sha256: path_info.hash_entry = hash_obj # depends on [control=['if'], data=[]] else: path_info.hash_entry.sha256 = hash_id.AsBytes() # depends on [control=['if'], data=[]] data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) # depends on [control=['if'], data=[]] if not data_store.RelationalDBEnabled() and self.state.use_external_stores: # Publish the new file event to cause the file to be added to the # filestore. events.Events.PublishEvent('LegacyFileStore.AddFileToStore', urn, token=self.token) # depends on [control=['if'], data=[]] # Save some space. del file_tracker['blobs'] del file_tracker['hash_list'] # File done, remove from the store and close it. self._ReceiveFetchedFile(file_tracker) self.state.files_fetched += 1 if not self.state.files_fetched % 100: self.Log('Fetched %d of %d files.', self.state.files_fetched, self.state.files_to_fetch) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def search( self, q="yellow flower", lang="en", video_type="all", category="", min_width=0, min_height=0, editors_choice="false", safesearch="false", order="popular", page=1, per_page=20, callback="", pretty="false", ): """returns videos API data in dict Videos search :param q :type str :desc A URL encoded search term. If omitted, all images are returned. This value may not exceed 100 characters. Example: "yellow+flower" Default: "yellow+flower" :param lang :type str :desc Language code of the language to be searched in. Accepted values: cs, da, de, en, es, fr, id, it, hu, nl, no, pl, pt, ro, sk, fi, sv, tr, vi, th, bg, ru, el, ja, ko, zh Default: "en" For more info, see https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes :param video_type :type str :desc Filter results by video type. Accepted values: "all", "film", "animation" Default: "all" :param category :type str :desc Filter results by category. Accepted values: fashion, nature, backgrounds, science, education, people, feelings, religion, health, places, animals, industry, food, computer, sports, transportation, travel, buildings, business, music :param min_width :type int :desc Minimum image width Default: 0 :param min_height :type int :desc Minimum image height Default: 0 :param editors_choice :type bool (python-pixabay use "true" and "false" string instead) :desc Select images that have received an Editor's Choice award. Accepted values: "true", "false" Default: "false" :param safesearch :type bool (python-pixabay use "true" and "false" string instead) :desc A flag indicating that only images suitable for all ages should be returned. Accepted values: "true", "false" Default: "false" :param order :type str :desc How the results should be ordered. Accepted values: "popular", "latest" Default: "popular" :param page :type int :desc Returned search results are paginated. Use this parameter to select the page number. Default: 1 :param per_page :type int :desc Determine the number of results per page. Accepted values: 3 - 200 Default: 20 :param callback :type str :desc JSONP callback function name :param pretty :type bool (python-pixabay use "true" and "false" string instead) :desc Indent JSON output. This option should not be used in production. Accepted values: "true", "false" Default: "false" Code Example >>> from pixabay import Video >>> >>> video = Video("api_key") >>> video.search(q="apple", page=1) """ payload = { "key": self.api_key, "q": q, "lang": lang, "video_type": video_type, "category": category, "min_width": min_width, "min_height": min_height, "editors_choice": editors_choice, "safesearch": safesearch, "order": order, "page": page, "per_page": per_page, "callback": callback, "pretty": pretty, } resp = get(self.root_url + "videos/", params=payload) if resp.status_code == 200: return resp.json() else: raise ValueError(resp.text)
def function[search, parameter[self, q, lang, video_type, category, min_width, min_height, editors_choice, safesearch, order, page, per_page, callback, pretty]]: constant[returns videos API data in dict Videos search :param q :type str :desc A URL encoded search term. If omitted, all images are returned. This value may not exceed 100 characters. Example: "yellow+flower" Default: "yellow+flower" :param lang :type str :desc Language code of the language to be searched in. Accepted values: cs, da, de, en, es, fr, id, it, hu, nl, no, pl, pt, ro, sk, fi, sv, tr, vi, th, bg, ru, el, ja, ko, zh Default: "en" For more info, see https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes :param video_type :type str :desc Filter results by video type. Accepted values: "all", "film", "animation" Default: "all" :param category :type str :desc Filter results by category. Accepted values: fashion, nature, backgrounds, science, education, people, feelings, religion, health, places, animals, industry, food, computer, sports, transportation, travel, buildings, business, music :param min_width :type int :desc Minimum image width Default: 0 :param min_height :type int :desc Minimum image height Default: 0 :param editors_choice :type bool (python-pixabay use "true" and "false" string instead) :desc Select images that have received an Editor's Choice award. Accepted values: "true", "false" Default: "false" :param safesearch :type bool (python-pixabay use "true" and "false" string instead) :desc A flag indicating that only images suitable for all ages should be returned. Accepted values: "true", "false" Default: "false" :param order :type str :desc How the results should be ordered. Accepted values: "popular", "latest" Default: "popular" :param page :type int :desc Returned search results are paginated. Use this parameter to select the page number. Default: 1 :param per_page :type int :desc Determine the number of results per page. Accepted values: 3 - 200 Default: 20 :param callback :type str :desc JSONP callback function name :param pretty :type bool (python-pixabay use "true" and "false" string instead) :desc Indent JSON output. This option should not be used in production. 
Accepted values: "true", "false" Default: "false" Code Example >>> from pixabay import Video >>> >>> video = Video("api_key") >>> video.search(q="apple", page=1) ] variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da2044c2cb0>, <ast.Constant object at 0x7da2044c05e0>, <ast.Constant object at 0x7da2044c2b90>, <ast.Constant object at 0x7da2044c12a0>, <ast.Constant object at 0x7da2044c18d0>, <ast.Constant object at 0x7da2044c1300>, <ast.Constant object at 0x7da2044c1210>, <ast.Constant object at 0x7da2044c1b70>, <ast.Constant object at 0x7da2044c3010>, <ast.Constant object at 0x7da2044c36d0>, <ast.Constant object at 0x7da2044c38b0>, <ast.Constant object at 0x7da2044c0f40>, <ast.Constant object at 0x7da2044c1030>, <ast.Constant object at 0x7da2044c13f0>], [<ast.Attribute object at 0x7da2044c1b40>, <ast.Name object at 0x7da2044c2f80>, <ast.Name object at 0x7da2044c3820>, <ast.Name object at 0x7da2044c38e0>, <ast.Name object at 0x7da2044c0580>, <ast.Name object at 0x7da2044c3ca0>, <ast.Name object at 0x7da2044c39a0>, <ast.Name object at 0x7da2044c2b00>, <ast.Name object at 0x7da2044c3610>, <ast.Name object at 0x7da2044c3400>, <ast.Name object at 0x7da2044c2aa0>, <ast.Name object at 0x7da2044c01c0>, <ast.Name object at 0x7da2044c0ca0>, <ast.Name object at 0x7da2044c0910>]] variable[resp] assign[=] call[name[get], parameter[binary_operation[name[self].root_url + constant[videos/]]]] if compare[name[resp].status_code equal[==] constant[200]] begin[:] return[call[name[resp].json, parameter[]]]
keyword[def] identifier[search] ( identifier[self] , identifier[q] = literal[string] , identifier[lang] = literal[string] , identifier[video_type] = literal[string] , identifier[category] = literal[string] , identifier[min_width] = literal[int] , identifier[min_height] = literal[int] , identifier[editors_choice] = literal[string] , identifier[safesearch] = literal[string] , identifier[order] = literal[string] , identifier[page] = literal[int] , identifier[per_page] = literal[int] , identifier[callback] = literal[string] , identifier[pretty] = literal[string] , ): literal[string] identifier[payload] ={ literal[string] : identifier[self] . identifier[api_key] , literal[string] : identifier[q] , literal[string] : identifier[lang] , literal[string] : identifier[video_type] , literal[string] : identifier[category] , literal[string] : identifier[min_width] , literal[string] : identifier[min_height] , literal[string] : identifier[editors_choice] , literal[string] : identifier[safesearch] , literal[string] : identifier[order] , literal[string] : identifier[page] , literal[string] : identifier[per_page] , literal[string] : identifier[callback] , literal[string] : identifier[pretty] , } identifier[resp] = identifier[get] ( identifier[self] . identifier[root_url] + literal[string] , identifier[params] = identifier[payload] ) keyword[if] identifier[resp] . identifier[status_code] == literal[int] : keyword[return] identifier[resp] . identifier[json] () keyword[else] : keyword[raise] identifier[ValueError] ( identifier[resp] . identifier[text] )
def search(self, q='yellow flower', lang='en', video_type='all', category='', min_width=0, min_height=0, editors_choice='false', safesearch='false', order='popular', page=1, per_page=20, callback='', pretty='false'): """returns videos API data in dict Videos search :param q :type str :desc A URL encoded search term. If omitted, all images are returned. This value may not exceed 100 characters. Example: "yellow+flower" Default: "yellow+flower" :param lang :type str :desc Language code of the language to be searched in. Accepted values: cs, da, de, en, es, fr, id, it, hu, nl, no, pl, pt, ro, sk, fi, sv, tr, vi, th, bg, ru, el, ja, ko, zh Default: "en" For more info, see https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes :param video_type :type str :desc Filter results by video type. Accepted values: "all", "film", "animation" Default: "all" :param category :type str :desc Filter results by category. Accepted values: fashion, nature, backgrounds, science, education, people, feelings, religion, health, places, animals, industry, food, computer, sports, transportation, travel, buildings, business, music :param min_width :type int :desc Minimum image width Default: 0 :param min_height :type int :desc Minimum image height Default: 0 :param editors_choice :type bool (python-pixabay use "true" and "false" string instead) :desc Select images that have received an Editor's Choice award. Accepted values: "true", "false" Default: "false" :param safesearch :type bool (python-pixabay use "true" and "false" string instead) :desc A flag indicating that only images suitable for all ages should be returned. Accepted values: "true", "false" Default: "false" :param order :type str :desc How the results should be ordered. Accepted values: "popular", "latest" Default: "popular" :param page :type int :desc Returned search results are paginated. Use this parameter to select the page number. Default: 1 :param per_page :type int :desc Determine the number of results per page. Accepted values: 3 - 200 Default: 20 :param callback :type str :desc JSONP callback function name :param pretty :type bool (python-pixabay use "true" and "false" string instead) :desc Indent JSON output. This option should not be used in production. Accepted values: "true", "false" Default: "false" Code Example >>> from pixabay import Video >>> >>> video = Video("api_key") >>> video.search(q="apple", page=1) """ payload = {'key': self.api_key, 'q': q, 'lang': lang, 'video_type': video_type, 'category': category, 'min_width': min_width, 'min_height': min_height, 'editors_choice': editors_choice, 'safesearch': safesearch, 'order': order, 'page': page, 'per_page': per_page, 'callback': callback, 'pretty': pretty} resp = get(self.root_url + 'videos/', params=payload) if resp.status_code == 200: return resp.json() # depends on [control=['if'], data=[]] else: raise ValueError(resp.text)
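A usage sketch built from the docstring's own example; a real Pixabay API key and network access are assumed, and the 'totalHits'/'hits'/'pageURL' fields follow Pixabay's documented response shape rather than anything shown above.

from pixabay import Video  # python-pixabay, per the docstring example

video = Video("api_key")   # replace with a real Pixabay API key
results = video.search(q="apple", video_type="film", per_page=5, page=1)

# the parsed JSON is a plain dict; these keys come from Pixabay's API docs
print(results["totalHits"])
for hit in results["hits"]:
    print(hit["pageURL"])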
def FPPplots(self, folder=None, format='png', tag=None, **kwargs): """ Make FPP diagnostic plots Makes likelihood "fuzz plot" for each model, a FPP summary figure, a plot of the :class:`TransitSignal`, and writes a ``results.txt`` file. :param folder: (optional) Destination folder for plots/``results.txt``. Default is ``self.folder``. :param format: (optional) Desired format of figures. e.g. ``png``, ``pdf``... :param tag: (optional) If this is provided (string), then filenames will have ``_[tag]`` appended to the filename, before the extension. :param **kwargs: Additional keyword arguments passed to :func:`PopulationSet.lhoodplots`. """ if folder is None: folder = self.folder self.write_results(folder=folder) self.lhoodplots(folder=folder,figformat=format,tag=tag,**kwargs) self.FPPsummary(folder=folder,saveplot=True,figformat=format,tag=tag) self.plotsignal(folder=folder,saveplot=True,figformat=format)
def function[FPPplots, parameter[self, folder, format, tag]]: constant[ Make FPP diagnostic plots Makes likelihood "fuzz plot" for each model, a FPP summary figure, a plot of the :class:`TransitSignal`, and writes a ``results.txt`` file. :param folder: (optional) Destination folder for plots/``results.txt``. Default is ``self.folder``. :param format: (optional) Desired format of figures. e.g. ``png``, ``pdf``... :param tag: (optional) If this is provided (string), then filenames will have ``_[tag]`` appended to the filename, before the extension. :param **kwargs: Additional keyword arguments passed to :func:`PopulationSet.lhoodplots`. ] if compare[name[folder] is constant[None]] begin[:] variable[folder] assign[=] name[self].folder call[name[self].write_results, parameter[]] call[name[self].lhoodplots, parameter[]] call[name[self].FPPsummary, parameter[]] call[name[self].plotsignal, parameter[]]
keyword[def] identifier[FPPplots] ( identifier[self] , identifier[folder] = keyword[None] , identifier[format] = literal[string] , identifier[tag] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[folder] keyword[is] keyword[None] : identifier[folder] = identifier[self] . identifier[folder] identifier[self] . identifier[write_results] ( identifier[folder] = identifier[folder] ) identifier[self] . identifier[lhoodplots] ( identifier[folder] = identifier[folder] , identifier[figformat] = identifier[format] , identifier[tag] = identifier[tag] ,** identifier[kwargs] ) identifier[self] . identifier[FPPsummary] ( identifier[folder] = identifier[folder] , identifier[saveplot] = keyword[True] , identifier[figformat] = identifier[format] , identifier[tag] = identifier[tag] ) identifier[self] . identifier[plotsignal] ( identifier[folder] = identifier[folder] , identifier[saveplot] = keyword[True] , identifier[figformat] = identifier[format] )
def FPPplots(self, folder=None, format='png', tag=None, **kwargs): """ Make FPP diagnostic plots Makes likelihood "fuzz plot" for each model, a FPP summary figure, a plot of the :class:`TransitSignal`, and writes a ``results.txt`` file. :param folder: (optional) Destination folder for plots/``results.txt``. Default is ``self.folder``. :param format: (optional) Desired format of figures. e.g. ``png``, ``pdf``... :param tag: (optional) If this is provided (string), then filenames will have ``_[tag]`` appended to the filename, before the extension. :param **kwargs: Additional keyword arguments passed to :func:`PopulationSet.lhoodplots`. """ if folder is None: folder = self.folder # depends on [control=['if'], data=['folder']] self.write_results(folder=folder) self.lhoodplots(folder=folder, figformat=format, tag=tag, **kwargs) self.FPPsummary(folder=folder, saveplot=True, figformat=format, tag=tag) self.plotsignal(folder=folder, saveplot=True, figformat=format)
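A hedged usage sketch; fpp is assumed to be an already-constructed instance of the class that defines FPPplots, and only the keyword arguments come from the signature above.

# default: figures and results.txt written into fpp.folder as png
fpp.FPPplots()

# pdf figures into a separate folder, with "_run1" appended before the extension
fpp.FPPplots(folder="diagnostics", format="pdf", tag="run1")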
def on_modified(self, event, dry_run=False, remove_uploaded=True): 'Called when a file (or directory) is modified. ' super(ArchiveEventHandler, self).on_modified(event) src_path = event.src_path if event.is_directory: if not platform.is_darwin(): log.info("event is a directory, safe to ignore") return # OSX is behaving erratically and we need to paper over it. # the OS only reports every other file event, # but always fires off a directory event when a file has changed. (OSX 10.9 tested) # so we need to find the actual file changed and then go on from there files = [event.src_path+"/"+f for f in os.listdir(event.src_path)] try: src_path = max(files, key=os.path.getmtime) except (OSError, ValueError): # broken symlink or directory empty return log.info('Modified file detected: %s', src_path) # # we're receiving events at least two times: on file open and on file close. # OSes might report even more # we're only interested in files that are closed (finished), so we try to # open it. if it is locked, we can infer that someone is still writing to it. # this works on platforms: windows, ... ? # TODO: investigate http://stackoverflow.com/a/3876461 for POSIX support try: open(src_path) # win exclusively os.open(src_path, os.O_EXLOCK) # osx exclusively except IOError: # file is not finished log.info('File is not finished') return except AttributeError: # no support for O_EXLOCK (only BSD) pass return self._new(src_path, dry_run, remove_uploaded)
def function[on_modified, parameter[self, event, dry_run, remove_uploaded]]: constant[Called when a file (or directory) is modified. ] call[call[name[super], parameter[name[ArchiveEventHandler], name[self]]].on_modified, parameter[name[event]]] variable[src_path] assign[=] name[event].src_path if name[event].is_directory begin[:] if <ast.UnaryOp object at 0x7da20c7cad70> begin[:] call[name[log].info, parameter[constant[event is a directory, safe to ignore]]] return[None] variable[files] assign[=] <ast.ListComp object at 0x7da20c7c88b0> <ast.Try object at 0x7da20c7caf50> call[name[log].info, parameter[constant[Modified file detectd: %s], name[src_path]]] <ast.Try object at 0x7da20c7c8d00> return[call[name[self]._new, parameter[name[src_path], name[dry_run], name[remove_uploaded]]]]
keyword[def] identifier[on_modified] ( identifier[self] , identifier[event] , identifier[dry_run] = keyword[False] , identifier[remove_uploaded] = keyword[True] ): literal[string] identifier[super] ( identifier[ArchiveEventHandler] , identifier[self] ). identifier[on_modified] ( identifier[event] ) identifier[src_path] = identifier[event] . identifier[src_path] keyword[if] identifier[event] . identifier[is_directory] : keyword[if] keyword[not] identifier[platform] . identifier[is_darwin] (): identifier[log] . identifier[info] ( literal[string] ) keyword[return] identifier[files] =[ identifier[event] . identifier[src_path] + literal[string] + identifier[f] keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[event] . identifier[src_path] )] keyword[try] : identifier[src_path] = identifier[max] ( identifier[files] , identifier[key] = identifier[os] . identifier[path] . identifier[getmtime] ) keyword[except] ( identifier[OSError] , identifier[ValueError] ) keyword[as] identifier[e] : keyword[return] identifier[log] . identifier[info] ( literal[string] , identifier[src_path] ) keyword[try] : identifier[open] ( identifier[src_path] ) identifier[os] . identifier[open] ( identifier[src_path] , identifier[os] . identifier[O_EXLOCK] ) keyword[except] identifier[IOError] : identifier[log] . identifier[info] ( literal[string] ) keyword[return] keyword[except] identifier[AttributeError] : keyword[pass] keyword[return] identifier[self] . identifier[_new] ( identifier[src_path] , identifier[dry_run] , identifier[remove_uploaded] )
def on_modified(self, event, dry_run=False, remove_uploaded=True): """Called when a file (or directory) is modified. """ super(ArchiveEventHandler, self).on_modified(event) src_path = event.src_path if event.is_directory: if not platform.is_darwin(): log.info('event is a directory, safe to ignore') return # depends on [control=['if'], data=[]] # OSX is behaving erratically and we need to paper over it. # the OS only reports every other file event, # but always fires off a directory event when a file has changed. (OSX 10.9 tested) # so we need to find the actual file changed and then go on from there files = [event.src_path + '/' + f for f in os.listdir(event.src_path)] try: src_path = max(files, key=os.path.getmtime) # depends on [control=['try'], data=[]] except (OSError, ValueError): # broken symlink or directory empty return # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] log.info('Modified file detected: %s', src_path) # # we're receiving events at least two times: on file open and on file close. # OSes might report even more # we're only interested in files that are closed (finished), so we try to # open it. if it is locked, we can infer that someone is still writing to it. # this works on platforms: windows, ... ? # TODO: investigate http://stackoverflow.com/a/3876461 for POSIX support try: open(src_path) # win exclusively os.open(src_path, os.O_EXLOCK) # osx exclusively # depends on [control=['try'], data=[]] except IOError: # file is not finished log.info('File is not finished') return # depends on [control=['except'], data=[]] except AttributeError: # no support for O_EXLOCK (only BSD) pass # depends on [control=['except'], data=[]] return self._new(src_path, dry_run, remove_uploaded)
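The handler above plugs into the watchdog observer loop; a hedged wiring sketch, assuming ArchiveEventHandler can be constructed without arguments (its constructor is not shown) and with an illustrative watch path.

import time
from watchdog.observers import Observer

handler = ArchiveEventHandler()   # assumed constructible without arguments
observer = Observer()
observer.schedule(handler, path="/srv/incoming", recursive=False)
observer.start()
try:
    while True:
        time.sleep(1)             # on_modified fires as files change
finally:
    observer.stop()
    observer.join()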
def interface_options(score=False, raw=False, features=None, rgb=None): """Get an InterfaceOptions for the config.""" interface = sc_pb.InterfaceOptions() interface.score = score interface.raw = raw if features: interface.feature_layer.width = 24 interface.feature_layer.resolution.x = features interface.feature_layer.resolution.y = features interface.feature_layer.minimap_resolution.x = features interface.feature_layer.minimap_resolution.y = features if rgb: interface.render.resolution.x = rgb interface.render.resolution.y = rgb interface.render.minimap_resolution.x = rgb interface.render.minimap_resolution.y = rgb return interface
def function[interface_options, parameter[score, raw, features, rgb]]: constant[Get an InterfaceOptions for the config.] variable[interface] assign[=] call[name[sc_pb].InterfaceOptions, parameter[]] name[interface].score assign[=] name[score] name[interface].raw assign[=] name[raw] if name[features] begin[:] name[interface].feature_layer.width assign[=] constant[24] name[interface].feature_layer.resolution.x assign[=] name[features] name[interface].feature_layer.resolution.y assign[=] name[features] name[interface].feature_layer.minimap_resolution.x assign[=] name[features] name[interface].feature_layer.minimap_resolution.y assign[=] name[features] if name[rgb] begin[:] name[interface].render.resolution.x assign[=] name[rgb] name[interface].render.resolution.y assign[=] name[rgb] name[interface].render.minimap_resolution.x assign[=] name[rgb] name[interface].render.minimap_resolution.y assign[=] name[rgb] return[name[interface]]
keyword[def] identifier[interface_options] ( identifier[score] = keyword[False] , identifier[raw] = keyword[False] , identifier[features] = keyword[None] , identifier[rgb] = keyword[None] ): literal[string] identifier[interface] = identifier[sc_pb] . identifier[InterfaceOptions] () identifier[interface] . identifier[score] = identifier[score] identifier[interface] . identifier[raw] = identifier[raw] keyword[if] identifier[features] : identifier[interface] . identifier[feature_layer] . identifier[width] = literal[int] identifier[interface] . identifier[feature_layer] . identifier[resolution] . identifier[x] = identifier[features] identifier[interface] . identifier[feature_layer] . identifier[resolution] . identifier[y] = identifier[features] identifier[interface] . identifier[feature_layer] . identifier[minimap_resolution] . identifier[x] = identifier[features] identifier[interface] . identifier[feature_layer] . identifier[minimap_resolution] . identifier[y] = identifier[features] keyword[if] identifier[rgb] : identifier[interface] . identifier[render] . identifier[resolution] . identifier[x] = identifier[rgb] identifier[interface] . identifier[render] . identifier[resolution] . identifier[y] = identifier[rgb] identifier[interface] . identifier[render] . identifier[minimap_resolution] . identifier[x] = identifier[rgb] identifier[interface] . identifier[render] . identifier[minimap_resolution] . identifier[y] = identifier[rgb] keyword[return] identifier[interface]
def interface_options(score=False, raw=False, features=None, rgb=None): """Get an InterfaceOptions for the config.""" interface = sc_pb.InterfaceOptions() interface.score = score interface.raw = raw if features: interface.feature_layer.width = 24 interface.feature_layer.resolution.x = features interface.feature_layer.resolution.y = features interface.feature_layer.minimap_resolution.x = features interface.feature_layer.minimap_resolution.y = features # depends on [control=['if'], data=[]] if rgb: interface.render.resolution.x = rgb interface.render.resolution.y = rgb interface.render.minimap_resolution.x = rgb interface.render.minimap_resolution.y = rgb # depends on [control=['if'], data=[]] return interface
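Example calls for the helper above; interface_options is assumed to be importable, and the assertions simply read back the proto fields the function sets.

# 64x64 feature layers with the score interface enabled
interface = interface_options(score=True, features=64)
assert interface.feature_layer.resolution.x == 64
assert interface.feature_layer.minimap_resolution.y == 64

# raw observations plus 256x256 RGB rendering
interface = interface_options(raw=True, rgb=256)
assert interface.raw
assert interface.render.resolution.x == 256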
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]: """ Given a diff, returns a dictionary with the added and deleted lines. The dictionary has 2 keys: "added" and "deleted", each containing the corresponding added or deleted lines. For both keys, the value is a list of Tuple (int, str), corresponding to (number of line in the file, actual line). :param str diff: diff of the commit :return: Dictionary """ lines = diff.split('\n') modified_lines = {'added': [], 'deleted': []} count_deletions = 0 count_additions = 0 for line in lines: line = line.rstrip() count_deletions += 1 count_additions += 1 if line.startswith('@@'): count_deletions, count_additions = self._get_line_numbers(line) if line.startswith('-'): modified_lines['deleted'].append((count_deletions, line[1:])) count_additions -= 1 if line.startswith('+'): modified_lines['added'].append((count_additions, line[1:])) count_deletions -= 1 if line == r'\ No newline at end of file': count_deletions -= 1 count_additions -= 1 return modified_lines
def function[parse_diff, parameter[self, diff]]: constant[ Given a diff, returns a dictionary with the added and deleted lines. The dictionary has 2 keys: "added" and "deleted", each containing the corresponding added or deleted lines. For both keys, the value is a list of Tuple (int, str), corresponding to (number of line in the file, actual line). :param str diff: diff of the commit :return: Dictionary ] variable[lines] assign[=] call[name[diff].split, parameter[constant[ ]]] variable[modified_lines] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b3fa0>, <ast.Constant object at 0x7da20e9b0e20>], [<ast.List object at 0x7da20e9b2380>, <ast.List object at 0x7da20e9b2440>]] variable[count_deletions] assign[=] constant[0] variable[count_additions] assign[=] constant[0] for taget[name[line]] in starred[name[lines]] begin[:] variable[line] assign[=] call[name[line].rstrip, parameter[]] <ast.AugAssign object at 0x7da20e9b0a60> <ast.AugAssign object at 0x7da20e9b2950> if call[name[line].startswith, parameter[constant[@@]]] begin[:] <ast.Tuple object at 0x7da20e9b2770> assign[=] call[name[self]._get_line_numbers, parameter[name[line]]] if call[name[line].startswith, parameter[constant[-]]] begin[:] call[call[name[modified_lines]][constant[deleted]].append, parameter[tuple[[<ast.Name object at 0x7da20e9b2110>, <ast.Subscript object at 0x7da20e9b1c90>]]]] <ast.AugAssign object at 0x7da20e9b19c0> if call[name[line].startswith, parameter[constant[+]]] begin[:] call[call[name[modified_lines]][constant[added]].append, parameter[tuple[[<ast.Name object at 0x7da20e9b01c0>, <ast.Subscript object at 0x7da20e9b3220>]]]] <ast.AugAssign object at 0x7da20e9b3af0> if compare[name[line] equal[==] constant[\ No newline at end of file]] begin[:] <ast.AugAssign object at 0x7da20e9b2590> <ast.AugAssign object at 0x7da20e9b1d50> return[name[modified_lines]]
keyword[def] identifier[parse_diff] ( identifier[self] , identifier[diff] : identifier[str] )-> identifier[Dict] [ identifier[str] , identifier[List] [ identifier[Tuple] [ identifier[int] , identifier[str] ]]]: literal[string] identifier[lines] = identifier[diff] . identifier[split] ( literal[string] ) identifier[modified_lines] ={ literal[string] :[], literal[string] :[]} identifier[count_deletions] = literal[int] identifier[count_additions] = literal[int] keyword[for] identifier[line] keyword[in] identifier[lines] : identifier[line] = identifier[line] . identifier[rstrip] () identifier[count_deletions] += literal[int] identifier[count_additions] += literal[int] keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): identifier[count_deletions] , identifier[count_additions] = identifier[self] . identifier[_get_line_numbers] ( identifier[line] ) keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): identifier[modified_lines] [ literal[string] ]. identifier[append] (( identifier[count_deletions] , identifier[line] [ literal[int] :])) identifier[count_additions] -= literal[int] keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): identifier[modified_lines] [ literal[string] ]. identifier[append] (( identifier[count_additions] , identifier[line] [ literal[int] :])) identifier[count_deletions] -= literal[int] keyword[if] identifier[line] == literal[string] : identifier[count_deletions] -= literal[int] identifier[count_additions] -= literal[int] keyword[return] identifier[modified_lines]
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]: """ Given a diff, returns a dictionary with the added and deleted lines. The dictionary has 2 keys: "added" and "deleted", each containing the corresponding added or deleted lines. For both keys, the value is a list of Tuple (int, str), corresponding to (number of line in the file, actual line). :param str diff: diff of the commit :return: Dictionary """ lines = diff.split('\n') modified_lines = {'added': [], 'deleted': []} count_deletions = 0 count_additions = 0 for line in lines: line = line.rstrip() count_deletions += 1 count_additions += 1 if line.startswith('@@'): (count_deletions, count_additions) = self._get_line_numbers(line) # depends on [control=['if'], data=[]] if line.startswith('-'): modified_lines['deleted'].append((count_deletions, line[1:])) count_additions -= 1 # depends on [control=['if'], data=[]] if line.startswith('+'): modified_lines['added'].append((count_additions, line[1:])) count_deletions -= 1 # depends on [control=['if'], data=[]] if line == '\\ No newline at end of file': count_deletions -= 1 count_additions -= 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] return modified_lines
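A small worked example; gr is assumed to be an instance of the defining class (in pydriller, GitRepository), and the helper _get_line_numbers, which is not shown, is assumed to return the zero-based start lines of the old and new file, consistent with the counting above.

diff = ("@@ -1,2 +1,2 @@\n"
        "-old line\n"
        "+new line\n"
        " unchanged line")

result = gr.parse_diff(diff)
# under those assumptions:
# result == {'added': [(1, 'new line')], 'deleted': [(1, 'old line')]}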
def _get_upload_arguments(self, content_type): """Get required arguments for performing an upload. The content type returned will be determined in order of precedence: - The value passed in to this method (if not :data:`None`) - The value stored on the current blob - The default value ('application/octet-stream') :type content_type: str :param content_type: Type of content being uploaded (or :data:`None`). :rtype: tuple :returns: A triple of * A header dictionary * An object metadata dictionary * The ``content_type`` as a string (according to precedence) """ headers = _get_encryption_headers(self._encryption_key) object_metadata = self._get_writable_metadata() content_type = self._get_content_type(content_type) return headers, object_metadata, content_type
def function[_get_upload_arguments, parameter[self, content_type]]: constant[Get required arguments for performing an upload. The content type returned will be determined in order of precedence: - The value passed in to this method (if not :data:`None`) - The value stored on the current blob - The default value ('application/octet-stream') :type content_type: str :param content_type: Type of content being uploaded (or :data:`None`). :rtype: tuple :returns: A triple of * A header dictionary * An object metadata dictionary * The ``content_type`` as a string (according to precedence) ] variable[headers] assign[=] call[name[_get_encryption_headers], parameter[name[self]._encryption_key]] variable[object_metadata] assign[=] call[name[self]._get_writable_metadata, parameter[]] variable[content_type] assign[=] call[name[self]._get_content_type, parameter[name[content_type]]] return[tuple[[<ast.Name object at 0x7da20c6a81f0>, <ast.Name object at 0x7da20c6a8c40>, <ast.Name object at 0x7da20c6a9e40>]]]
keyword[def] identifier[_get_upload_arguments] ( identifier[self] , identifier[content_type] ): literal[string] identifier[headers] = identifier[_get_encryption_headers] ( identifier[self] . identifier[_encryption_key] ) identifier[object_metadata] = identifier[self] . identifier[_get_writable_metadata] () identifier[content_type] = identifier[self] . identifier[_get_content_type] ( identifier[content_type] ) keyword[return] identifier[headers] , identifier[object_metadata] , identifier[content_type]
def _get_upload_arguments(self, content_type): """Get required arguments for performing an upload. The content type returned will be determined in order of precedence: - The value passed in to this method (if not :data:`None`) - The value stored on the current blob - The default value ('application/octet-stream') :type content_type: str :param content_type: Type of content being uploaded (or :data:`None`). :rtype: tuple :returns: A triple of * A header dictionary * An object metadata dictionary * The ``content_type`` as a string (according to precedence) """ headers = _get_encryption_headers(self._encryption_key) object_metadata = self._get_writable_metadata() content_type = self._get_content_type(content_type) return (headers, object_metadata, content_type)
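A hedged sketch of the precedence rule the docstring states; blob is a google-cloud-storage Blob with illustrative bucket/object names, and _get_upload_arguments is private API, so this is illustrative rather than supported usage.

from google.cloud import storage

client = storage.Client()                       # credentials assumed configured
blob = client.bucket("my-bucket").blob("data.csv")

blob.content_type = None
_, _, ct = blob._get_upload_arguments(None)
assert ct == "application/octet-stream"         # default when nothing is set

blob.content_type = "text/csv"
_, _, ct = blob._get_upload_arguments(None)
assert ct == "text/csv"                         # stored blob value beats the default

_, _, ct = blob._get_upload_arguments("application/json")
assert ct == "application/json"                 # explicit argument wins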
def delete_ace(self, domain=None, user=None, sid=None): """ delete ACE for the share delete ACE for the share. User could either supply the domain and username or the sid of the user. :param domain: domain of the user :param user: username :param sid: sid of the user or sid list of the user :return: REST API response """ if sid is None: if domain is None: domain = self.cifs_server.domain sid = UnityAclUser.get_sid(self._cli, user=user, domain=domain) if isinstance(sid, six.string_types): sid = [sid] ace_list = [self._make_remove_ace_entry(s) for s in sid] resp = self.action("setACEs", cifsShareACEs=ace_list) resp.raise_if_err() return resp
def function[delete_ace, parameter[self, domain, user, sid]]: constant[ delete ACE for the share delete ACE for the share. User could either supply the domain and username or the sid of the user. :param domain: domain of the user :param user: username :param sid: sid of the user or sid list of the user :return: REST API response ] if compare[name[sid] is constant[None]] begin[:] if compare[name[domain] is constant[None]] begin[:] variable[domain] assign[=] name[self].cifs_server.domain variable[sid] assign[=] call[name[UnityAclUser].get_sid, parameter[name[self]._cli]] if call[name[isinstance], parameter[name[sid], name[six].string_types]] begin[:] variable[sid] assign[=] list[[<ast.Name object at 0x7da1b1150850>]] variable[ace_list] assign[=] <ast.ListComp object at 0x7da1b1150bb0> variable[resp] assign[=] call[name[self].action, parameter[constant[setACEs]]] call[name[resp].raise_if_err, parameter[]] return[name[resp]]
keyword[def] identifier[delete_ace] ( identifier[self] , identifier[domain] = keyword[None] , identifier[user] = keyword[None] , identifier[sid] = keyword[None] ): literal[string] keyword[if] identifier[sid] keyword[is] keyword[None] : keyword[if] identifier[domain] keyword[is] keyword[None] : identifier[domain] = identifier[self] . identifier[cifs_server] . identifier[domain] identifier[sid] = identifier[UnityAclUser] . identifier[get_sid] ( identifier[self] . identifier[_cli] , identifier[user] = identifier[user] , identifier[domain] = identifier[domain] ) keyword[if] identifier[isinstance] ( identifier[sid] , identifier[six] . identifier[string_types] ): identifier[sid] =[ identifier[sid] ] identifier[ace_list] =[ identifier[self] . identifier[_make_remove_ace_entry] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[sid] ] identifier[resp] = identifier[self] . identifier[action] ( literal[string] , identifier[cifsShareACEs] = identifier[ace_list] ) identifier[resp] . identifier[raise_if_err] () keyword[return] identifier[resp]
def delete_ace(self, domain=None, user=None, sid=None): """ delete ACE for the share delete ACE for the share. User could either supply the domain and username or the sid of the user. :param domain: domain of the user :param user: username :param sid: sid of the user or sid list of the user :return: REST API response """ if sid is None: if domain is None: domain = self.cifs_server.domain # depends on [control=['if'], data=['domain']] sid = UnityAclUser.get_sid(self._cli, user=user, domain=domain) # depends on [control=['if'], data=['sid']] if isinstance(sid, six.string_types): sid = [sid] # depends on [control=['if'], data=[]] ace_list = [self._make_remove_ace_entry(s) for s in sid] resp = self.action('setACEs', cifsShareACEs=ace_list) resp.raise_if_err() return resp
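Hedged call sketches based only on the signature above; share is assumed to be an already-retrieved CIFS share object, and the SIDs are made up.

# lookup by domain and username
resp = share.delete_ace(domain="example.com", user="alice")

# by a single SID, skipping the name lookup
resp = share.delete_ace(sid="S-1-5-21-1111111111-222-1001")

# by a list of SIDs, removed in one request
resp = share.delete_ace(sid=["S-1-5-21-1111111111-222-1001",
                             "S-1-5-21-1111111111-222-1002"])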
def print_traceback(self): """ Print the traceback of the exception wrapped by the AbbreviatedException. """ traceback.print_exception(self.etype, self.value, self.traceback)
def function[print_traceback, parameter[self]]: constant[ Print the traceback of the exception wrapped by the AbbreviatedException. ] call[name[traceback].print_exception, parameter[name[self].etype, name[self].value, name[self].traceback]]
keyword[def] identifier[print_traceback] ( identifier[self] ): literal[string] identifier[traceback] . identifier[print_exception] ( identifier[self] . identifier[etype] , identifier[self] . identifier[value] , identifier[self] . identifier[traceback] )
def print_traceback(self): """ Print the traceback of the exception wrapped by the AbbreviatedException. """ traceback.print_exception(self.etype, self.value, self.traceback)
def check_cache(self, template): ''' Cache a file only once ''' if template not in self.cached: self.cache_file(template) self.cached.append(template)
def function[check_cache, parameter[self, template]]: constant[ Cache a file only once ] if compare[name[template] <ast.NotIn object at 0x7da2590d7190> name[self].cached] begin[:] call[name[self].cache_file, parameter[name[template]]] call[name[self].cached.append, parameter[name[template]]]
keyword[def] identifier[check_cache] ( identifier[self] , identifier[template] ): literal[string] keyword[if] identifier[template] keyword[not] keyword[in] identifier[self] . identifier[cached] : identifier[self] . identifier[cache_file] ( identifier[template] ) identifier[self] . identifier[cached] . identifier[append] ( identifier[template] )
def check_cache(self, template): """ Cache a file only once """ if template not in self.cached: self.cache_file(template) self.cached.append(template) # depends on [control=['if'], data=['template']]
def _dispatch_send(self, message): """ Dispatch the different steps of sending """ if self.dryrun: return message if not self.socket: raise GraphiteSendException( "Socket was not created before send" ) sending_function = self._send if self._autoreconnect: sending_function = self._send_and_reconnect try: if self.asynchronous and gevent: gevent.spawn(sending_function, message) else: sending_function(message) except Exception as e: self._handle_send_error(e) return "sent {0} long message: {1}".format(len(message), message[:75])
def function[_dispatch_send, parameter[self, message]]: constant[ Dispatch the different steps of sending ] if name[self].dryrun begin[:] return[name[message]] if <ast.UnaryOp object at 0x7da1b1026860> begin[:] <ast.Raise object at 0x7da1b10261a0> variable[sending_function] assign[=] name[self]._send if name[self]._autoreconnect begin[:] variable[sending_function] assign[=] name[self]._send_and_reconnect <ast.Try object at 0x7da1b1025720> return[call[constant[sent {0} long message: {1}].format, parameter[call[name[len], parameter[name[message]]], call[name[message]][<ast.Slice object at 0x7da1b1025f00>]]]]
keyword[def] identifier[_dispatch_send] ( identifier[self] , identifier[message] ): literal[string] keyword[if] identifier[self] . identifier[dryrun] : keyword[return] identifier[message] keyword[if] keyword[not] identifier[self] . identifier[socket] : keyword[raise] identifier[GraphiteSendException] ( literal[string] ) identifier[sending_function] = identifier[self] . identifier[_send] keyword[if] identifier[self] . identifier[_autoreconnect] : identifier[sending_function] = identifier[self] . identifier[_send_and_reconnect] keyword[try] : keyword[if] identifier[self] . identifier[asynchronous] keyword[and] identifier[gevent] : identifier[gevent] . identifier[spawn] ( identifier[sending_function] , identifier[message] ) keyword[else] : identifier[sending_function] ( identifier[message] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[self] . identifier[_handle_send_error] ( identifier[e] ) keyword[return] literal[string] . identifier[format] ( identifier[len] ( identifier[message] ), identifier[message] [: literal[int] ])
def _dispatch_send(self, message): """ Dispatch the different steps of sending """ if self.dryrun: return message # depends on [control=['if'], data=[]] if not self.socket: raise GraphiteSendException('Socket was not created before send') # depends on [control=['if'], data=[]] sending_function = self._send if self._autoreconnect: sending_function = self._send_and_reconnect # depends on [control=['if'], data=[]] try: if self.asynchronous and gevent: gevent.spawn(sending_function, message) # depends on [control=['if'], data=[]] else: sending_function(message) # depends on [control=['try'], data=[]] except Exception as e: self._handle_send_error(e) # depends on [control=['except'], data=['e']] return 'sent {0} long message: {1}'.format(len(message), message[:75])
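A hedged sketch of the behavior contract read off the code above; client is assumed to be an instance of the defining class, and GraphiteSendException to be importable from the same module.

# dry run: the message is echoed back and nothing is sent
client.dryrun = True
msg = "servers.web1.load 0.5 1500000000\n"
assert client._dispatch_send(msg) == msg

# no socket and no dry run: an explicit failure instead of a silent drop
client.dryrun = False
client.socket = None
try:
    client._dispatch_send(msg)
except GraphiteSendException:
    pass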
def _get_server_cert(response): """ Get the certificate at the request_url and return it as a SHA256 hash. Will get the raw socket from the original response from the server. This socket is then checked if it is an SSL socket and then used to get the hash of the certificate. The certificate hash is then used with NTLMv2 authentication for Channel Binding Tokens support. If the raw object is not a urllib3 HTTPResponse (default with requests) then no certificate will be returned. :param response: The original 401 response from the server :return: SHA256 hash of the DER encoded certificate at the request_url or None if not a HTTPS endpoint """ certificate_hash = None raw_response = response.raw if isinstance(raw_response, HTTPResponse): if sys.version_info > (3, 0): socket = raw_response._fp.fp.raw._sock else: socket = raw_response._fp.fp._sock try: server_certificate = socket.getpeercert(True) except AttributeError: pass else: hash_object = hashlib.sha256(server_certificate) certificate_hash = hash_object.hexdigest().upper() else: warnings.warn("Requests is running with a non urllib3 backend, cannot retrieve server certificate for CBT", NoCertificateRetrievedWarning) return certificate_hash
def function[_get_server_cert, parameter[response]]: constant[ Get the certificate at the request_url and return it as a SHA256 hash. Will get the raw socket from the original response from the server. This socket is then checked if it is an SSL socket and then used to get the hash of the certificate. The certificate hash is then used with NTLMv2 authentication for Channel Binding Tokens support. If the raw object is not a urllib3 HTTPReponse (default with requests) then no certificate will be returned. :param response: The original 401 response from the server :return: SHA256 hash of the DER encoded certificate at the request_url or None if not a HTTPS endpoint ] variable[certificate_hash] assign[=] constant[None] variable[raw_response] assign[=] name[response].raw if call[name[isinstance], parameter[name[raw_response], name[HTTPResponse]]] begin[:] if compare[name[sys].version_info greater[>] tuple[[<ast.Constant object at 0x7da2047ea4d0>, <ast.Constant object at 0x7da2047e86d0>]]] begin[:] variable[socket] assign[=] name[raw_response]._fp.fp.raw._sock <ast.Try object at 0x7da2047e8190> return[name[certificate_hash]]
keyword[def] identifier[_get_server_cert] ( identifier[response] ): literal[string] identifier[certificate_hash] = keyword[None] identifier[raw_response] = identifier[response] . identifier[raw] keyword[if] identifier[isinstance] ( identifier[raw_response] , identifier[HTTPResponse] ): keyword[if] identifier[sys] . identifier[version_info] >( literal[int] , literal[int] ): identifier[socket] = identifier[raw_response] . identifier[_fp] . identifier[fp] . identifier[raw] . identifier[_sock] keyword[else] : identifier[socket] = identifier[raw_response] . identifier[_fp] . identifier[fp] . identifier[_sock] keyword[try] : identifier[server_certificate] = identifier[socket] . identifier[getpeercert] ( keyword[True] ) keyword[except] identifier[AttributeError] : keyword[pass] keyword[else] : identifier[hash_object] = identifier[hashlib] . identifier[sha256] ( identifier[server_certificate] ) identifier[certificate_hash] = identifier[hash_object] . identifier[hexdigest] (). identifier[upper] () keyword[else] : identifier[warnings] . identifier[warn] ( literal[string] , identifier[NoCertificateRetrievedWarning] ) keyword[return] identifier[certificate_hash]
def _get_server_cert(response): """ Get the certificate at the request_url and return it as a SHA256 hash. Will get the raw socket from the original response from the server. This socket is then checked if it is an SSL socket and then used to get the hash of the certificate. The certificate hash is then used with NTLMv2 authentication for Channel Binding Tokens support. If the raw object is not a urllib3 HTTPResponse (default with requests) then no certificate will be returned. :param response: The original 401 response from the server :return: SHA256 hash of the DER encoded certificate at the request_url or None if not a HTTPS endpoint """ certificate_hash = None raw_response = response.raw if isinstance(raw_response, HTTPResponse): if sys.version_info > (3, 0): socket = raw_response._fp.fp.raw._sock # depends on [control=['if'], data=[]] else: socket = raw_response._fp.fp._sock try: server_certificate = socket.getpeercert(True) # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]] else: hash_object = hashlib.sha256(server_certificate) certificate_hash = hash_object.hexdigest().upper() # depends on [control=['if'], data=[]] else: warnings.warn('Requests is running with a non urllib3 backend, cannot retrieve server certificate for CBT', NoCertificateRetrievedWarning) return certificate_hash
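The same SHA256-over-DER digest can be reproduced with the standard library alone; a self-contained sketch with an illustrative host name.

import hashlib
import ssl

# fetch the certificate in PEM form and convert it to DER, the same byte
# form socket.getpeercert(True) returns inside the function above
pem_cert = ssl.get_server_certificate(("example.com", 443))
der_cert = ssl.PEM_cert_to_DER_cert(pem_cert)

certificate_hash = hashlib.sha256(der_cert).hexdigest().upper()
print(certificate_hash)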
def growth(interval, pricecol, eqdata): """ Retrieve growth labels. Parameters -------------- interval : int Number of sessions over which growth is measured. For example, if the value of 32 is passed for `interval`, the data returned will show the growth 32 sessions ahead for each data point. pricecol : str Column of `eqdata` to be used for prices (normally 'Adj Close'). eqdata : DataFrame Data for evaluating growth. Returns -------- labels : DataFrame Growth labels for the specified period. The final `interval` rows of `eqdata` are dropped so that the labels stay synchronized with features built from the same data. Examples --------------- >>> from functools import partial >>> features, labels = pn.data.labeledfeatures(eqdata, 256, ... partial(pn.data.lab.growth, 32, 'Adj Close')) """ size = len(eqdata.index) labeldata = eqdata.loc[:, pricecol].values[interval:] /\ eqdata.loc[:, pricecol].values[:(size - interval)] df = pd.DataFrame(data=labeldata, index=eqdata.index[:(size - interval)], columns=['Growth'], dtype='float64') return df
def function[growth, parameter[interval, pricecol, eqdata]]: constant[ Retrieve growth labels. Parameters -------------- interval : int Number of sessions over which growth is measured. For example, if the value of 32 is passed for `interval`, the data returned will show the growth 32 sessions ahead for each data point. eqdata : DataFrame Data for evaluating growth. pricecol : str Column of `eqdata` to be used for prices (Normally 'Adj Close'). Returns -------- labels : DataFrame Growth labels for the specified period skipatend : int Number of rows skipped at the end of `eqdata` for the given labels. Used to synchronize labels and features. Examples --------------- >>> from functools import partial >>> features, labels = pn.data.labeledfeatures(eqdata, 256, ... partial(pn.data.lab.growth, 32, 'Adj Close')) ] variable[size] assign[=] call[name[len], parameter[name[eqdata].index]] variable[labeldata] assign[=] binary_operation[call[call[name[eqdata].loc][tuple[[<ast.Slice object at 0x7da2054a7460>, <ast.Name object at 0x7da2054a64a0>]]].values][<ast.Slice object at 0x7da2054a7eb0>] / call[call[name[eqdata].loc][tuple[[<ast.Slice object at 0x7da2054a76d0>, <ast.Name object at 0x7da2054a5030>]]].values][<ast.Slice object at 0x7da2054a7280>]] variable[df] assign[=] call[name[pd].DataFrame, parameter[]] return[name[df]]
keyword[def] identifier[growth] ( identifier[interval] , identifier[pricecol] , identifier[eqdata] ): literal[string] identifier[size] = identifier[len] ( identifier[eqdata] . identifier[index] ) identifier[labeldata] = identifier[eqdata] . identifier[loc] [:, identifier[pricecol] ]. identifier[values] [ identifier[interval] :]/ identifier[eqdata] . identifier[loc] [:, identifier[pricecol] ]. identifier[values] [:( identifier[size] - identifier[interval] )] identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[data] = identifier[labeldata] , identifier[index] = identifier[eqdata] . identifier[index] [:( identifier[size] - identifier[interval] )], identifier[columns] =[ literal[string] ], identifier[dtype] = literal[string] ) keyword[return] identifier[df]
def growth(interval, pricecol, eqdata): """ Retrieve growth labels. Parameters -------------- interval : int Number of sessions over which growth is measured. For example, if the value of 32 is passed for `interval`, the data returned will show the growth 32 sessions ahead for each data point. pricecol : str Column of `eqdata` to be used for prices (normally 'Adj Close'). eqdata : DataFrame Data for evaluating growth. Returns -------- labels : DataFrame Growth labels for the specified period. The final `interval` rows of `eqdata` are dropped so that the labels stay synchronized with features built from the same data. Examples --------------- >>> from functools import partial >>> features, labels = pn.data.labeledfeatures(eqdata, 256, ... partial(pn.data.lab.growth, 32, 'Adj Close')) """ size = len(eqdata.index) labeldata = eqdata.loc[:, pricecol].values[interval:] / eqdata.loc[:, pricecol].values[:size - interval] df = pd.DataFrame(data=labeldata, index=eqdata.index[:size - interval], columns=['Growth'], dtype='float64') return df
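A tiny worked example using only pandas; the growth function above is assumed to be in scope (in pynance it is reachable as pn.data.lab.growth, per the docstring).

import pandas as pd

idx = pd.date_range("2020-01-01", periods=6)
eqdata = pd.DataFrame({"Adj Close": [10.0, 11.0, 12.0, 12.0, 13.0, 15.0]},
                      index=idx)

labels = growth(2, "Adj Close", eqdata)
# labels['Growth'] is price[t + 2] / price[t] for the first 4 sessions:
# 1.2, 1.0909..., 1.0833..., 1.25 (the last `interval` rows are dropped)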
def _check_toolplus(x): """Parse options for adding non-standard/commercial tools like GATK and MuTecT. """ import argparse Tool = collections.namedtuple("Tool", ["name", "fname"]) std_choices = set(["data", "cadd", "dbnsfp", "ericscript"]) if x in std_choices: return Tool(x, None) elif "=" in x and len(x.split("=")) == 2: name, fname = x.split("=") fname = os.path.normpath(os.path.realpath(fname)) if not os.path.exists(fname): raise argparse.ArgumentTypeError("Unexpected --toolplus argument for %s. File does not exist: %s" % (name, fname)) return Tool(name, fname) else: raise argparse.ArgumentTypeError("Unexpected --toolplus argument. Expect toolname=filename.")
def function[_check_toolplus, parameter[x]]: constant[Parse options for adding non-standard/commercial tools like GATK and MuTecT. ] import module[argparse] variable[Tool] assign[=] call[name[collections].namedtuple, parameter[constant[Tool], list[[<ast.Constant object at 0x7da1b18aa440>, <ast.Constant object at 0x7da1b18aa3b0>]]]] variable[std_choices] assign[=] call[name[set], parameter[list[[<ast.Constant object at 0x7da1b18aa410>, <ast.Constant object at 0x7da1b18ab6a0>, <ast.Constant object at 0x7da1b18a8b80>, <ast.Constant object at 0x7da1b18aaf20>]]]] if compare[name[x] in name[std_choices]] begin[:] return[call[name[Tool], parameter[name[x], constant[None]]]]
keyword[def] identifier[_check_toolplus] ( identifier[x] ): literal[string] keyword[import] identifier[argparse] identifier[Tool] = identifier[collections] . identifier[namedtuple] ( literal[string] ,[ literal[string] , literal[string] ]) identifier[std_choices] = identifier[set] ([ literal[string] , literal[string] , literal[string] , literal[string] ]) keyword[if] identifier[x] keyword[in] identifier[std_choices] : keyword[return] identifier[Tool] ( identifier[x] , keyword[None] ) keyword[elif] literal[string] keyword[in] identifier[x] keyword[and] identifier[len] ( identifier[x] . identifier[split] ( literal[string] ))== literal[int] : identifier[name] , identifier[fname] = identifier[x] . identifier[split] ( literal[string] ) identifier[fname] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[fname] )) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[fname] ): keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] ( literal[string] %( identifier[name] , identifier[fname] )) keyword[return] identifier[Tool] ( identifier[name] , identifier[fname] ) keyword[else] : keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] ( literal[string] )
def _check_toolplus(x): """Parse options for adding non-standard/commercial tools like GATK and MuTecT. """ import argparse Tool = collections.namedtuple('Tool', ['name', 'fname']) std_choices = set(['data', 'cadd', 'dbnsfp', 'ericscript']) if x in std_choices: return Tool(x, None) # depends on [control=['if'], data=['x']] elif '=' in x and len(x.split('=')) == 2: (name, fname) = x.split('=') fname = os.path.normpath(os.path.realpath(fname)) if not os.path.exists(fname): raise argparse.ArgumentTypeError('Unexpected --toolplus argument for %s. File does not exist: %s' % (name, fname)) # depends on [control=['if'], data=[]] return Tool(name, fname) # depends on [control=['if'], data=[]] else: raise argparse.ArgumentTypeError('Unexpected --toolplus argument. Expect toolname=filename.')
def get_ci(theta_star, blockratio=1.0):
    """
    Get the bootstrap 95% confidence interval based on empirical percentiles.
    """
    # get rid of nans while we sort
    b_star = np.sort(theta_star[~np.isnan(theta_star)])
    # block-bootstrap standard error; computed for reference only and not
    # used in the percentile interval below
    se = np.std(b_star) * np.sqrt(blockratio)
    # bootstrap 95% CI based on empirical percentiles
    ci = [b_star[int(len(b_star) * .025)], b_star[int(len(b_star) * .975)]]
    return ci
def function[get_ci, parameter[theta_star, blockratio]]: constant[ Get the confidence interval. ] variable[b_star] assign[=] call[name[np].sort, parameter[call[name[theta_star]][<ast.UnaryOp object at 0x7da20c7cbbb0>]]] variable[se] assign[=] binary_operation[call[name[np].std, parameter[name[b_star]]] * call[name[np].sqrt, parameter[name[blockratio]]]] variable[ci] assign[=] list[[<ast.Subscript object at 0x7da20c7ca920>, <ast.Subscript object at 0x7da20c7ca4d0>]] return[name[ci]]
keyword[def] identifier[get_ci] ( identifier[theta_star] , identifier[blockratio] = literal[int] ): literal[string] identifier[b_star] = identifier[np] . identifier[sort] ( identifier[theta_star] [~ identifier[np] . identifier[isnan] ( identifier[theta_star] )]) identifier[se] = identifier[np] . identifier[std] ( identifier[b_star] )* identifier[np] . identifier[sqrt] ( identifier[blockratio] ) identifier[ci] =[ identifier[b_star] [ identifier[int] ( identifier[len] ( identifier[b_star] )* literal[int] )], identifier[b_star] [ identifier[int] ( identifier[len] ( identifier[b_star] )* literal[int] )]] keyword[return] identifier[ci]
def get_ci(theta_star, blockratio=1.0): """ Get the confidence interval. """ # get rid of nans while we sort b_star = np.sort(theta_star[~np.isnan(theta_star)]) se = np.std(b_star) * np.sqrt(blockratio) # bootstrap 95% CI based on empirical percentiles ci = [b_star[int(len(b_star) * 0.025)], b_star[int(len(b_star) * 0.975)]] return ci
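A quick numerical check of the percentile interval, using synthetic bootstrap replicates (the normal parameters are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
theta_star = rng.normal(loc=1.0, scale=0.2, size=5000)  # fake bootstrap replicates
theta_star[::100] = np.nan                              # sprinkle in some NaNs

b_star = np.sort(theta_star[~np.isnan(theta_star)])
ci = [b_star[int(len(b_star) * .025)], b_star[int(len(b_star) * .975)]]
print(ci)  # close to 1.0 +/- 1.96 * 0.2, i.e. about [0.61, 1.39]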
def accept(self): """ Get the Accept option of a request. :return: the Accept value or None if not specified by the request :rtype : String """ for option in self.options: if option.number == defines.OptionRegistry.ACCEPT.number: return option.value return None
def function[accept, parameter[self]]: constant[ Get the Accept option of a request. :return: the Accept value or None if not specified by the request :rtype : String ] for taget[name[option]] in starred[name[self].options] begin[:] if compare[name[option].number equal[==] name[defines].OptionRegistry.ACCEPT.number] begin[:] return[name[option].value] return[constant[None]]
keyword[def] identifier[accept] ( identifier[self] ): literal[string] keyword[for] identifier[option] keyword[in] identifier[self] . identifier[options] : keyword[if] identifier[option] . identifier[number] == identifier[defines] . identifier[OptionRegistry] . identifier[ACCEPT] . identifier[number] : keyword[return] identifier[option] . identifier[value] keyword[return] keyword[None]
def accept(self): """ Get the Accept option of a request. :return: the Accept value or None if not specified by the request :rtype : String """ for option in self.options: if option.number == defines.OptionRegistry.ACCEPT.number: return option.value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['option']] return None
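The option objects below are hypothetical stand-ins, but they show the linear lookup the method performs; 17 is the CoAP Accept option number from RFC 7252, and 50 is the application/json content-format:

class Option:
    def __init__(self, number, value):
        self.number = number
        self.value = value

ACCEPT = 17  # CoAP Accept option number (RFC 7252)

options = [Option(11, "sensors"), Option(ACCEPT, 50)]
accept = next((o.value for o in options if o.number == ACCEPT), None)
print(accept)  # 50 -> application/json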
def emit_node(self, name, **props): """emit a node with given properties. node properties: see http://www.graphviz.org/doc/info/attrs.html """ attrs = ['%s="%s"' % (prop, value) for prop, value in props.items()] self.emit("%s [%s];" % (normalize_node_id(name), ", ".join(sorted(attrs))))
def function[emit_node, parameter[self, name]]: constant[emit a node with given properties. node properties: see http://www.graphviz.org/doc/info/attrs.html ] variable[attrs] assign[=] <ast.ListComp object at 0x7da1b025aaa0> call[name[self].emit, parameter[binary_operation[constant[%s [%s];] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b025b760>, <ast.Call object at 0x7da1b025a0b0>]]]]]
keyword[def] identifier[emit_node] ( identifier[self] , identifier[name] ,** identifier[props] ): literal[string] identifier[attrs] =[ literal[string] %( identifier[prop] , identifier[value] ) keyword[for] identifier[prop] , identifier[value] keyword[in] identifier[props] . identifier[items] ()] identifier[self] . identifier[emit] ( literal[string] %( identifier[normalize_node_id] ( identifier[name] ), literal[string] . identifier[join] ( identifier[sorted] ( identifier[attrs] ))))
def emit_node(self, name, **props): """emit a node with given properties. node properties: see http://www.graphviz.org/doc/info/attrs.html """ attrs = ['%s="%s"' % (prop, value) for (prop, value) in props.items()] self.emit('%s [%s];' % (normalize_node_id(name), ', '.join(sorted(attrs))))
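A sketch of the DOT line this emits. normalize_node_id is not shown in the source, so the stand-in below simply quotes the name, which may differ from the real helper:

def normalize_node_id(name):
    # assumption: the real helper quotes/escapes ids; this one only quotes
    return '"%s"' % name

def emit_node_line(name, **props):
    attrs = ['%s="%s"' % (prop, value) for prop, value in props.items()]
    return "%s [%s];" % (normalize_node_id(name), ", ".join(sorted(attrs)))

print(emit_node_line("MyClass", shape="record", label="MyClass"))
# "MyClass" [label="MyClass", shape="record"];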
def plot_data_error(self, which_data_rows='all', which_data_ycols='all', visible_dims=None, projection='2d', label=None, **error_kwargs):
    """
    Plot the training data input error.

    For higher dimensions than two, use fixed_inputs to plot the data points
    with some of the inputs fixed.

    Can plot only part of the data using which_data_rows and which_data_ycols.

    :param which_data_rows: which of the training data to plot (default all)
    :type which_data_rows: 'all' or a slice object to slice self.X, self.Y
    :param which_data_ycols: when the data has several columns (independent outputs), only plot these
    :type which_data_ycols: 'all' or a list of integers
    :param visible_dims: an array specifying the input dimensions to plot (maximum two)
    :type visible_dims: a numpy array
    :param {'2d','3d'} projection: whether to plot in 2d or 3d. This only applies when plotting two dimensional inputs!
    :param dict error_kwargs: kwargs for the error plot for the plotting library you are using
    :param str label: the label for the plot

    :returns list: of plots created.
    """
    canvas, error_kwargs = pl().new_canvas(projection=projection, **error_kwargs)
    plots = _plot_data_error(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, label, **error_kwargs)
    return pl().add_to_canvas(canvas, plots)
def function[plot_data_error, parameter[self, which_data_rows, which_data_ycols, visible_dims, projection, label]]: constant[ Plot the training data input error. For higher dimensions than two, use fixed_inputs to plot the data points with some of the inputs fixed. Can plot only part of the data using which_data_rows and which_data_ycols. :param which_data_rows: which of the training data to plot (default all) :type which_data_rows: 'all' or a slice object to slice self.X, self.Y :param which_data_ycols: when the data has several columns (independant outputs), only plot these :type which_data_ycols: 'all' or a list of integers :param visible_dims: an array specifying the input dimensions to plot (maximum two) :type visible_dims: a numpy array :param {'2d','3d'} projection: whether to plot in 2d or 3d. This only applies when plotting two dimensional inputs! :param dict error_kwargs: kwargs for the error plot for the plotting library you are using :param str label: the label for the plot :param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using :returns list: of plots created. ] <ast.Tuple object at 0x7da1b21d5150> assign[=] call[call[name[pl], parameter[]].new_canvas, parameter[]] variable[plots] assign[=] call[name[_plot_data_error], parameter[name[self], name[canvas], name[which_data_rows], name[which_data_ycols], name[visible_dims], name[projection], name[label]]] return[call[call[name[pl], parameter[]].add_to_canvas, parameter[name[canvas], name[plots]]]]
keyword[def] identifier[plot_data_error] ( identifier[self] , identifier[which_data_rows] = literal[string] , identifier[which_data_ycols] = literal[string] , identifier[visible_dims] = keyword[None] , identifier[projection] = literal[string] , identifier[label] = keyword[None] ,** identifier[error_kwargs] ): literal[string] identifier[canvas] , identifier[error_kwargs] = identifier[pl] (). identifier[new_canvas] ( identifier[projection] = identifier[projection] ,** identifier[error_kwargs] ) identifier[plots] = identifier[_plot_data_error] ( identifier[self] , identifier[canvas] , identifier[which_data_rows] , identifier[which_data_ycols] , identifier[visible_dims] , identifier[projection] , identifier[label] ,** identifier[error_kwargs] ) keyword[return] identifier[pl] (). identifier[add_to_canvas] ( identifier[canvas] , identifier[plots] )
def plot_data_error(self, which_data_rows='all', which_data_ycols='all', visible_dims=None, projection='2d', label=None, **error_kwargs): """ Plot the training data input error. For higher dimensions than two, use fixed_inputs to plot the data points with some of the inputs fixed. Can plot only part of the data using which_data_rows and which_data_ycols. :param which_data_rows: which of the training data to plot (default all) :type which_data_rows: 'all' or a slice object to slice self.X, self.Y :param which_data_ycols: when the data has several columns (independant outputs), only plot these :type which_data_ycols: 'all' or a list of integers :param visible_dims: an array specifying the input dimensions to plot (maximum two) :type visible_dims: a numpy array :param {'2d','3d'} projection: whether to plot in 2d or 3d. This only applies when plotting two dimensional inputs! :param dict error_kwargs: kwargs for the error plot for the plotting library you are using :param str label: the label for the plot :param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using :returns list: of plots created. """ (canvas, error_kwargs) = pl().new_canvas(projection=projection, **error_kwargs) plots = _plot_data_error(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, label, **error_kwargs) return pl().add_to_canvas(canvas, plots)
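A hedged usage sketch, assuming GPy is installed with the matplotlib plotting backend and that a Bayesian GPLVM (whose latent inputs carry variance) is the model being plotted; none of this is verified against a particular GPy version:

import numpy as np
import GPy

GPy.plotting.change_plotting_library('matplotlib')

Y = np.sin(np.linspace(0, 3, 40))[:, None] + 0.05 * np.random.randn(40, 1)
m = GPy.models.BayesianGPLVM(Y, 1)   # one latent dimension with input uncertainty
m.optimize(max_iters=100)
m.plot_data_error(visible_dims=np.array([0]), projection='2d')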
def edit(self, name, label=None): """Edit this asset. :param str name: (required), The file name of the asset :param str label: (optional), An alternate description of the asset :returns: boolean """ if not name: return False edit_data = {'name': name, 'label': label} self._remove_none(edit_data) r = self._patch( self._api, data=json.dumps(edit_data), headers=Release.CUSTOM_HEADERS ) successful = self._boolean(r, 200, 404) if successful: self.__init__(r.json(), self) return successful
def function[edit, parameter[self, name, label]]: constant[Edit this asset. :param str name: (required), The file name of the asset :param str label: (optional), An alternate description of the asset :returns: boolean ] if <ast.UnaryOp object at 0x7da1b0e0f160> begin[:] return[constant[False]] variable[edit_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e0cd00>, <ast.Constant object at 0x7da1b0e0efe0>], [<ast.Name object at 0x7da1b0e0e440>, <ast.Name object at 0x7da1b0e0e4a0>]] call[name[self]._remove_none, parameter[name[edit_data]]] variable[r] assign[=] call[name[self]._patch, parameter[name[self]._api]] variable[successful] assign[=] call[name[self]._boolean, parameter[name[r], constant[200], constant[404]]] if name[successful] begin[:] call[name[self].__init__, parameter[call[name[r].json, parameter[]], name[self]]] return[name[successful]]
keyword[def] identifier[edit] ( identifier[self] , identifier[name] , identifier[label] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[name] : keyword[return] keyword[False] identifier[edit_data] ={ literal[string] : identifier[name] , literal[string] : identifier[label] } identifier[self] . identifier[_remove_none] ( identifier[edit_data] ) identifier[r] = identifier[self] . identifier[_patch] ( identifier[self] . identifier[_api] , identifier[data] = identifier[json] . identifier[dumps] ( identifier[edit_data] ), identifier[headers] = identifier[Release] . identifier[CUSTOM_HEADERS] ) identifier[successful] = identifier[self] . identifier[_boolean] ( identifier[r] , literal[int] , literal[int] ) keyword[if] identifier[successful] : identifier[self] . identifier[__init__] ( identifier[r] . identifier[json] (), identifier[self] ) keyword[return] identifier[successful]
def edit(self, name, label=None): """Edit this asset. :param str name: (required), The file name of the asset :param str label: (optional), An alternate description of the asset :returns: boolean """ if not name: return False # depends on [control=['if'], data=[]] edit_data = {'name': name, 'label': label} self._remove_none(edit_data) r = self._patch(self._api, data=json.dumps(edit_data), headers=Release.CUSTOM_HEADERS) successful = self._boolean(r, 200, 404) if successful: self.__init__(r.json(), self) # depends on [control=['if'], data=[]] return successful
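A hedged usage sketch for github3.py; the lookup chain (login, repository, release, assets) reflects how I understand that library and is not verified here, and the token and release id are placeholders:

import github3

gh = github3.login(token="<personal-access-token>")
repo = gh.repository("owner", "project")
release = repo.release(12345)             # placeholder release id
asset = next(iter(release.assets()))
if asset.edit("build-nightly.tar.gz", label="Nightly build"):
    print("renamed asset to", asset.name)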
def enqueue_task(self, source, *args): """ Enqueue a task execution. It will run in the background as soon as the coordinator clears it to do so. """ yield from self.cell.coord.enqueue(self) route = Route(source, self.cell, self.spec, self.emit) self.cell.loop.create_task(self.coord_wrap(route, *args)) # To guarantee that the event loop works fluidly, we manually yield # once. The coordinator enqueue coroutine is not required to yield so # this ensures we avoid various forms of event starvation regardless. yield
def function[enqueue_task, parameter[self, source]]: constant[ Enqueue a task execution. It will run in the background as soon as the coordinator clears it to do so. ] <ast.YieldFrom object at 0x7da204621300> variable[route] assign[=] call[name[Route], parameter[name[source], name[self].cell, name[self].spec, name[self].emit]] call[name[self].cell.loop.create_task, parameter[call[name[self].coord_wrap, parameter[name[route], <ast.Starred object at 0x7da1b23442b0>]]]] <ast.Yield object at 0x7da1b2346710>
keyword[def] identifier[enqueue_task] ( identifier[self] , identifier[source] ,* identifier[args] ): literal[string] keyword[yield] keyword[from] identifier[self] . identifier[cell] . identifier[coord] . identifier[enqueue] ( identifier[self] ) identifier[route] = identifier[Route] ( identifier[source] , identifier[self] . identifier[cell] , identifier[self] . identifier[spec] , identifier[self] . identifier[emit] ) identifier[self] . identifier[cell] . identifier[loop] . identifier[create_task] ( identifier[self] . identifier[coord_wrap] ( identifier[route] ,* identifier[args] )) keyword[yield]
def enqueue_task(self, source, *args): """ Enqueue a task execution. It will run in the background as soon as the coordinator clears it to do so. """ yield from self.cell.coord.enqueue(self) route = Route(source, self.cell, self.spec, self.emit) self.cell.loop.create_task(self.coord_wrap(route, *args)) # To guarantee that the event loop works fluidly, we manually yield # once. The coordinator enqueue coroutine is not required to yield so # this ensures we avoid various forms of event starvation regardless. yield
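The bare yield at the end is the legacy-coroutine way of guaranteeing a suspension point. A modern-asyncio sketch of the same guard, with all names as stand-ins:

import asyncio

async def enqueue(route):          # stand-in for cell.coord.enqueue
    pass                           # not required to await anything

async def run_route(route):
    print("running", route)

async def enqueue_task(route):
    await enqueue(route)
    asyncio.get_running_loop().create_task(run_route(route))
    await asyncio.sleep(0)         # modern analogue of the bare `yield`

asyncio.run(enqueue_task("route-1"))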
def generateFromRaster(self, elevation_raster, shapefile_path=None, out_elevation_grid=None, resample_method=gdalconst.GRA_Average, load_raster_to_db=True):
    """
    Generates an elevation grid for the GSSHA simulation
    from an elevation raster

    Example::

        from gsshapy.orm import ProjectFile, ElevationGridFile
        from gsshapy.lib import db_tools as dbt

        gssha_directory = '/gsshapy/tests/grid_standard/gssha_project'
        elevation_raster = 'elevation.tif'

        project_manager, db_sessionmaker = \
            dbt.get_project_session('grid_standard',
                                    gssha_directory)

        db_session = db_sessionmaker()

        # read project file
        project_manager.readInput(directory=gssha_directory,
                                  projectFileName='grid_standard.prj',
                                  session=db_session)

        # generate elevation grid
        elevation_grid = ElevationGridFile(session=db_session,
                                           project_file=project_manager)
        elevation_grid.generateFromRaster(elevation_raster)

        # write out updated parameters
        project_manager.writeInput(session=db_session,
                                   directory=gssha_directory,
                                   name='grid_standard')
    """
    if not self.projectFile:
        raise ValueError("Must be connected to project file ...")

    # make sure paths are absolute as the working directory changes
    elevation_raster = os.path.abspath(elevation_raster)
    if shapefile_path is not None:
        shapefile_path = os.path.abspath(shapefile_path)

    # must match elevation mask grid
    mask_grid = self.projectFile.getGrid()
    if out_elevation_grid is None:
        out_elevation_grid = '{0}.{1}'.format(self.projectFile.name,
                                              self.fileExtension)

    elevation_grid = resample_grid(elevation_raster,
                                   mask_grid,
                                   resample_method=resample_method,
                                   as_gdal_grid=True)
    with tmp_chdir(self.projectFile.project_directory):
        elevation_grid.to_grass_ascii(out_elevation_grid,
                                      print_nodata=False)

        # read raster into object
        if load_raster_to_db:
            self._load_raster_text(out_elevation_grid)

    self.filename = out_elevation_grid
    self.projectFile.setCard("ELEVATION", out_elevation_grid, add_quotes=True)

    # find outlet and add slope
    self.projectFile.findOutlet(shapefile_path)
def function[generateFromRaster, parameter[self, elevation_raster, shapefile_path, out_elevation_grid, resample_method, load_raster_to_db]]: constant[ Generates an elevation grid for the GSSHA simulation from an elevation raster Example:: from gsshapy.orm import ProjectFile, ElevationGridFile from gsshapy.lib import db_tools as dbt gssha_directory = '/gsshapy/tests/grid_standard/gssha_project' elevation_raster = 'elevation.tif' project_manager, db_sessionmaker = dbt.get_project_session('grid_standard', gssha_directory) db_session = db_sessionmaker() # read project file project_manager.readInput(directory=gssha_directory, projectFileName='grid_standard.prj', session=db_session) # generate elevation grid elevation_grid = ElevationGridFile(session=db_session, project_file=project_manager) elevation_grid.generateFromRaster(elevation_raster) # write out updated parameters project_manager.writeInput(session=db_session, directory=gssha_directory, name='grid_standard') ] if <ast.UnaryOp object at 0x7da18f811b40> begin[:] <ast.Raise object at 0x7da18f812770> variable[elevation_raster] assign[=] call[name[os].path.abspath, parameter[name[elevation_raster]]] variable[shapefile_path] assign[=] call[name[os].path.abspath, parameter[name[shapefile_path]]] variable[mask_grid] assign[=] call[name[self].projectFile.getGrid, parameter[]] if compare[name[out_elevation_grid] is constant[None]] begin[:] variable[out_elevation_grid] assign[=] call[constant[{0}.{1}].format, parameter[name[self].projectFile.name, name[self].fileExtension]] variable[elevation_grid] assign[=] call[name[resample_grid], parameter[name[elevation_raster], name[mask_grid]]] with call[name[tmp_chdir], parameter[name[self].projectFile.project_directory]] begin[:] call[name[elevation_grid].to_grass_ascii, parameter[name[out_elevation_grid]]] if name[load_raster_to_db] begin[:] call[name[self]._load_raster_text, parameter[name[out_elevation_grid]]] name[self].filename assign[=] name[out_elevation_grid] call[name[self].projectFile.setCard, parameter[constant[ELEVATION], name[out_elevation_grid]]] call[name[self].projectFile.findOutlet, parameter[name[shapefile_path]]]
keyword[def] identifier[generateFromRaster] ( identifier[self] , identifier[elevation_raster] , identifier[shapefile_path] = keyword[None] , identifier[out_elevation_grid] = keyword[None] , identifier[resample_method] = identifier[gdalconst] . identifier[GRA_Average] , identifier[load_raster_to_db] = keyword[True] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[projectFile] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[elevation_raster] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[elevation_raster] ) identifier[shapefile_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[shapefile_path] ) identifier[mask_grid] = identifier[self] . identifier[projectFile] . identifier[getGrid] () keyword[if] identifier[out_elevation_grid] keyword[is] keyword[None] : identifier[out_elevation_grid] = literal[string] . identifier[format] ( identifier[self] . identifier[projectFile] . identifier[name] , identifier[self] . identifier[fileExtension] ) identifier[elevation_grid] = identifier[resample_grid] ( identifier[elevation_raster] , identifier[mask_grid] , identifier[resample_method] = identifier[resample_method] , identifier[as_gdal_grid] = keyword[True] ) keyword[with] identifier[tmp_chdir] ( identifier[self] . identifier[projectFile] . identifier[project_directory] ): identifier[elevation_grid] . identifier[to_grass_ascii] ( identifier[out_elevation_grid] , identifier[print_nodata] = keyword[False] ) keyword[if] identifier[load_raster_to_db] : identifier[self] . identifier[_load_raster_text] ( identifier[out_elevation_grid] ) identifier[self] . identifier[filename] = identifier[out_elevation_grid] identifier[self] . identifier[projectFile] . identifier[setCard] ( literal[string] , identifier[out_elevation_grid] , identifier[add_quotes] = keyword[True] ) identifier[self] . identifier[projectFile] . identifier[findOutlet] ( identifier[shapefile_path] )
def generateFromRaster(self, elevation_raster, shapefile_path=None, out_elevation_grid=None, resample_method=gdalconst.GRA_Average, load_raster_to_db=True): """ Generates an elevation grid for the GSSHA simulation from an elevation raster Example:: from gsshapy.orm import ProjectFile, ElevationGridFile from gsshapy.lib import db_tools as dbt gssha_directory = '/gsshapy/tests/grid_standard/gssha_project' elevation_raster = 'elevation.tif' project_manager, db_sessionmaker = dbt.get_project_session('grid_standard', gssha_directory) db_session = db_sessionmaker() # read project file project_manager.readInput(directory=gssha_directory, projectFileName='grid_standard.prj', session=db_session) # generate elevation grid elevation_grid = ElevationGridFile(session=db_session, project_file=project_manager) elevation_grid.generateFromRaster(elevation_raster) # write out updated parameters project_manager.writeInput(session=db_session, directory=gssha_directory, name='grid_standard') """ if not self.projectFile: raise ValueError('Must be connected to project file ...') # depends on [control=['if'], data=[]] # make sure paths are absolute as the working directory changes elevation_raster = os.path.abspath(elevation_raster) shapefile_path = os.path.abspath(shapefile_path) # must match elevation mask grid mask_grid = self.projectFile.getGrid() if out_elevation_grid is None: out_elevation_grid = '{0}.{1}'.format(self.projectFile.name, self.fileExtension) # depends on [control=['if'], data=['out_elevation_grid']] elevation_grid = resample_grid(elevation_raster, mask_grid, resample_method=resample_method, as_gdal_grid=True) with tmp_chdir(self.projectFile.project_directory): elevation_grid.to_grass_ascii(out_elevation_grid, print_nodata=False) # read raster into object if load_raster_to_db: self._load_raster_text(out_elevation_grid) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] self.filename = out_elevation_grid self.projectFile.setCard('ELEVATION', out_elevation_grid, add_quotes=True) # find outlet and add slope self.projectFile.findOutlet(shapefile_path)
def _populate_rv(self, dataset, **kwargs):
    """
    Populate columns necessary for an RV dataset

    This should not be called directly, but rather via :meth:`Body.populate_observable`
    or :meth:`System.populate_observables`
    """
    logger.debug("{}._populate_rv(dataset={})".format(self.component, dataset))

    # We need to fill all the flux-related columns so that we can weigh each
    # triangle's rv by its flux in the requested passband.
    lc_cols = self._populate_lc(dataset, **kwargs)

    # rv per element is just the z-component of the velocity vector.  Note
    # the change in sign from our right-handed system to rv conventions.
    # These will be weighted by the fluxes when integrating

    rvs = -1*self.mesh.velocities.for_computations[:,2]

    # Gravitational redshift
    if self.do_rv_grav:
        rv_grav = c.G*(self.mass*u.solMass)/(self.instantaneous_rpole*u.solRad)/c.c
        # rvs are in solRad/d internally
        rv_grav = rv_grav.to('solRad/d').value

        rvs += rv_grav

    cols = lc_cols
    cols['rvs'] = rvs
    return cols
def function[_populate_rv, parameter[self, dataset]]: constant[ Populate columns necessary for an RV dataset This should not be called directly, but rather via :meth:`Body.populate_observable` or :meth:`System.populate_observables` ] call[name[logger].debug, parameter[call[constant[{}._populate_rv(dataset={})].format, parameter[name[self].component, name[dataset]]]]] variable[lc_cols] assign[=] call[name[self]._populate_lc, parameter[name[dataset]]] variable[rvs] assign[=] binary_operation[<ast.UnaryOp object at 0x7da20ed9a8f0> * call[name[self].mesh.velocities.for_computations][tuple[[<ast.Slice object at 0x7da1b26ae950>, <ast.Constant object at 0x7da1b26af130>]]]] if name[self].do_rv_grav begin[:] variable[rv_grav] assign[=] binary_operation[binary_operation[binary_operation[name[c].G * binary_operation[name[self].mass * name[u].solMass]] / binary_operation[name[self].instantaneous_rpole * name[u].solRad]] / name[c].c] variable[rv_grav] assign[=] call[name[rv_grav].to, parameter[constant[solRad/d]]].value <ast.AugAssign object at 0x7da1b26aca00> variable[cols] assign[=] name[lc_cols] call[name[cols]][constant[rvs]] assign[=] name[rvs] return[name[cols]]
keyword[def] identifier[_populate_rv] ( identifier[self] , identifier[dataset] ,** identifier[kwargs] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[component] , identifier[dataset] )) identifier[lc_cols] = identifier[self] . identifier[_populate_lc] ( identifier[dataset] ,** identifier[kwargs] ) identifier[rvs] =- literal[int] * identifier[self] . identifier[mesh] . identifier[velocities] . identifier[for_computations] [:, literal[int] ] keyword[if] identifier[self] . identifier[do_rv_grav] : identifier[rv_grav] = identifier[c] . identifier[G] *( identifier[self] . identifier[mass] * identifier[u] . identifier[solMass] )/( identifier[self] . identifier[instantaneous_rpole] * identifier[u] . identifier[solRad] )/ identifier[c] . identifier[c] identifier[rv_grav] = identifier[rv_grav] . identifier[to] ( literal[string] ). identifier[value] identifier[rvs] += identifier[rv_grav] identifier[cols] = identifier[lc_cols] identifier[cols] [ literal[string] ]= identifier[rvs] keyword[return] identifier[cols]
def _populate_rv(self, dataset, **kwargs): """ Populate columns necessary for an RV dataset This should not be called directly, but rather via :meth:`Body.populate_observable` or :meth:`System.populate_observables` """ logger.debug('{}._populate_rv(dataset={})'.format(self.component, dataset)) # We need to fill all the flux-related columns so that we can weigh each # triangle's rv by its flux in the requested passband. lc_cols = self._populate_lc(dataset, **kwargs) # rv per element is just the z-component of the velocity vectory. Note # the change in sign from our right-handed system to rv conventions. # These will be weighted by the fluxes when integrating rvs = -1 * self.mesh.velocities.for_computations[:, 2] # Gravitational redshift if self.do_rv_grav: rv_grav = c.G * (self.mass * u.solMass) / (self.instantaneous_rpole * u.solRad) / c.c # rvs are in solRad/d internally rv_grav = rv_grav.to('solRad/d').value rvs += rv_grav # depends on [control=['if'], data=[]] cols = lc_cols cols['rvs'] = rvs return cols
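A worked check of the gravitational-redshift term with astropy, using solar values purely for illustration; c and u mirror the module's astropy.constants / astropy.units imports:

from astropy import constants as c
from astropy import units as u

mass = 1.0   # solMass, illustrative
rpole = 1.0  # solRad, illustrative

rv_grav = c.G * (mass * u.solMass) / (rpole * u.solRad) / c.c
print(rv_grav.to('km/s'))      # roughly 0.64 km/s for the Sun
print(rv_grav.to('solRad/d'))  # the unit used internally above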
def parse_commit_message(message: str) -> Tuple[int, str, Optional[str], Tuple[str, str, str]]:
    """
    Parses a commit message according to the 1.0 version of python-semantic-release.
    It expects a tag of some sort in the commit message and will use the rest of the first line
    as changelog content.

    :param message: A string of a commit message.
    :raises UnknownCommitMessageStyleError: If it does not recognise the commit style
    :return: A tuple of (level to bump, type of change,
             scope of change, a tuple with descriptions)
    """
    parsed = re_parser.match(message)
    if not parsed:
        raise UnknownCommitMessageStyleError(
            'Unable to parse the given commit message: {0}'.format(message)
        )

    subject = parsed.group('subject')

    if config.get('semantic_release', 'minor_tag') in message:
        level = 'feature'
        level_bump = 2
        if subject:
            subject = subject.replace(config.get('semantic_release', 'minor_tag'), '')
    elif config.get('semantic_release', 'fix_tag') in message:
        level = 'fix'
        level_bump = 1
        if subject:
            subject = subject.replace(config.get('semantic_release', 'fix_tag'), '')
    else:
        raise UnknownCommitMessageStyleError(
            'Unable to parse the given commit message: {0}'.format(message)
        )

    if parsed.group('text') and 'BREAKING CHANGE' in parsed.group('text'):
        level = 'breaking'
        level_bump = 3

    body, footer = parse_text_block(parsed.group('text'))
    return level_bump, level, None, (subject.strip(), body.strip(), footer.strip())
def function[parse_commit_message, parameter[message]]: constant[ Parses a commit message according to the 1.0 version of python-semantic-release. It expects a tag of some sort in the commit message and will use the rest of the first line as changelog content. :param message: A string of a commit message. :raises UnknownCommitMessageStyleError: If it does not recognise the commit style :return: A tuple of (level to bump, type of change, scope of change, a tuple with descriptions) ] variable[parsed] assign[=] call[name[re_parser].match, parameter[name[message]]] if <ast.UnaryOp object at 0x7da1b2345180> begin[:] <ast.Raise object at 0x7da1b2347e80> variable[subject] assign[=] call[name[parsed].group, parameter[constant[subject]]] if compare[call[name[config].get, parameter[constant[semantic_release], constant[minor_tag]]] in name[message]] begin[:] variable[level] assign[=] constant[feature] variable[level_bump] assign[=] constant[2] if name[subject] begin[:] variable[subject] assign[=] call[name[subject].replace, parameter[call[name[config].get, parameter[constant[semantic_release], call[constant[minor_tag].format, parameter[name[level]]]]], constant[]]] if <ast.BoolOp object at 0x7da20c7c9600> begin[:] variable[level] assign[=] constant[breaking] variable[level_bump] assign[=] constant[3] <ast.Tuple object at 0x7da20c7c8e80> assign[=] call[name[parse_text_block], parameter[call[name[parsed].group, parameter[constant[text]]]]] return[tuple[[<ast.Name object at 0x7da20c7c9d50>, <ast.Name object at 0x7da20c7caaa0>, <ast.Constant object at 0x7da20c7cbcd0>, <ast.Tuple object at 0x7da1b2344d30>]]]
keyword[def] identifier[parse_commit_message] ( identifier[message] : identifier[str] )-> identifier[Tuple] [ identifier[int] , identifier[str] , identifier[Optional] [ identifier[str] ], identifier[Tuple] [ identifier[str] , identifier[str] , identifier[str] ]]: literal[string] identifier[parsed] = identifier[re_parser] . identifier[match] ( identifier[message] ) keyword[if] keyword[not] identifier[parsed] : keyword[raise] identifier[UnknownCommitMessageStyleError] ( literal[string] . identifier[format] ( identifier[message] ) ) identifier[subject] = identifier[parsed] . identifier[group] ( literal[string] ) keyword[if] identifier[config] . identifier[get] ( literal[string] , literal[string] ) keyword[in] identifier[message] : identifier[level] = literal[string] identifier[level_bump] = literal[int] keyword[if] identifier[subject] : identifier[subject] = identifier[subject] . identifier[replace] ( identifier[config] . identifier[get] ( literal[string] , literal[string] . identifier[format] ( identifier[level] )), literal[string] ) keyword[elif] identifier[config] . identifier[get] ( literal[string] , literal[string] ) keyword[in] identifier[message] : identifier[level] = literal[string] identifier[level_bump] = literal[int] keyword[if] identifier[subject] : identifier[subject] = identifier[subject] . identifier[replace] ( identifier[config] . identifier[get] ( literal[string] , literal[string] . identifier[format] ( identifier[level] )), literal[string] ) keyword[else] : keyword[raise] identifier[UnknownCommitMessageStyleError] ( literal[string] . identifier[format] ( identifier[message] ) ) keyword[if] identifier[parsed] . identifier[group] ( literal[string] ) keyword[and] literal[string] keyword[in] identifier[parsed] . identifier[group] ( literal[string] ): identifier[level] = literal[string] identifier[level_bump] = literal[int] identifier[body] , identifier[footer] = identifier[parse_text_block] ( identifier[parsed] . identifier[group] ( literal[string] )) keyword[return] identifier[level_bump] , identifier[level] , keyword[None] ,( identifier[subject] . identifier[strip] (), identifier[body] . identifier[strip] (), identifier[footer] . identifier[strip] ())
def parse_commit_message(message: str) -> Tuple[int, str, Optional[str], Tuple[str, str, str]]: """ Parses a commit message according to the 1.0 version of python-semantic-release. It expects a tag of some sort in the commit message and will use the rest of the first line as changelog content. :param message: A string of a commit message. :raises UnknownCommitMessageStyleError: If it does not recognise the commit style :return: A tuple of (level to bump, type of change, scope of change, a tuple with descriptions) """ parsed = re_parser.match(message) if not parsed: raise UnknownCommitMessageStyleError('Unable to parse the given commit message: {0}'.format(message)) # depends on [control=['if'], data=[]] subject = parsed.group('subject') if config.get('semantic_release', 'minor_tag') in message: level = 'feature' level_bump = 2 if subject: subject = subject.replace(config.get('semantic_release', 'minor_tag'.format(level)), '') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif config.get('semantic_release', 'fix_tag') in message: level = 'fix' level_bump = 1 if subject: subject = subject.replace(config.get('semantic_release', 'fix_tag'.format(level)), '') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: raise UnknownCommitMessageStyleError('Unable to parse the given commit message: {0}'.format(message)) if parsed.group('text') and 'BREAKING CHANGE' in parsed.group('text'): level = 'breaking' level_bump = 3 # depends on [control=['if'], data=[]] (body, footer) = parse_text_block(parsed.group('text')) return (level_bump, level, None, (subject.strip(), body.strip(), footer.strip()))
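A standalone sketch of the tag-to-bump mapping, with made-up tag strings standing in for the configured minor_tag/fix_tag values:

MINOR_TAG, FIX_TAG = ":sparkles:", ":bug:"  # assumptions; real values come from config

def bump_level(message):
    if MINOR_TAG in message:
        level, bump = 'feature', 2
    elif FIX_TAG in message:
        level, bump = 'fix', 1
    else:
        raise ValueError('Unable to parse the given commit message: {0}'.format(message))
    if 'BREAKING CHANGE' in message:
        level, bump = 'breaking', 3
    return bump, level

print(bump_level(":sparkles: Add OAuth login"))                          # (2, 'feature')
print(bump_level(":bug: Fix refresh\n\nBREAKING CHANGE: drops v1 API"))  # (3, 'breaking')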
def xml_to_tupletree_sax(xml_string, meaning, conn_id=None):
    """
    Parse an XML string into tupletree with SAX parser.

    Parses the string using the class CIMContentHandler and
    returns the root element. As a SAX parser it uses minimal
    memory.

    This is a replacement for the previous parser (xml_to_tuple)
    which used the dom parser.

    Parameters:

      xml_string (:term:`string`): A unicode string (when called for embedded
        objects) or UTF-8 encoded byte string (when called for CIM-XML
        replies) containing the XML to be parsed.

      meaning (:term:`string`): Short text with meaning of the XML string,
        for messages in exceptions.

      conn_id (:term:`connection id`): Connection ID to be used in any
        exceptions that may be raised.

    Returns:

      tupletree tuple with parsed XML tree

    Raises:

      pywbem.XMLParseError: Error detected by SAX parser or UTF-8/XML
        checkers
    """

    handler = CIMContentHandler()

    # The following conversion to a byte string is required for two reasons:
    # 1. xml.sax.parseString() raises UnicodeEncodeError for unicode strings
    #    that contain any non-ASCII characters (despite its Python 2.7
    #    documentation which states that would be supported).
    # 2. The SAX parser in Python 2.6 and 3.4 (pywbem does not support 3.1 -
    #    3.3) does not accept unicode strings, raising:
    #    SAXParseException: "<unknown>:1:1: not well-formed (invalid token)"
    #    or:
    #    TypeError: 'str' does not support the buffer interface
    xml_string = _ensure_bytes(xml_string)

    try:
        xml.sax.parseString(xml_string, handler, None)
    except xml.sax.SAXParseException as exc:
        # xml.sax.parse() is documented to only raise SAXParseException. In
        # earlier versions of this code, xml.sax.parseString() has been found
        # to raise UnicodeEncodeError when unicode strings were passed, but
        # that is no longer done, so that exception is no longer caught.
        # Other exception types are unexpected and will percolate upwards.

        # Traceback of the exception that was caught
        org_tb = sys.exc_info()[2]

        # Improve quality of exception info (the check...() functions may
        # raise XMLParseError):
        _chk_str = check_invalid_utf8_sequences(xml_string, meaning, conn_id)
        check_invalid_xml_chars(_chk_str, meaning, conn_id)

        # If the checks above pass, re-raise the SAX exception info, with its
        # original traceback info:
        lineno, colno, new_colno, line = get_failing_line(xml_string, str(exc))
        if lineno is not None:
            marker_line = ' ' * (new_colno - 1) + '^'
            xml_msg = _format(
                "Line {0} column {1} of XML string (as binary UTF-8 string):\n"
                "{2}\n"
                "{3}",
                lineno, colno, line, marker_line)
        else:
            xml_msg = _format(
                "XML string (as binary UTF-8 string):\n"
                "{0}",
                line)

        pe = XMLParseError(
            _format("XML parsing error encountered in {0}: {1}\n{2}\n",
                    meaning, exc, xml_msg),
            conn_id=conn_id)
        six.reraise(type(pe), pe, org_tb)  # ignore this call in traceback!

    return handler.root
def function[xml_to_tupletree_sax, parameter[xml_string, meaning, conn_id]]: constant[ Parse an XML string into tupletree with SAX parser. Parses the string using the class CIMContentHandler and returns the root element. As a SAX parser it uses minimal memory. This is a replacement for the previous parser (xml_to_tuple) which used the dom parser. Parameters: xml_string (:term:`string`): A unicode string (when called for embedded objects) or UTF-8 encoded byte string (when called for CIM-XML replies) containing the XML to be parsed. meaning (:term:`string`): Short text with meaning of the XML string, for messages in exceptions. conn_id (:term:`connection id`): Connection ID to be used in any exceptions that may be raised. Returns: tupletree tuple with parsed XML tree Raises: pywbem.XMLParseError: Error detected by SAX parser or UTF-8/XML checkers ] variable[handler] assign[=] call[name[CIMContentHandler], parameter[]] variable[xml_string] assign[=] call[name[_ensure_bytes], parameter[name[xml_string]]] <ast.Try object at 0x7da1b0c94670> return[name[handler].root]
keyword[def] identifier[xml_to_tupletree_sax] ( identifier[xml_string] , identifier[meaning] , identifier[conn_id] = keyword[None] ): literal[string] identifier[handler] = identifier[CIMContentHandler] () identifier[xml_string] = identifier[_ensure_bytes] ( identifier[xml_string] ) keyword[try] : identifier[xml] . identifier[sax] . identifier[parseString] ( identifier[xml_string] , identifier[handler] , keyword[None] ) keyword[except] identifier[xml] . identifier[sax] . identifier[SAXParseException] keyword[as] identifier[exc] : identifier[org_tb] = identifier[sys] . identifier[exc_info] ()[ literal[int] ] identifier[_chk_str] = identifier[check_invalid_utf8_sequences] ( identifier[xml_string] , identifier[meaning] , identifier[conn_id] ) identifier[check_invalid_xml_chars] ( identifier[_chk_str] , identifier[meaning] , identifier[conn_id] ) identifier[lineno] , identifier[colno] , identifier[new_colno] , identifier[line] = identifier[get_failing_line] ( identifier[xml_string] , identifier[str] ( identifier[exc] )) keyword[if] identifier[lineno] keyword[is] keyword[not] keyword[None] : identifier[marker_line] = literal[string] *( identifier[new_colno] - literal[int] )+ literal[string] identifier[xml_msg] = identifier[_format] ( literal[string] literal[string] literal[string] , identifier[lineno] , identifier[colno] , identifier[line] , identifier[marker_line] ) keyword[else] : identifier[xml_msg] = identifier[_format] ( literal[string] literal[string] , identifier[line] ) identifier[pe] = identifier[XMLParseError] ( identifier[_format] ( literal[string] , identifier[meaning] , identifier[exc] , identifier[xml_msg] ), identifier[conn_id] = identifier[conn_id] ) identifier[six] . identifier[reraise] ( identifier[type] ( identifier[pe] ), identifier[pe] , identifier[org_tb] ) keyword[return] identifier[handler] . identifier[root]
def xml_to_tupletree_sax(xml_string, meaning, conn_id=None): """ Parse an XML string into tupletree with SAX parser. Parses the string using the class CIMContentHandler and returns the root element. As a SAX parser it uses minimal memory. This is a replacement for the previous parser (xml_to_tuple) which used the dom parser. Parameters: xml_string (:term:`string`): A unicode string (when called for embedded objects) or UTF-8 encoded byte string (when called for CIM-XML replies) containing the XML to be parsed. meaning (:term:`string`): Short text with meaning of the XML string, for messages in exceptions. conn_id (:term:`connection id`): Connection ID to be used in any exceptions that may be raised. Returns: tupletree tuple with parsed XML tree Raises: pywbem.XMLParseError: Error detected by SAX parser or UTF-8/XML checkers """ handler = CIMContentHandler() # The following conversion to a byte string is required for two reasons: # 1. xml.sax.parseString() raises UnicodeEncodeError for unicode strings # that contain any non-ASCII characters (despite its Python 2.7 # documentation which states that would be supported). # 2. The SAX parser in Python 2.6 and 3.4 (pywbem does not support 3.1 - # 3.3) does not accept unicode strings, raising: # SAXParseException: "<unknown>:1:1: not well-formed (invalid token)" # or: # TypeError: 'str' does not support the buffer interface xml_string = _ensure_bytes(xml_string) try: xml.sax.parseString(xml_string, handler, None) # depends on [control=['try'], data=[]] except xml.sax.SAXParseException as exc: # xml.sax.parse() is documented to only raise SAXParseException. In # earlier versions of this code, xml.sax.parseString() has been found # to raise UnicodeEncodeError when unicode strings were passed, but # that is no longer done, so that exception is no longer caught. # Other exception types are unexpected and will perculate upwards. # Traceback of the exception that was caught org_tb = sys.exc_info()[2] # Improve quality of exception info (the check...() functions may # raise XMLParseError): _chk_str = check_invalid_utf8_sequences(xml_string, meaning, conn_id) check_invalid_xml_chars(_chk_str, meaning, conn_id) # If the checks above pass, re-raise the SAX exception info, with its # original traceback info: (lineno, colno, new_colno, line) = get_failing_line(xml_string, str(exc)) if lineno is not None: marker_line = ' ' * (new_colno - 1) + '^' xml_msg = _format('Line {0} column {1} of XML string (as binary UTF-8 string):\n{2}\n{3}', lineno, colno, line, marker_line) # depends on [control=['if'], data=['lineno']] else: xml_msg = _format('XML string (as binary UTF-8 string):\n{0}', line) pe = XMLParseError(_format('XML parsing error encountered in {0}: {1}\n{2}\n', meaning, exc, xml_msg), conn_id=conn_id) six.reraise(type(pe), pe, org_tb) # ignore this call in traceback! # depends on [control=['except'], data=['exc']] return handler.root
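The SAX failure path can be exercised in isolation; a minimal sketch in which NullHandler stands in for CIMContentHandler:

import xml.sax

class NullHandler(xml.sax.ContentHandler):
    pass

try:
    xml.sax.parseString(b"<CIM><BAD</CIM>", NullHandler())
except xml.sax.SAXParseException as exc:
    # the line/column reported here drive the marker line built above
    print(exc.getLineNumber(), exc.getColumnNumber(), exc.getMessage())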
def delete(self, request, response): """Processes a `DELETE` request.""" if self.slug is None: # Mass-DELETE is not implemented. raise http.exceptions.NotImplemented() # Ensure we're allowed to destroy a resource. self.assert_operations('destroy') # Delegate to `destroy` to destroy the item. self.destroy() # Build the response object. self.response.status = http.client.NO_CONTENT self.make_response()
def function[delete, parameter[self, request, response]]: constant[Processes a `DELETE` request.] if compare[name[self].slug is constant[None]] begin[:] <ast.Raise object at 0x7da1b008e440> call[name[self].assert_operations, parameter[constant[destroy]]] call[name[self].destroy, parameter[]] name[self].response.status assign[=] name[http].client.NO_CONTENT call[name[self].make_response, parameter[]]
keyword[def] identifier[delete] ( identifier[self] , identifier[request] , identifier[response] ): literal[string] keyword[if] identifier[self] . identifier[slug] keyword[is] keyword[None] : keyword[raise] identifier[http] . identifier[exceptions] . identifier[NotImplemented] () identifier[self] . identifier[assert_operations] ( literal[string] ) identifier[self] . identifier[destroy] () identifier[self] . identifier[response] . identifier[status] = identifier[http] . identifier[client] . identifier[NO_CONTENT] identifier[self] . identifier[make_response] ()
def delete(self, request, response): """Processes a `DELETE` request.""" if self.slug is None: # Mass-DELETE is not implemented. raise http.exceptions.NotImplemented() # depends on [control=['if'], data=[]] # Ensure we're allowed to destroy a resource. self.assert_operations('destroy') # Delegate to `destroy` to destroy the item. self.destroy() # Build the response object. self.response.status = http.client.NO_CONTENT self.make_response()
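A minimal stand-alone sketch of the same DELETE semantics: mass-DELETE is refused, item DELETE destroys and answers 204. The Resource class here is illustrative, not the framework's:

import http.client

class Resource:
    def __init__(self, slug=None):
        self.slug = slug
        self.status = None

    def destroy(self):
        print("destroyed", self.slug)

    def delete(self):
        if self.slug is None:
            raise NotImplementedError("mass-DELETE is not supported")
        self.destroy()
        self.status = http.client.NO_CONTENT  # 204

Resource(slug="42").delete()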
def parse_line(line): """Parses a line of a text embedding file. Args: line: (str) One line of the text embedding file. Returns: A token string and its embedding vector in floats. """ columns = line.split() token = columns.pop(0) values = [float(column) for column in columns] return token, values
def function[parse_line, parameter[line]]: constant[Parses a line of a text embedding file. Args: line: (str) One line of the text embedding file. Returns: A token string and its embedding vector in floats. ] variable[columns] assign[=] call[name[line].split, parameter[]] variable[token] assign[=] call[name[columns].pop, parameter[constant[0]]] variable[values] assign[=] <ast.ListComp object at 0x7da1b20bad10> return[tuple[[<ast.Name object at 0x7da2044c20e0>, <ast.Name object at 0x7da2044c3220>]]]
keyword[def] identifier[parse_line] ( identifier[line] ): literal[string] identifier[columns] = identifier[line] . identifier[split] () identifier[token] = identifier[columns] . identifier[pop] ( literal[int] ) identifier[values] =[ identifier[float] ( identifier[column] ) keyword[for] identifier[column] keyword[in] identifier[columns] ] keyword[return] identifier[token] , identifier[values]
def parse_line(line): """Parses a line of a text embedding file. Args: line: (str) One line of the text embedding file. Returns: A token string and its embedding vector in floats. """ columns = line.split() token = columns.pop(0) values = [float(column) for column in columns] return (token, values)
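For example, one line of a GloVe-style text embedding file parses as:

line = "cat 0.12 -0.03 0.98"
columns = line.split()
token = columns.pop(0)
values = [float(column) for column in columns]
print(token, values)  # cat [0.12, -0.03, 0.98]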
async def create(
        cls, start_ip: str, end_ip: str, *,
        type: IPRangeType = IPRangeType.RESERVED,
        comment: str = None,
        subnet: Union[Subnet, int] = None):
    """
    Create a `IPRange` in MAAS.

    :param start_ip: First IP address in the range (required).
    :type start_ip: `str`
    :param end_ip: Last IP address in the range (required).
    :type end_ip: `str`
    :param type: Type of IP address range (optional).
    :type type: `IPRangeType`
    :param comment: Reason for the IP address range (optional).
    :type comment: `str`
    :param subnet: Subnet the IP address range should be created on
        (optional). By default MAAS will calculate the correct subnet
        based on the `start_ip` and `end_ip`.
    :type subnet: `Subnet` or `int`
    :returns: The created IPRange
    :rtype: `IPRange`
    """
    if not isinstance(type, IPRangeType):
        raise TypeError(
            "type must be an IPRangeType, not %s" % TYPE(type).__name__)
    params = {
        'start_ip': start_ip,
        'end_ip': end_ip,
        'type': type.value,
    }
    if comment is not None:
        params["comment"] = comment
    if subnet is not None:
        if isinstance(subnet, Subnet):
            params["subnet"] = subnet.id
        elif isinstance(subnet, int):
            params["subnet"] = subnet
        else:
            raise TypeError(
                "subnet must be Subnet or int, not %s" % (
                    TYPE(subnet).__name__))
    return cls._object(await cls._handler.create(**params))
<ast.AsyncFunctionDef object at 0x7da20c76f3d0>
keyword[async] keyword[def] identifier[create] ( identifier[cls] , identifier[start_ip] : identifier[str] , identifier[end_ip] : identifier[str] ,*, identifier[type] : identifier[IPRangeType] = identifier[IPRangeType] . identifier[RESERVED] , identifier[comment] : identifier[str] = keyword[None] , identifier[subnet] : identifier[Union] [ identifier[Subnet] , identifier[int] ]= keyword[None] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[type] , identifier[IPRangeType] ): keyword[raise] identifier[TypeError] ( literal[string] % identifier[TYPE] ( identifier[type] ). identifier[__name__] ) identifier[params] ={ literal[string] : identifier[start_ip] , literal[string] : identifier[end_ip] , literal[string] : identifier[type] . identifier[value] , } keyword[if] identifier[comment] keyword[is] keyword[not] keyword[None] : identifier[params] [ literal[string] ]= identifier[comment] keyword[if] identifier[subnet] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[isinstance] ( identifier[subnet] , identifier[Subnet] ): identifier[params] [ literal[string] ]= identifier[subnet] . identifier[id] keyword[elif] identifier[isinstance] ( identifier[subnet] , identifier[int] ): identifier[params] [ literal[string] ]= identifier[subnet] keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] %( identifier[TYPE] ( identifier[subnet] ). identifier[__class__] )) keyword[return] identifier[cls] . identifier[_object] ( keyword[await] identifier[cls] . identifier[_handler] . identifier[create] (** identifier[params] ))
async def create(cls, start_ip: str, end_ip: str, *, type: IPRangeType=IPRangeType.RESERVED, comment: str=None, subnet: Union[Subnet, int]=None): """ Create a `IPRange` in MAAS. :param start_ip: First IP address in the range (required). :type start_ip: `str` :parma end_ip: Last IP address in the range (required). :type end_ip: `str` :param type: Type of IP address range (optional). :type type: `IPRangeType` :param comment: Reason for the IP address range (optional). :type comment: `str` :param subnet: Subnet the IP address range should be created on (optional). By default MAAS will calculate the correct subnet based on the `start_ip` and `end_ip`. :type subnet: `Subnet` or `int` :returns: The created IPRange :rtype: `IPRange` """ if not isinstance(type, IPRangeType): raise TypeError('type must be an IPRangeType, not %s' % TYPE(type).__name__) # depends on [control=['if'], data=[]] params = {'start_ip': start_ip, 'end_ip': end_ip, 'type': type.value} if comment is not None: params['comment'] = comment # depends on [control=['if'], data=['comment']] if subnet is not None: if isinstance(subnet, Subnet): params['subnet'] = subnet.id # depends on [control=['if'], data=[]] elif isinstance(subnet, int): params['subnet'] = subnet # depends on [control=['if'], data=[]] else: raise TypeError('subnet must be Subnet or int, not %s' % TYPE(subnet).__class__) # depends on [control=['if'], data=['subnet']] return cls._object(await cls._handler.create(**params))
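A hedged usage sketch of the documented call. The import path is an assumption about python-libmaas, and the classmethod only works once a logged-in session has bound the handler (the login step is elided):

import asyncio
from maas.client.viscera.ipranges import IPRange, IPRangeType  # assumed path

async def main():
    ip_range = await IPRange.create(
        "192.168.1.100", "192.168.1.200",
        type=IPRangeType.RESERVED, comment="reserved for infrastructure")
    print(ip_range)

asyncio.run(main())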
def remove_widget(self): """ Removes the Component Widget from the engine. :return: Method success. :rtype: bool """ LOGGER.debug("> Removing '{0}' Component Widget.".format(self.__class__.__name__)) self.__preferences_manager.findChild(QGridLayout, "Others_Preferences_gridLayout").removeWidget(self) self.TCP_Client_Ui_groupBox.setParent(None) return True
def function[remove_widget, parameter[self]]: constant[ Removes the Component Widget from the engine. :return: Method success. :rtype: bool ] call[name[LOGGER].debug, parameter[call[constant[> Removing '{0}' Component Widget.].format, parameter[name[self].__class__.__name__]]]] call[call[name[self].__preferences_manager.findChild, parameter[name[QGridLayout], constant[Others_Preferences_gridLayout]]].removeWidget, parameter[name[self]]] call[name[self].TCP_Client_Ui_groupBox.setParent, parameter[constant[None]]] return[constant[True]]
keyword[def] identifier[remove_widget] ( identifier[self] ): literal[string] identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] )) identifier[self] . identifier[__preferences_manager] . identifier[findChild] ( identifier[QGridLayout] , literal[string] ). identifier[removeWidget] ( identifier[self] ) identifier[self] . identifier[TCP_Client_Ui_groupBox] . identifier[setParent] ( keyword[None] ) keyword[return] keyword[True]
def remove_widget(self): """ Removes the Component Widget from the engine. :return: Method success. :rtype: bool """ LOGGER.debug("> Removing '{0}' Component Widget.".format(self.__class__.__name__)) self.__preferences_manager.findChild(QGridLayout, 'Others_Preferences_gridLayout').removeWidget(self) self.TCP_Client_Ui_groupBox.setParent(None) return True
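The removeWidget/setParent(None) pair is the generic Qt teardown pattern; a PyQt5 sketch with illustrative widget names (needs a display to run):

from PyQt5.QtWidgets import QApplication, QGridLayout, QLabel, QWidget

app = QApplication([])
container = QWidget()
layout = QGridLayout(container)
child = QLabel("Component")
layout.addWidget(child, 0, 0)

layout.removeWidget(child)  # detach from the layout...
child.setParent(None)       # ...and orphan it so Qt can dispose of it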
def factorset_product(*factorsets_list): r""" Base method used for product of factor sets. Suppose :math:`\vec\phi_1` and :math:`\vec\phi_2` are two factor sets then their product is a another factors set :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`. Parameters ---------- factorsets_list: FactorSet1, FactorSet2, ..., FactorSetn All the factor sets to be multiplied Returns ------- Product of factorset in factorsets_list Examples -------- >>> from pgmpy.factors import FactorSet >>> from pgmpy.factors.discrete import DiscreteFactor >>> from pgmpy.factors import factorset_product >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12)) >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8)) >>> factor_set1 = FactorSet(phi1, phi2) >>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8)) >>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8)) >>> factor_set2 = FactorSet(phi3, phi4) >>> factor_set3 = factorset_product(factor_set1, factor_set2) >>> print(factor_set3) set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7fb3a1933e90>, <DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7fb3a1933f10>, <DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7fb3a1933f90>, <DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7fb3a1933e10>]) """ if not all(isinstance(factorset, FactorSet) for factorset in factorsets_list): raise TypeError("Input parameters must be FactorSet instances") return reduce(lambda x, y: x.product(y, inplace=False), factorsets_list)
def function[factorset_product, parameter[]]: constant[ Base method used for product of factor sets. Suppose :math:`\vec\phi_1` and :math:`\vec\phi_2` are two factor sets then their product is a another factors set :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`. Parameters ---------- factorsets_list: FactorSet1, FactorSet2, ..., FactorSetn All the factor sets to be multiplied Returns ------- Product of factorset in factorsets_list Examples -------- >>> from pgmpy.factors import FactorSet >>> from pgmpy.factors.discrete import DiscreteFactor >>> from pgmpy.factors import factorset_product >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12)) >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8)) >>> factor_set1 = FactorSet(phi1, phi2) >>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8)) >>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8)) >>> factor_set2 = FactorSet(phi3, phi4) >>> factor_set3 = factorset_product(factor_set1, factor_set2) >>> print(factor_set3) set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7fb3a1933e90>, <DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7fb3a1933f10>, <DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7fb3a1933f90>, <DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7fb3a1933e10>]) ] if <ast.UnaryOp object at 0x7da18f00e470> begin[:] <ast.Raise object at 0x7da18f00d390> return[call[name[reduce], parameter[<ast.Lambda object at 0x7da18f00d210>, name[factorsets_list]]]]
keyword[def] identifier[factorset_product] (* identifier[factorsets_list] ): literal[string] keyword[if] keyword[not] identifier[all] ( identifier[isinstance] ( identifier[factorset] , identifier[FactorSet] ) keyword[for] identifier[factorset] keyword[in] identifier[factorsets_list] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[return] identifier[reduce] ( keyword[lambda] identifier[x] , identifier[y] : identifier[x] . identifier[product] ( identifier[y] , identifier[inplace] = keyword[False] ), identifier[factorsets_list] )
def factorset_product(*factorsets_list): """ Base method used for product of factor sets. Suppose :math:`\\vec\\phi_1` and :math:`\\vec\\phi_2` are two factor sets then their product is a another factors set :math:`\\vec\\phi_3 = \\vec\\phi_1 \\cup \\vec\\phi_2`. Parameters ---------- factorsets_list: FactorSet1, FactorSet2, ..., FactorSetn All the factor sets to be multiplied Returns ------- Product of factorset in factorsets_list Examples -------- >>> from pgmpy.factors import FactorSet >>> from pgmpy.factors.discrete import DiscreteFactor >>> from pgmpy.factors import factorset_product >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12)) >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8)) >>> factor_set1 = FactorSet(phi1, phi2) >>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8)) >>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8)) >>> factor_set2 = FactorSet(phi3, phi4) >>> factor_set3 = factorset_product(factor_set1, factor_set2) >>> print(factor_set3) set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7fb3a1933e90>, <DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7fb3a1933f10>, <DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7fb3a1933f90>, <DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7fb3a1933e10>]) """ if not all((isinstance(factorset, FactorSet) for factorset in factorsets_list)): raise TypeError('Input parameters must be FactorSet instances') # depends on [control=['if'], data=[]] return reduce(lambda x, y: x.product(y, inplace=False), factorsets_list)
def _scan(self, type):
    """
    Returns the matched text, and moves to the next token
    """
    tok = self._scanner.token(self._pos, frozenset([type]))
    self._char_pos = tok[0]
    if tok[2] != type:
        raise SyntaxError("SyntaxError[@ char %s: %s]" % (repr(tok[0]), "Trying to find " + type))
    self._pos += 1
    return tok[3]
def function[_scan, parameter[self, type]]: constant[ Returns the matched text, and moves to the next token ] variable[tok] assign[=] call[name[self]._scanner.token, parameter[name[self]._pos, call[name[frozenset], parameter[list[[<ast.Name object at 0x7da18bccb070>]]]]]] name[self]._char_pos assign[=] call[name[tok]][constant[0]] if compare[call[name[tok]][constant[2]] not_equal[!=] name[type]] begin[:] <ast.Raise object at 0x7da18bcca860> <ast.AugAssign object at 0x7da207f02110> return[call[name[tok]][constant[3]]]
keyword[def] identifier[_scan] ( identifier[self] , identifier[type] ): literal[string] identifier[tok] = identifier[self] . identifier[_scanner] . identifier[token] ( identifier[self] . identifier[_pos] , identifier[frozenset] ([ identifier[type] ])) identifier[self] . identifier[_char_pos] = identifier[tok] [ literal[int] ] keyword[if] identifier[tok] [ literal[int] ]!= identifier[type] : keyword[raise] identifier[SyntaxError] ( literal[string] %( identifier[repr] ( identifier[tok] [ literal[int] ]), literal[string] + identifier[type] )) identifier[self] . identifier[_pos] += literal[int] keyword[return] identifier[tok] [ literal[int] ]
def _scan(self, type):
    """
    Returns the matched text, and moves to the next token
    """
    tok = self._scanner.token(self._pos, frozenset([type]))
    self._char_pos = tok[0]
    if tok[2] != type:
        raise SyntaxError('SyntaxError[@ char %s: %s]' % (repr(tok[0]), 'Trying to find ' + type)) # depends on [control=['if'], data=['type']]
    self._pos += 1
    return tok[3]
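A runnable sketch of the calling convention `_scan` implies. The 4-tuple layout `(start, end, type, text)` is inferred from the `tok[0]`/`tok[2]`/`tok[3]` indexing above and is an assumption, as are the `ToyScanner` and `ToyParser` names; the real `self._scanner` presumably also honors the `frozenset` of permitted token types, which this stub ignores:

import re

class ToyScanner:
    # token tuples laid out as (start, end, type, text), matching _scan's indexing
    TOKENS = [('NUMBER', r'\d+'), ('PLUS', r'\+'), ('WS', r'\s+')]

    def __init__(self, text):
        self.toks, pos = [], 0
        while pos < len(text):
            for name, pat in self.TOKENS:
                m = re.match(pat, text[pos:])
                if m:
                    self.toks.append((pos, pos + m.end(), name, m.group()))
                    pos += m.end()
                    break
            else:
                raise ValueError('no token at char %d' % pos)

    def token(self, i, restrict):
        return self.toks[i]  # the restrict set is ignored in this stub

class ToyParser:
    _scan = _scan  # reuse the method above, assuming it is in scope at module level

    def __init__(self, text):
        self._scanner = ToyScanner(text)
        self._pos = 0
        self._char_pos = 0

    def expr(self):
        left = int(self._scan('NUMBER'))
        self._scan('WS')
        self._scan('PLUS')
        self._scan('WS')
        return left + int(self._scan('NUMBER'))

# ToyParser('12 + 30').expr() -> 42; ToyParser('12 30').expr() raises SyntaxError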
def parse_single_ad(ad, global_names, common_words, args={}):
    """
    An example extraction of a Backpage ad, with the following parameters:

    ad -> A dict representing an ad that is scraped as such:

    ad = items.BackpageScrapeItem(
        backpage_id=response.url.split('.')[0].split('/')[2].encode('utf-8'),
        date = str(self.static_now)[:10],
        posted_age = response.xpath("//p[@class='metaInfoDisplay']/text()").extract()[0].encode('utf-8'),
        posted_date = response.xpath("//div[@class='adInfo']/text()").extract()[0].encode('utf-8'),
        posted_title = response.xpath("//div[@id='postingTitle']//h1/text()").extract()[0].encode('utf-8'),
        posting_body= response.xpath("//div[@class='postingBody']").extract()[0].encode('utf-8'),
        text = response.body,
        url=response.url
        )
    """
    multiple_phones = False if 'multiple_phones' not in args else args['multiple_phones']
    item = {}

    # Backpage category
    ## 1 --> FemaleEscorts
    ## 2 --> BodyRubs
    ## 3 --> Dating section (after 1/9/17 Backpage shutdown of FemaleEscorts and BodyRubs)
    ## 4 --> TherapeuticMassage section (1/23/17 partial begin date, 1/24/17 full begin date)
    if 'therapeuticmassage' in ad['url'].split('.backpage.com/')[1].lower():
        item['category'] = 4
    else:
        item['category'] = 3

    # parse age
    if item['category'] == 4:
        item['age'] = -1
    else:
        item['age'] = int(re.sub(r'\D', '', ad['posted_age'][14:]))
        # Get rid of any posted age under 10 or over 60. Assign -1 if it is invalid
        if item['age'] < 10 or item['age'] > 60:
            item['age'] = -1

    ageless_title = re.sub(r' {1,2}- {1,2}\d\d\Z', '', ad['posted_title'])
    ad_text = json.dumps(ad['text'].lower())

    # Get filtered, decomposed list of body + title parts.
    # 'Parts' are separated only when there is a newline (\n) character in the ad.
    decoded_body = decode_unicode(ad['posting_body'], replace_boo=False)[1]
    decoded_title = decode_unicode(ageless_title, replace_boo=False)[1]
    parts = get_clean_parts(decoded_body, decoded_title)
    loc_section = get_location_section(ad_text)
    loc_parts = get_clean_loc_parts(loc_section, is_location=True)
    all_parts = {
        'body': parts,
        'loc': [','.join(loc_parts)]
    }

    item['time'] = parser.parse_time(repr(ad['posted_date']))
    item['post_id'] = parser.parse_posting_id(ad_text, ad['city'])

    # Find/remove phone number
    ret_data = parser.parse_phone(all_parts['body'], allow_multiple=multiple_phones)
    item['phone'] = ret_data[0]
    all_parts['body'] = ret_data[1]
    if multiple_phones:
        # parse phone(s) from the URL
        url = ad['url'].replace('http://', '')
        url = re.sub(r'\w+\.backpage\.com/\w+/', '', url)
        url = url.split('/')
        ret_data = parser.parse_phone(url, allow_multiple=multiple_phones)
        if ret_data[0]:
            item['phone'].extend(ret_data[0])
            item['phone'] = list(set(item['phone']))

    # Find/remove "No blacks allowed"
    ret_data = parser.parse_no_blacks(all_parts['body'])
    item['no_blacks'] = ret_data[0]
    all_parts['body'] = ret_data[1]

    # Find/remove ethnicity(s)
    ret_data = parser.parse_ethnicity(all_parts['body'])
    item['ethnicity'] = ret_data[0]
    all_parts['body'] = ret_data[1]

    # Find/remove "trucker friendly"
    ret_data = parser.parse_truckers(all_parts['body'])
    tf = ret_data[0]
    if not tf:
        tf = parser.parse_truckers(all_parts['loc'])[0]
    item['trucker'] = tf
    all_parts['body'] = ret_data[1]

    # Find names
    item['name'] = parser.parse_name(all_parts['body'], global_names, common_words)

    # Indicators that may indicate a HT victim. Still in early stages
    item['indicators'] = parser.parse_indicators(all_parts['body'], item['ethnicity'])

    # Parse whether or not the ad lists itself as 'military' friendly
    item['military'] = parser.parse_military_friendly(all_parts['body'])

    return item
def function[parse_single_ad, parameter[ad, global_names, common_words, args]]: constant[ An example extraction of a Backpage ad, with the following parameters: ad -> A dict representing an ad that is scraped as such: ad = items.BackpageScrapeItem( backpage_id=response.url.split('.')[0].split('/')[2].encode('utf-8'), date = str(self.static_now)[:10], posted_age = response.xpath("//p[@class='metaInfoDisplay']/text()").extract()[0].encode('utf-8'), posted_date = response.xpath("//div[@class='adInfo']/text()").extract()[0].encode('utf-8'), posted_title = response.xpath("//div[@id='postingTitle']//h1/text()").extract()[0].encode('utf-8'), posting_body= response.xpath("//div[@class='postingBody']").extract()[0].encode('utf-8'), text = response.body, url=response.url ) ] variable[multiple_phones] assign[=] <ast.IfExp object at 0x7da1b0bdbdc0> variable[item] assign[=] dictionary[[], []] if compare[constant[therapeuticmassage] in call[call[call[call[name[ad]][constant[url]].split, parameter[constant[.backpage.com/]]]][constant[1]].lower, parameter[]]] begin[:] call[name[item]][constant[category]] assign[=] constant[4] if compare[call[name[item]][constant[category]] equal[==] constant[4]] begin[:] call[name[item]][constant[age]] assign[=] <ast.UnaryOp object at 0x7da1b0ccbeb0> if <ast.BoolOp object at 0x7da1b0ca4d90> begin[:] call[name[item]][constant[age]] assign[=] <ast.UnaryOp object at 0x7da1b0ca5060> variable[ageless_title] assign[=] call[name[re].sub, parameter[constant[ {1,2}- {1,2}\d\d\Z], constant[], call[name[ad]][constant[posted_title]]]] variable[ad_text] assign[=] call[name[json].dumps, parameter[call[call[name[ad]][constant[text]].lower, parameter[]]]] variable[decoded_body] assign[=] call[call[name[decode_unicode], parameter[call[name[ad]][constant[posting_body]]]]][constant[1]] variable[decoded_title] assign[=] call[call[name[decode_unicode], parameter[name[ageless_title]]]][constant[1]] variable[parts] assign[=] call[name[get_clean_parts], parameter[name[decoded_body], name[decoded_title]]] variable[loc_section] assign[=] call[name[get_location_section], parameter[name[ad_text]]] variable[loc_parts] assign[=] call[name[get_clean_loc_parts], parameter[name[loc_section]]] variable[all_parts] assign[=] dictionary[[<ast.Constant object at 0x7da1b0bdb940>, <ast.Constant object at 0x7da1b0bd9930>], [<ast.Name object at 0x7da1b0bd89d0>, <ast.List object at 0x7da1b0bdaad0>]] call[name[item]][constant[time]] assign[=] call[name[parser].parse_time, parameter[call[name[repr], parameter[call[name[ad]][constant[posted_date]]]]]] call[name[item]][constant[post_id]] assign[=] call[name[parser].parse_posting_id, parameter[name[ad_text], call[name[ad]][constant[city]]]] variable[ret_data] assign[=] call[name[parser].parse_phone, parameter[call[name[all_parts]][constant[body]]]] call[name[item]][constant[phone]] assign[=] call[name[ret_data]][constant[0]] call[name[all_parts]][constant[body]] assign[=] call[name[ret_data]][constant[1]] if name[multiple_phones] begin[:] variable[url] assign[=] call[call[name[ad]][constant[url]].replace, parameter[constant[http://], constant[]]] variable[url] assign[=] call[name[re].sub, parameter[constant[\w+\.backpage\.com/\w+/], constant[], name[url]]] variable[url] assign[=] call[name[url].split, parameter[constant[/]]] variable[ret_data] assign[=] call[name[parser].parse_phone, parameter[name[url]]] if call[name[ret_data]][constant[0]] begin[:] call[call[name[item]][constant[phone]].extend, parameter[call[name[ret_data]][constant[0]]]] call[name[item]][constant[phone]] assign[=] call[name[list], parameter[call[name[set], parameter[call[name[item]][constant[phone]]]]]] variable[ret_data] assign[=] call[name[parser].parse_no_blacks, parameter[call[name[all_parts]][constant[body]]]] call[name[item]][constant[no_blacks]] assign[=] call[name[ret_data]][constant[0]] call[name[all_parts]][constant[body]] assign[=] call[name[ret_data]][constant[1]] variable[ret_data] assign[=] call[name[parser].parse_ethnicity, parameter[call[name[all_parts]][constant[body]]]] call[name[item]][constant[ethnicity]] assign[=] call[name[ret_data]][constant[0]] call[name[all_parts]][constant[body]] assign[=] call[name[ret_data]][constant[1]] variable[ret_data] assign[=] call[name[parser].parse_truckers, parameter[call[name[all_parts]][constant[body]]]] variable[tf] assign[=] call[name[ret_data]][constant[0]] if <ast.UnaryOp object at 0x7da1b0bdb610> begin[:] variable[tf] assign[=] call[call[name[parser].parse_truckers, parameter[call[name[all_parts]][constant[loc]]]]][constant[0]] call[name[item]][constant[trucker]] assign[=] name[tf] call[name[all_parts]][constant[body]] assign[=] call[name[ret_data]][constant[1]] call[name[item]][constant[name]] assign[=] call[name[parser].parse_name, parameter[call[name[all_parts]][constant[body]], name[global_names], name[common_words]]] call[name[item]][constant[indicators]] assign[=] call[name[parser].parse_indicators, parameter[call[name[all_parts]][constant[body]], call[name[item]][constant[ethnicity]]]] call[name[item]][constant[military]] assign[=] call[name[parser].parse_military_friendly, parameter[call[name[all_parts]][constant[body]]]] return[name[item]]
keyword[def] identifier[parse_single_ad] (* identifier[ad] , identifier[global_names] , identifier[common_words] , identifier[args] ={}): literal[string] identifier[multiple_phones] = keyword[False] keyword[if] literal[string] keyword[not] keyword[in] identifier[args] keyword[else] identifier[args] [ literal[string] ] identifier[item] ={} keyword[if] literal[string] keyword[in] identifier[ad] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[lower] (): identifier[item] [ literal[string] ]= literal[int] keyword[else] : identifier[item] [ literal[string] ]= literal[int] keyword[if] identifier[item] [ literal[string] ]== literal[int] : identifier[item] [ literal[string] ]=- literal[int] keyword[else] : identifier[item] [ literal[string] ]= identifier[int] ( identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[ad] [ literal[string] ][ literal[int] :])) keyword[if] identifier[item] [ literal[string] ]< literal[int] keyword[or] identifier[item] [ literal[string] ]> literal[int] : identifier[item] [ literal[string] ]=- literal[int] identifier[ageless_title] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[ad] [ literal[string] ]) identifier[ad_text] = identifier[json] . identifier[dumps] ( identifier[ad] [ literal[string] ]. identifier[lower] ()) identifier[decoded_body] = identifier[decode_unicode] ( identifier[ad] [ literal[string] ], identifier[replace_boo] = keyword[False] )[ literal[int] ] identifier[decoded_title] = identifier[decode_unicode] ( identifier[ageless_title] , identifier[replace_boo] = keyword[False] )[ literal[int] ] identifier[parts] = identifier[get_clean_parts] ( identifier[decoded_body] , identifier[decoded_title] ) identifier[loc_section] = identifier[get_location_section] ( identifier[ad_text] ) identifier[loc_parts] = identifier[get_clean_loc_parts] ( identifier[loc_section] , identifier[is_location] = keyword[True] ) identifier[all_parts] ={ literal[string] : identifier[parts] , literal[string] :[ literal[string] . identifier[join] ( identifier[loc_parts] )] } identifier[item] [ literal[string] ]= identifier[parser] . identifier[parse_time] ( identifier[repr] ( identifier[ad] [ literal[string] ])) identifier[item] [ literal[string] ]= identifier[parser] . identifier[parse_posting_id] ( identifier[ad_text] , identifier[ad] [ literal[string] ]) identifier[ret_data] = identifier[parser] . identifier[parse_phone] ( identifier[all_parts] [ literal[string] ], identifier[allow_multiple] = identifier[multiple_phones] ) identifier[item] [ literal[string] ]= identifier[ret_data] [ literal[int] ] identifier[all_parts] [ literal[string] ]= identifier[ret_data] [ literal[int] ] keyword[if] identifier[multiple_phones] : identifier[url] = identifier[ad] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ) identifier[url] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[url] ) identifier[url] = identifier[url] . identifier[split] ( literal[string] ) identifier[ret_data] = identifier[parser] . identifier[parse_phone] ( identifier[url] , identifier[allow_multiple] = identifier[multiple_phones] ) keyword[if] identifier[ret_data] [ literal[int] ]: identifier[item] [ literal[string] ]. identifier[extend] ( identifier[ret_data] [ literal[int] ]) identifier[item] [ literal[string] ]= identifier[list] ( identifier[set] ( identifier[item] [ literal[string] ])) identifier[ret_data] = identifier[parser] . identifier[parse_no_blacks] ( identifier[all_parts] [ literal[string] ]) identifier[item] [ literal[string] ]= identifier[ret_data] [ literal[int] ] identifier[all_parts] [ literal[string] ]= identifier[ret_data] [ literal[int] ] identifier[ret_data] = identifier[parser] . identifier[parse_ethnicity] ( identifier[all_parts] [ literal[string] ]) identifier[item] [ literal[string] ]= identifier[ret_data] [ literal[int] ] identifier[all_parts] [ literal[string] ]= identifier[ret_data] [ literal[int] ] identifier[ret_data] = identifier[parser] . identifier[parse_truckers] ( identifier[all_parts] [ literal[string] ]) identifier[tf] = identifier[ret_data] [ literal[int] ] keyword[if] keyword[not] identifier[tf] : identifier[tf] = identifier[parser] . identifier[parse_truckers] ( identifier[all_parts] [ literal[string] ])[ literal[int] ] identifier[item] [ literal[string] ]= identifier[tf] identifier[all_parts] [ literal[string] ]= identifier[ret_data] [ literal[int] ] identifier[item] [ literal[string] ]= identifier[parser] . identifier[parse_name] ( identifier[all_parts] [ literal[string] ], identifier[global_names] , identifier[common_words] ) identifier[item] [ literal[string] ]= identifier[parser] . identifier[parse_indicators] ( identifier[all_parts] [ literal[string] ], identifier[item] [ literal[string] ]) identifier[item] [ literal[string] ]= identifier[parser] . identifier[parse_military_friendly] ( identifier[all_parts] [ literal[string] ]) keyword[return] identifier[item]
def parse_single_ad(ad, global_names, common_words, args={}):
    """
    An example extraction of a Backpage ad, with the following parameters:

    ad -> A dict representing an ad that is scraped as such:

    ad = items.BackpageScrapeItem(
        backpage_id=response.url.split('.')[0].split('/')[2].encode('utf-8'),
        date = str(self.static_now)[:10],
        posted_age = response.xpath("//p[@class='metaInfoDisplay']/text()").extract()[0].encode('utf-8'),
        posted_date = response.xpath("//div[@class='adInfo']/text()").extract()[0].encode('utf-8'),
        posted_title = response.xpath("//div[@id='postingTitle']//h1/text()").extract()[0].encode('utf-8'),
        posting_body= response.xpath("//div[@class='postingBody']").extract()[0].encode('utf-8'),
        text = response.body,
        url=response.url
        )
    """
    multiple_phones = False if 'multiple_phones' not in args else args['multiple_phones']
    item = {}
    # Backpage category
    ## 1 --> FemaleEscorts
    ## 2 --> BodyRubs
    ## 3 --> Dating section (after 1/9/17 Backpage shutdown of FemaleEscorts and BodyRubs)
    ## 4 --> TherapeuticMassage section (1/23/17 partial begin date, 1/24/17 full begin date)
    if 'therapeuticmassage' in ad['url'].split('.backpage.com/')[1].lower():
        item['category'] = 4 # depends on [control=['if'], data=[]]
    else:
        item['category'] = 3
    # parse age
    if item['category'] == 4:
        item['age'] = -1 # depends on [control=['if'], data=[]]
    else:
        item['age'] = int(re.sub('\\D', '', ad['posted_age'][14:]))
        # Get rid of any posted age under 10 or over 60. Assign -1 if it is invalid
        if item['age'] < 10 or item['age'] > 60:
            item['age'] = -1 # depends on [control=['if'], data=[]]
    ageless_title = re.sub(' {1,2}- {1,2}\\d\\d\\Z', '', ad['posted_title'])
    ad_text = json.dumps(ad['text'].lower())
    # Get filtered, decomposed list of body + title parts.
    # 'Parts' are separated only when there is a newline (\n) character in the ad.
    decoded_body = decode_unicode(ad['posting_body'], replace_boo=False)[1]
    decoded_title = decode_unicode(ageless_title, replace_boo=False)[1]
    parts = get_clean_parts(decoded_body, decoded_title)
    loc_section = get_location_section(ad_text)
    loc_parts = get_clean_loc_parts(loc_section, is_location=True)
    all_parts = {'body': parts, 'loc': [','.join(loc_parts)]}
    item['time'] = parser.parse_time(repr(ad['posted_date']))
    item['post_id'] = parser.parse_posting_id(ad_text, ad['city'])
    # Find/remove phone number
    ret_data = parser.parse_phone(all_parts['body'], allow_multiple=multiple_phones)
    item['phone'] = ret_data[0]
    all_parts['body'] = ret_data[1]
    if multiple_phones:
        # parse phone(s) from the URL
        url = ad['url'].replace('http://', '')
        url = re.sub('\\w+\\.backpage\\.com/\\w+/', '', url)
        url = url.split('/')
        ret_data = parser.parse_phone(url, allow_multiple=multiple_phones)
        if ret_data[0]:
            item['phone'].extend(ret_data[0])
            item['phone'] = list(set(item['phone'])) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    # Find/remove "No blacks allowed"
    ret_data = parser.parse_no_blacks(all_parts['body'])
    item['no_blacks'] = ret_data[0]
    all_parts['body'] = ret_data[1]
    # Find/remove ethnicity(s)
    ret_data = parser.parse_ethnicity(all_parts['body'])
    item['ethnicity'] = ret_data[0]
    all_parts['body'] = ret_data[1]
    # Find/remove "trucker friendly"
    ret_data = parser.parse_truckers(all_parts['body'])
    tf = ret_data[0]
    if not tf:
        tf = parser.parse_truckers(all_parts['loc'])[0] # depends on [control=['if'], data=[]]
    item['trucker'] = tf
    all_parts['body'] = ret_data[1]
    # Find names
    item['name'] = parser.parse_name(all_parts['body'], global_names, common_words)
    # Indicators that may indicate a HT victim. Still in early stages
    item['indicators'] = parser.parse_indicators(all_parts['body'], item['ethnicity'])
    # Parse whether or not the ad lists itself as 'military' friendly
    item['military'] = parser.parse_military_friendly(all_parts['body'])
    return item
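The `parser.parse_*` helpers called throughout `parse_single_ad` are defined elsewhere, but the code consistently unpacks them as `ret_data[0]` (the extracted value) and `ret_data[1]` (the parts with the match removed). A hedged stand-in illustrating that two-element contract; the regex and the helper body are illustrative assumptions, not the project's real implementation:

import re

_TRUCKER_RE = re.compile(r'trucker[\s-]*friendly', re.IGNORECASE)

def parse_truckers(parts):
    # Returns (value, cleaned_parts): the extracted flag plus the input parts
    # with the matched text stripped, mirroring how parse_single_ad threads
    # all_parts['body'] = ret_data[1] between successive helper calls.
    found = any(_TRUCKER_RE.search(p) for p in parts)
    cleaned = [_TRUCKER_RE.sub('', p) for p in parts]
    return found, cleaned

# e.g. parse_truckers(['hi', 'trucker friendly here']) -> (True, ['hi', ' here'])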
def add(self, class_name, name, **kwargs):
    """
    Add a single component to the network.

    Adds it to component DataFrames.

    Parameters
    ----------
    class_name : string
        Component class name in ["Bus","Generator","Load","StorageUnit","Store","ShuntImpedance","Line","Transformer","Link"]
    name : string
        Component name
    kwargs
        Component attributes, e.g. x=0.1, length=123

    Examples
    --------
    >>> network.add("Line", "line 12345", x=0.1)
    """
    assert class_name in self.components, "Component class {} not found".format(class_name)

    cls_df = self.df(class_name)
    cls_pnl = self.pnl(class_name)

    name = str(name)

    assert name not in cls_df.index, "Failed to add {} component {} because there is already an object with this name in {}".format(class_name, name, self.components[class_name]["list_name"])

    attrs = self.components[class_name]["attrs"]

    static_attrs = attrs[attrs.static].drop("name")

    # This guarantees that the correct attribute type is maintained
    obj_df = pd.DataFrame(data=[static_attrs.default], index=[name], columns=static_attrs.index)
    new_df = cls_df.append(obj_df, sort=False)

    setattr(self, self.components[class_name]["list_name"], new_df)

    for k, v in iteritems(kwargs):
        if k not in attrs.index:
            logger.warning("{} has no attribute {}, ignoring this passed value.".format(class_name, k))
            continue
        typ = attrs.at[k, "typ"]
        if not attrs.at[k, "varying"]:
            new_df.at[name, k] = typ(v)
        elif attrs.at[k, "static"] and not isinstance(v, (pd.Series, np.ndarray, list)):
            new_df.at[name, k] = typ(v)
        else:
            cls_pnl[k][name] = pd.Series(data=v, index=self.snapshots, dtype=typ)

    for attr in ["bus", "bus0", "bus1"]:
        if attr in new_df.columns:
            bus_name = new_df.at[name, attr]
            if bus_name not in self.buses.index:
                logger.warning("The bus name `{}` given for {} of {} `{}` does not appear in network.buses".format(bus_name, attr, class_name, name))
def function[add, parameter[self, class_name, name]]: constant[ Add a single component to the network. Adds it to component DataFrames. Parameters ---------- class_name : string Component class name in ["Bus","Generator","Load","StorageUnit","Store","ShuntImpedance","Line","Transformer","Link"] name : string Component name kwargs Component attributes, e.g. x=0.1, length=123 Examples -------- >>> network.add("Line", "line 12345", x=0.1) ] assert[compare[name[class_name] in name[self].components]] variable[cls_df] assign[=] call[name[self].df, parameter[name[class_name]]] variable[cls_pnl] assign[=] call[name[self].pnl, parameter[name[class_name]]] variable[name] assign[=] call[name[str], parameter[name[name]]] assert[compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[cls_df].index]] variable[attrs] assign[=] call[call[name[self].components][name[class_name]]][constant[attrs]] variable[static_attrs] assign[=] call[call[name[attrs]][name[attrs].static].drop, parameter[constant[name]]] variable[obj_df] assign[=] call[name[pd].DataFrame, parameter[]] variable[new_df] assign[=] call[name[cls_df].append, parameter[name[obj_df]]] call[name[setattr], parameter[name[self], call[call[name[self].components][name[class_name]]][constant[list_name]], name[new_df]]] for taget[tuple[[<ast.Name object at 0x7da18dc981f0>, <ast.Name object at 0x7da18dc9bee0>]]] in starred[call[name[iteritems], parameter[name[kwargs]]]] begin[:] if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[attrs].index] begin[:] call[name[logger].warning, parameter[call[constant[{} has no attribute {}, ignoring this passed value.].format, parameter[name[class_name], name[k]]]]] continue variable[typ] assign[=] call[name[attrs].at][tuple[[<ast.Name object at 0x7da18dc980d0>, <ast.Constant object at 0x7da18dc9b490>]]] if <ast.UnaryOp object at 0x7da18dc9b790> begin[:] call[name[new_df].at][tuple[[<ast.Name object at 0x7da18dc9bdc0>, <ast.Name object at 0x7da18dc99c30>]]] assign[=] call[name[typ], parameter[name[v]]] for taget[name[attr]] in starred[list[[<ast.Constant object at 0x7da18dc99d80>, <ast.Constant object at 0x7da18dc9a740>, <ast.Constant object at 0x7da18dc9b730>]]] begin[:] if compare[name[attr] in name[new_df].columns] begin[:] variable[bus_name] assign[=] call[name[new_df].at][tuple[[<ast.Name object at 0x7da18dc98190>, <ast.Name object at 0x7da18dc99fc0>]]] if compare[name[bus_name] <ast.NotIn object at 0x7da2590d7190> name[self].buses.index] begin[:] call[name[logger].warning, parameter[call[constant[The bus name `{}` given for {} of {} `{}` does not appear in network.buses].format, parameter[name[bus_name], name[attr], name[class_name], name[name]]]]]
keyword[def] identifier[add] ( identifier[self] , identifier[class_name] , identifier[name] ,** identifier[kwargs] ): literal[string] keyword[assert] identifier[class_name] keyword[in] identifier[self] . identifier[components] , literal[string] . identifier[format] ( identifier[class_name] ) identifier[cls_df] = identifier[self] . identifier[df] ( identifier[class_name] ) identifier[cls_pnl] = identifier[self] . identifier[pnl] ( identifier[class_name] ) identifier[name] = identifier[str] ( identifier[name] ) keyword[assert] identifier[name] keyword[not] keyword[in] identifier[cls_df] . identifier[index] , literal[string] . identifier[format] ( identifier[class_name] , identifier[name] , identifier[self] . identifier[components] [ identifier[class_name] ][ literal[string] ]) identifier[attrs] = identifier[self] . identifier[components] [ identifier[class_name] ][ literal[string] ] identifier[static_attrs] = identifier[attrs] [ identifier[attrs] . identifier[static] ]. identifier[drop] ( literal[string] ) identifier[obj_df] = identifier[pd] . identifier[DataFrame] ( identifier[data] =[ identifier[static_attrs] . identifier[default] ], identifier[index] =[ identifier[name] ], identifier[columns] = identifier[static_attrs] . identifier[index] ) identifier[new_df] = identifier[cls_df] . identifier[append] ( identifier[obj_df] , identifier[sort] = keyword[False] ) identifier[setattr] ( identifier[self] , identifier[self] . identifier[components] [ identifier[class_name] ][ literal[string] ], identifier[new_df] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[iteritems] ( identifier[kwargs] ): keyword[if] identifier[k] keyword[not] keyword[in] identifier[attrs] . identifier[index] : identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[class_name] , identifier[k] )) keyword[continue] identifier[typ] = identifier[attrs] . identifier[at] [ identifier[k] , literal[string] ] keyword[if] keyword[not] identifier[attrs] . identifier[at] [ identifier[k] , literal[string] ]: identifier[new_df] . identifier[at] [ identifier[name] , identifier[k] ]= identifier[typ] ( identifier[v] ) keyword[elif] identifier[attrs] . identifier[at] [ identifier[k] , literal[string] ] keyword[and] keyword[not] identifier[isinstance] ( identifier[v] ,( identifier[pd] . identifier[Series] , identifier[np] . identifier[ndarray] , identifier[list] )): identifier[new_df] . identifier[at] [ identifier[name] , identifier[k] ]= identifier[typ] ( identifier[v] ) keyword[else] : identifier[cls_pnl] [ identifier[k] ][ identifier[name] ]= identifier[pd] . identifier[Series] ( identifier[data] = identifier[v] , identifier[index] = identifier[self] . identifier[snapshots] , identifier[dtype] = identifier[typ] ) keyword[for] identifier[attr] keyword[in] [ literal[string] , literal[string] , literal[string] ]: keyword[if] identifier[attr] keyword[in] identifier[new_df] . identifier[columns] : identifier[bus_name] = identifier[new_df] . identifier[at] [ identifier[name] , identifier[attr] ] keyword[if] identifier[bus_name] keyword[not] keyword[in] identifier[self] . identifier[buses] . identifier[index] : identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[bus_name] , identifier[attr] , identifier[class_name] , identifier[name] ))
def add(self, class_name, name, **kwargs):
    """
    Add a single component to the network.

    Adds it to component DataFrames.

    Parameters
    ----------
    class_name : string
        Component class name in ["Bus","Generator","Load","StorageUnit","Store","ShuntImpedance","Line","Transformer","Link"]
    name : string
        Component name
    kwargs
        Component attributes, e.g. x=0.1, length=123

    Examples
    --------
    >>> network.add("Line", "line 12345", x=0.1)
    """
    assert class_name in self.components, 'Component class {} not found'.format(class_name)
    cls_df = self.df(class_name)
    cls_pnl = self.pnl(class_name)
    name = str(name)
    assert name not in cls_df.index, 'Failed to add {} component {} because there is already an object with this name in {}'.format(class_name, name, self.components[class_name]['list_name'])
    attrs = self.components[class_name]['attrs']
    static_attrs = attrs[attrs.static].drop('name')
    # This guarantees that the correct attribute type is maintained
    obj_df = pd.DataFrame(data=[static_attrs.default], index=[name], columns=static_attrs.index)
    new_df = cls_df.append(obj_df, sort=False)
    setattr(self, self.components[class_name]['list_name'], new_df)
    for (k, v) in iteritems(kwargs):
        if k not in attrs.index:
            logger.warning('{} has no attribute {}, ignoring this passed value.'.format(class_name, k))
            continue # depends on [control=['if'], data=['k']]
        typ = attrs.at[k, 'typ']
        if not attrs.at[k, 'varying']:
            new_df.at[name, k] = typ(v) # depends on [control=['if'], data=[]]
        elif attrs.at[k, 'static'] and (not isinstance(v, (pd.Series, np.ndarray, list))):
            new_df.at[name, k] = typ(v) # depends on [control=['if'], data=[]]
        else:
            cls_pnl[k][name] = pd.Series(data=v, index=self.snapshots, dtype=typ) # depends on [control=['for'], data=[]]
    for attr in ['bus', 'bus0', 'bus1']:
        if attr in new_df.columns:
            bus_name = new_df.at[name, attr]
            if bus_name not in self.buses.index:
                logger.warning('The bus name `{}` given for {} of {} `{}` does not appear in network.buses'.format(bus_name, attr, class_name, name)) # depends on [control=['if'], data=['bus_name']] # depends on [control=['if'], data=['attr']] # depends on [control=['for'], data=['attr']]
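A hedged usage sketch for `add`, extending the docstring's one-liner; the component names and attribute values are illustrative, and it assumes a `network` object (e.g. a PyPSA `Network`) whose snapshots are already set:

network.add("Bus", "bus0", v_nom=20.0)
network.add("Bus", "bus1", v_nom=20.0)

# static attributes are written into the component DataFrame...
network.add("Line", "line 12345", bus0="bus0", bus1="bus1", x=0.1, length=123)

# ...while a sequence passed for a varying attribute lands in the pnl time
# series, indexed by network.snapshots (per the pd.Series branch above)
network.add("Load", "load 1", bus="bus0", p_set=[10.0] * len(network.snapshots))

# an unknown keyword is logged and ignored rather than raising
network.add("Generator", "gen 1", bus="bus1", not_an_attr=42)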