Dataset columns (all string-valued; min/max character lengths as reported by the dataset viewer):

    column           min length   max length
    code             75           104k
    code_sememe      47           309k
    token_type       215          214k
    code_dependency  75           155k
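Each record below cycles through those four columns in order: code is the raw Python source, code_sememe appears to be a normalized AST dump, token_type the same source re-emitted with token-class markers (keyword / identifier / literal), and code_dependency the source with control-dependency comments attached to statements. These descriptions are inferred from the rows themselves, not stated by the dataset. As a minimal sketch, the standard library can produce views in the same spirit as the first two derived columns; the dataset's exact serialization format is an assumption:

import ast
import io
import tokenize

src = "def read_dir(directory):\n    return directory\n"

# AST dump, comparable in spirit to the code_sememe column.
print(ast.dump(ast.parse(src)))

# Token stream with token classes, comparable in spirit to token_type.
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))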
def _ProcessMetadataFile(self, mediator, file_entry):
    """Processes a metadata file.

    Args:
      mediator (ParserMediator): mediates the interactions between parsers
          and other components, such as storage and abort signals.
      file_entry (dfvfs.FileEntry): file entry of the metadata file.
    """
    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING

    self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
    for data_stream in file_entry.data_streams:
        if self._abort:
            break

        self.last_activity_timestamp = time.time()
        self._event_extractor.ParseMetadataFile(
            mediator, file_entry, data_stream.name)
def function[_ProcessMetadataFile, parameter[self, mediator, file_entry]]: constant[Processes a metadata file. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry of the metadata file. ] name[self].processing_status assign[=] name[definitions].STATUS_INDICATOR_EXTRACTING call[name[self]._event_extractor.ParseFileEntryMetadata, parameter[name[mediator], name[file_entry]]] for taget[name[data_stream]] in starred[name[file_entry].data_streams] begin[:] if name[self]._abort begin[:] break name[self].last_activity_timestamp assign[=] call[name[time].time, parameter[]] call[name[self]._event_extractor.ParseMetadataFile, parameter[name[mediator], name[file_entry], name[data_stream].name]]
keyword[def] identifier[_ProcessMetadataFile] ( identifier[self] , identifier[mediator] , identifier[file_entry] ): literal[string] identifier[self] . identifier[processing_status] = identifier[definitions] . identifier[STATUS_INDICATOR_EXTRACTING] identifier[self] . identifier[_event_extractor] . identifier[ParseFileEntryMetadata] ( identifier[mediator] , identifier[file_entry] ) keyword[for] identifier[data_stream] keyword[in] identifier[file_entry] . identifier[data_streams] : keyword[if] identifier[self] . identifier[_abort] : keyword[break] identifier[self] . identifier[last_activity_timestamp] = identifier[time] . identifier[time] () identifier[self] . identifier[_event_extractor] . identifier[ParseMetadataFile] ( identifier[mediator] , identifier[file_entry] , identifier[data_stream] . identifier[name] )
def _ProcessMetadataFile(self, mediator, file_entry): """Processes a metadata file. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry of the metadata file. """ self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING self._event_extractor.ParseFileEntryMetadata(mediator, file_entry) for data_stream in file_entry.data_streams: if self._abort: break # depends on [control=['if'], data=[]] self.last_activity_timestamp = time.time() self._event_extractor.ParseMetadataFile(mediator, file_entry, data_stream.name) # depends on [control=['for'], data=['data_stream']]
def require_perms(view_func, required):
    """Enforces permission-based access controls.

    :param list required: A tuple of permission names, all of which the
        request user must possess in order access the decorated view.

    Example usage::

        from horizon.decorators import require_perms

        @require_perms(['foo.admin', 'foo.member'])
        def my_view(request):
            ...

    Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the
    requirements are not met.
    """
    from horizon.exceptions import NotAuthorized
    # We only need to check each permission once for a view, so we'll use a set
    current_perms = getattr(view_func, '_required_perms', set([]))
    view_func._required_perms = current_perms | set(required)

    @functools.wraps(view_func, assigned=available_attrs(view_func))
    def dec(request, *args, **kwargs):
        if request.user.is_authenticated:
            if request.user.has_perms(view_func._required_perms):
                return view_func(request, *args, **kwargs)
        raise NotAuthorized(_("You are not authorized to access %s")
                            % request.path)

    # If we don't have any permissions, just return the original view.
    if required:
        return dec
    else:
        return view_func
def function[require_perms, parameter[view_func, required]]: constant[Enforces permission-based access controls. :param list required: A tuple of permission names, all of which the request user must possess in order access the decorated view. Example usage:: from horizon.decorators import require_perms @require_perms(['foo.admin', 'foo.member']) def my_view(request): ... Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the requirements are not met. ] from relative_module[horizon.exceptions] import module[NotAuthorized] variable[current_perms] assign[=] call[name[getattr], parameter[name[view_func], constant[_required_perms], call[name[set], parameter[list[[]]]]]] name[view_func]._required_perms assign[=] binary_operation[name[current_perms] <ast.BitOr object at 0x7da2590d6aa0> call[name[set], parameter[name[required]]]] def function[dec, parameter[request]]: if name[request].user.is_authenticated begin[:] if call[name[request].user.has_perms, parameter[name[view_func]._required_perms]] begin[:] return[call[name[view_func], parameter[name[request], <ast.Starred object at 0x7da1b18dc250>]]] <ast.Raise object at 0x7da1b18dcac0> if name[required] begin[:] return[name[dec]]
keyword[def] identifier[require_perms] ( identifier[view_func] , identifier[required] ): literal[string] keyword[from] identifier[horizon] . identifier[exceptions] keyword[import] identifier[NotAuthorized] identifier[current_perms] = identifier[getattr] ( identifier[view_func] , literal[string] , identifier[set] ([])) identifier[view_func] . identifier[_required_perms] = identifier[current_perms] | identifier[set] ( identifier[required] ) @ identifier[functools] . identifier[wraps] ( identifier[view_func] , identifier[assigned] = identifier[available_attrs] ( identifier[view_func] )) keyword[def] identifier[dec] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ): keyword[if] identifier[request] . identifier[user] . identifier[is_authenticated] : keyword[if] identifier[request] . identifier[user] . identifier[has_perms] ( identifier[view_func] . identifier[_required_perms] ): keyword[return] identifier[view_func] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ) keyword[raise] identifier[NotAuthorized] ( identifier[_] ( literal[string] ) % identifier[request] . identifier[path] ) keyword[if] identifier[required] : keyword[return] identifier[dec] keyword[else] : keyword[return] identifier[view_func]
def require_perms(view_func, required): """Enforces permission-based access controls. :param list required: A tuple of permission names, all of which the request user must possess in order access the decorated view. Example usage:: from horizon.decorators import require_perms @require_perms(['foo.admin', 'foo.member']) def my_view(request): ... Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the requirements are not met. """ from horizon.exceptions import NotAuthorized # We only need to check each permission once for a view, so we'll use a set current_perms = getattr(view_func, '_required_perms', set([])) view_func._required_perms = current_perms | set(required) @functools.wraps(view_func, assigned=available_attrs(view_func)) def dec(request, *args, **kwargs): if request.user.is_authenticated: if request.user.has_perms(view_func._required_perms): return view_func(request, *args, **kwargs) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] raise NotAuthorized(_('You are not authorized to access %s') % request.path) # If we don't have any permissions, just return the original view. if required: return dec # depends on [control=['if'], data=[]] else: return view_func
def replace_some(ol, value, *indexes, **kwargs):
    '''
    from elist.elist import *
    ol = [1,'a',3,'a',5,'a',6,'a']
    id(ol)
    new = replace_some(ol,'AAA',1,3,7)
    ol
    new
    id(ol)
    id(new)
    ####
    ol = [1,'a',3,'a',5,'a',6,'a']
    id(ol)
    rslt = replace_some(ol,'AAA',1,3,7,mode="original")
    ol
    rslt
    id(ol)
    id(rslt)
    '''
    if ('mode' in kwargs):
        mode = kwargs["mode"]
    else:
        mode = "new"
    indexes = list(indexes)
    return (replace_seqs(ol, value, indexes, mode=mode))
def function[replace_some, parameter[ol, value]]: constant[ from elist.elist import * ol = [1,'a',3,'a',5,'a',6,'a'] id(ol) new = replace_some(ol,'AAA',1,3,7) ol new id(ol) id(new) #### ol = [1,'a',3,'a',5,'a',6,'a'] id(ol) rslt = replace_some(ol,'AAA',1,3,7,mode="original") ol rslt id(ol) id(rslt) ] if compare[constant[mode] in name[kwargs]] begin[:] variable[mode] assign[=] call[name[kwargs]][constant[mode]] variable[indexes] assign[=] call[name[list], parameter[name[indexes]]] return[call[name[replace_seqs], parameter[name[ol], name[value], name[indexes]]]]
keyword[def] identifier[replace_some] ( identifier[ol] , identifier[value] ,* identifier[indexes] ,** identifier[kwargs] ): literal[string] keyword[if] ( literal[string] keyword[in] identifier[kwargs] ): identifier[mode] = identifier[kwargs] [ literal[string] ] keyword[else] : identifier[mode] = literal[string] identifier[indexes] = identifier[list] ( identifier[indexes] ) keyword[return] ( identifier[replace_seqs] ( identifier[ol] , identifier[value] , identifier[indexes] , identifier[mode] = identifier[mode] ))
def replace_some(ol, value, *indexes, **kwargs): """ from elist.elist import * ol = [1,'a',3,'a',5,'a',6,'a'] id(ol) new = replace_some(ol,'AAA',1,3,7) ol new id(ol) id(new) #### ol = [1,'a',3,'a',5,'a',6,'a'] id(ol) rslt = replace_some(ol,'AAA',1,3,7,mode="original") ol rslt id(ol) id(rslt) """ if 'mode' in kwargs: mode = kwargs['mode'] # depends on [control=['if'], data=['kwargs']] else: mode = 'new' indexes = list(indexes) return replace_seqs(ol, value, indexes, mode=mode)
def download_source(version):
    """
    Download Spark version. Uses same name as release tag without the
    leading 'v'.

    :param version: Version number to download.
    :return: None
    """
    local_filename = 'v{}.zip'.format(Spark.svm_version_path(version))
    Spark.download(Spark.spark_versions()['v{}'.format(version)],
                   local_filename)
def function[download_source, parameter[version]]: constant[ Download Spark version. Uses same name as release tag without the leading 'v'. :param version: Version number to download. :return: None ] variable[local_filename] assign[=] call[constant[v{}.zip].format, parameter[call[name[Spark].svm_version_path, parameter[name[version]]]]] call[name[Spark].download, parameter[call[call[name[Spark].spark_versions, parameter[]]][call[constant[v{}].format, parameter[name[version]]]], name[local_filename]]]
keyword[def] identifier[download_source] ( identifier[version] ): literal[string] identifier[local_filename] = literal[string] . identifier[format] ( identifier[Spark] . identifier[svm_version_path] ( identifier[version] )) identifier[Spark] . identifier[download] ( identifier[Spark] . identifier[spark_versions] ()[ literal[string] . identifier[format] ( identifier[version] )], identifier[local_filename] )
def download_source(version): """ Download Spark version. Uses same name as release tag without the leading 'v'. :param version: Version number to download. :return: None """ local_filename = 'v{}.zip'.format(Spark.svm_version_path(version)) Spark.download(Spark.spark_versions()['v{}'.format(version)], local_filename)
def get_binary_stream(name):
    """Returns a system stream for byte processing.  This essentially
    returns the stream from the sys module with the given name but it
    solves some compatibility issues between different Python versions.
    Primarily this function is necessary for getting binary streams on
    Python 3.

    :param name: the name of the stream to open.  Valid names are
                 ``'stdin'``, ``'stdout'`` and ``'stderr'``
    """
    opener = binary_streams.get(name)
    if opener is None:
        raise TypeError('Unknown standard stream %r' % name)
    return opener()
def function[get_binary_stream, parameter[name]]: constant[Returns a system stream for byte processing. This essentially returns the stream from the sys module with the given name but it solves some compatibility issues between different Python versions. Primarily this function is necessary for getting binary streams on Python 3. :param name: the name of the stream to open. Valid names are ``'stdin'``, ``'stdout'`` and ``'stderr'`` ] variable[opener] assign[=] call[name[binary_streams].get, parameter[name[name]]] if compare[name[opener] is constant[None]] begin[:] <ast.Raise object at 0x7da20c6c6c50> return[call[name[opener], parameter[]]]
keyword[def] identifier[get_binary_stream] ( identifier[name] ): literal[string] identifier[opener] = identifier[binary_streams] . identifier[get] ( identifier[name] ) keyword[if] identifier[opener] keyword[is] keyword[None] : keyword[raise] identifier[TypeError] ( literal[string] % identifier[name] ) keyword[return] identifier[opener] ()
def get_binary_stream(name): """Returns a system stream for byte processing. This essentially returns the stream from the sys module with the given name but it solves some compatibility issues between different Python versions. Primarily this function is necessary for getting binary streams on Python 3. :param name: the name of the stream to open. Valid names are ``'stdin'``, ``'stdout'`` and ``'stderr'`` """ opener = binary_streams.get(name) if opener is None: raise TypeError('Unknown standard stream %r' % name) # depends on [control=['if'], data=[]] return opener()
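The get_binary_stream record above matches the helper of the same name in click's public API; that identification is inferred from the docstring, not stated by the dataset. A minimal usage sketch under that assumption:

# Assumption: the function above is click.get_binary_stream.
# Writes raw bytes to stdout, bypassing any text-mode encoding layer.
from click import get_binary_stream

out = get_binary_stream('stdout')
out.write(b'raw bytes\n')
out.flush()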
def extract_coverage(self, container: Container) -> FileLineSet:
    """
    Extracts a report of the lines that have been executed since the last
    time that a coverage report was extracted.
    """
    uid = container.uid
    r = self.__api.post('containers/{}/read-coverage'.format(uid))
    if r.status_code == 200:
        return FileLineSet.from_dict(r.json())
    self.__api.handle_erroneous_response(r)
def function[extract_coverage, parameter[self, container]]: constant[ Extracts a report of the lines that have been executed since the last time that a coverage report was extracted. ] variable[uid] assign[=] name[container].uid variable[r] assign[=] call[name[self].__api.post, parameter[call[constant[containers/{}/read-coverage].format, parameter[name[uid]]]]] if compare[name[r].status_code equal[==] constant[200]] begin[:] return[call[name[FileLineSet].from_dict, parameter[call[name[r].json, parameter[]]]]] call[name[self].__api.handle_erroneous_response, parameter[name[r]]]
keyword[def] identifier[extract_coverage] ( identifier[self] , identifier[container] : identifier[Container] )-> identifier[FileLineSet] : literal[string] identifier[uid] = identifier[container] . identifier[uid] identifier[r] = identifier[self] . identifier[__api] . identifier[post] ( literal[string] . identifier[format] ( identifier[uid] )) keyword[if] identifier[r] . identifier[status_code] == literal[int] : keyword[return] identifier[FileLineSet] . identifier[from_dict] ( identifier[r] . identifier[json] ()) identifier[self] . identifier[__api] . identifier[handle_erroneous_response] ( identifier[r] )
def extract_coverage(self, container: Container) -> FileLineSet: """ Extracts a report of the lines that have been executed since the last time that a coverage report was extracted. """ uid = container.uid r = self.__api.post('containers/{}/read-coverage'.format(uid)) if r.status_code == 200: return FileLineSet.from_dict(r.json()) # depends on [control=['if'], data=[]] self.__api.handle_erroneous_response(r)
def _set_mirror(self, v, load=False):
    """
    Setter method for mirror, mapped from YANG variable
    /openflow_global/openflow/mirror (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mirror is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mirror() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=mirror.mirror, is_container='container', presence=False, yang_name="mirror", rest_name="mirror", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Openflow Mirror interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """mirror must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=mirror.mirror, is_container='container', presence=False, yang_name="mirror", rest_name="mirror", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Openflow Mirror interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""",
        })

    self.__mirror = t
    if hasattr(self, '_set'):
        self._set()
def function[_set_mirror, parameter[self, v, load]]: constant[ Setter method for mirror, mapped from YANG variable /openflow_global/openflow/mirror (container) If this variable is read-only (config: false) in the source YANG file, then _set_mirror is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mirror() directly. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da2054a4ca0> name[self].__mirror assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_mirror] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[mirror] . identifier[mirror] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__mirror] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_mirror(self, v, load=False): """ Setter method for mirror, mapped from YANG variable /openflow_global/openflow/mirror (container) If this variable is read-only (config: false) in the source YANG file, then _set_mirror is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mirror() directly. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=mirror.mirror, is_container='container', presence=False, yang_name='mirror', rest_name='mirror', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Openflow Mirror interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'mirror must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=mirror.mirror, is_container=\'container\', presence=False, yang_name="mirror", rest_name="mirror", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Configure Openflow Mirror interface\', u\'cli-incomplete-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-openflow\', defining_module=\'brocade-openflow\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__mirror = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def read_dir(directory):
    '''Returns the text of all files in a directory.'''
    content = dir_list(directory)
    text = ''
    for filename in content:
        text += read_file(directory + '/' + filename)
        text += ' '
    return text
def function[read_dir, parameter[directory]]: constant[Returns the text of all files in a directory.] variable[content] assign[=] call[name[dir_list], parameter[name[directory]]] variable[text] assign[=] constant[] for taget[name[filename]] in starred[name[content]] begin[:] <ast.AugAssign object at 0x7da2054a4f70> <ast.AugAssign object at 0x7da2054a6710> return[name[text]]
keyword[def] identifier[read_dir] ( identifier[directory] ): literal[string] identifier[content] = identifier[dir_list] ( identifier[directory] ) identifier[text] = literal[string] keyword[for] identifier[filename] keyword[in] identifier[content] : identifier[text] += identifier[read_file] ( identifier[directory] + literal[string] + identifier[filename] ) identifier[text] += literal[string] keyword[return] identifier[text]
def read_dir(directory): """Returns the text of all files in a directory.""" content = dir_list(directory) text = '' for filename in content: text += read_file(directory + '/' + filename) text += ' ' # depends on [control=['for'], data=['filename']] return text
def tiles_are_equal(tile_data_1, tile_data_2, fmt):
    """
    Returns True if the tile data is equal in tile_data_1 and tile_data_2.
    For most formats, this is a simple byte-wise equality check. For zipped
    metatiles, we need to check the contents, as the zip format includes
    metadata such as timestamps and doesn't control file ordering.
    """
    if fmt and fmt == zip_format:
        return metatiles_are_equal(tile_data_1, tile_data_2)
    else:
        return tile_data_1 == tile_data_2
def function[tiles_are_equal, parameter[tile_data_1, tile_data_2, fmt]]: constant[ Returns True if the tile data is equal in tile_data_1 and tile_data_2. For most formats, this is a simple byte-wise equality check. For zipped metatiles, we need to check the contents, as the zip format includes metadata such as timestamps and doesn't control file ordering. ] if <ast.BoolOp object at 0x7da20c6c5c30> begin[:] return[call[name[metatiles_are_equal], parameter[name[tile_data_1], name[tile_data_2]]]]
keyword[def] identifier[tiles_are_equal] ( identifier[tile_data_1] , identifier[tile_data_2] , identifier[fmt] ): literal[string] keyword[if] identifier[fmt] keyword[and] identifier[fmt] == identifier[zip_format] : keyword[return] identifier[metatiles_are_equal] ( identifier[tile_data_1] , identifier[tile_data_2] ) keyword[else] : keyword[return] identifier[tile_data_1] == identifier[tile_data_2]
def tiles_are_equal(tile_data_1, tile_data_2, fmt): """ Returns True if the tile data is equal in tile_data_1 and tile_data_2. For most formats, this is a simple byte-wise equality check. For zipped metatiles, we need to check the contents, as the zip format includes metadata such as timestamps and doesn't control file ordering. """ if fmt and fmt == zip_format: return metatiles_are_equal(tile_data_1, tile_data_2) # depends on [control=['if'], data=[]] else: return tile_data_1 == tile_data_2
def nextGen(self):
    """
    Decide the fate of the cells
    """
    self.current_gen += 1
    self.change_gen[self.current_gen % 3] = copy.copy(self.grid)
    grid_cp = copy.copy(self.grid)
    for cell in self.grid:
        y, x = cell
        y1 = (y - 1) % self.y_grid
        y2 = (y + 1) % self.y_grid
        x1 = (x - 1) % self.x_grid
        x2 = (x + 1) % self.x_grid
        n = self.countNeighbours(cell)
        if n < 2 or n > 3:
            del grid_cp[cell]
            self.addchar(y + self.y_pad, x + self.x_pad, ' ')
        else:
            grid_cp[cell] = min(self.grid[cell] + 1, self.color_max)
        for neighbour in product([y1, y, y2], [x1, x, x2]):
            if not self.grid.get(neighbour):
                if self.countNeighbours(neighbour) == 3:
                    y, x = neighbour
                    y = y % self.y_grid
                    x = x % self.x_grid
                    neighbour = y, x
                    grid_cp[neighbour] = 1
    self.grid = grid_cp
def function[nextGen, parameter[self]]: constant[ Decide the fate of the cells ] <ast.AugAssign object at 0x7da204623eb0> call[name[self].change_gen][binary_operation[name[self].current_gen <ast.Mod object at 0x7da2590d6920> constant[3]]] assign[=] call[name[copy].copy, parameter[name[self].grid]] variable[grid_cp] assign[=] call[name[copy].copy, parameter[name[self].grid]] for taget[name[cell]] in starred[name[self].grid] begin[:] <ast.Tuple object at 0x7da2046200a0> assign[=] name[cell] variable[y1] assign[=] binary_operation[binary_operation[name[y] - constant[1]] <ast.Mod object at 0x7da2590d6920> name[self].y_grid] variable[y2] assign[=] binary_operation[binary_operation[name[y] + constant[1]] <ast.Mod object at 0x7da2590d6920> name[self].y_grid] variable[x1] assign[=] binary_operation[binary_operation[name[x] - constant[1]] <ast.Mod object at 0x7da2590d6920> name[self].x_grid] variable[x2] assign[=] binary_operation[binary_operation[name[x] + constant[1]] <ast.Mod object at 0x7da2590d6920> name[self].x_grid] variable[n] assign[=] call[name[self].countNeighbours, parameter[name[cell]]] if <ast.BoolOp object at 0x7da204623a00> begin[:] <ast.Delete object at 0x7da204621600> call[name[self].addchar, parameter[binary_operation[name[y] + name[self].y_pad], binary_operation[name[x] + name[self].x_pad], constant[ ]]] for taget[name[neighbour]] in starred[call[name[product], parameter[list[[<ast.Name object at 0x7da2046216f0>, <ast.Name object at 0x7da204623df0>, <ast.Name object at 0x7da204622350>]], list[[<ast.Name object at 0x7da204620040>, <ast.Name object at 0x7da204620070>, <ast.Name object at 0x7da204623940>]]]]] begin[:] if <ast.UnaryOp object at 0x7da204622ce0> begin[:] if compare[call[name[self].countNeighbours, parameter[name[neighbour]]] equal[==] constant[3]] begin[:] <ast.Tuple object at 0x7da2046208e0> assign[=] name[neighbour] variable[y] assign[=] binary_operation[name[y] <ast.Mod object at 0x7da2590d6920> name[self].y_grid] variable[x] assign[=] binary_operation[name[x] <ast.Mod object at 0x7da2590d6920> name[self].x_grid] variable[neighbour] assign[=] tuple[[<ast.Name object at 0x7da204621750>, <ast.Name object at 0x7da204621f90>]] call[name[grid_cp]][name[neighbour]] assign[=] constant[1] name[self].grid assign[=] name[grid_cp]
keyword[def] identifier[nextGen] ( identifier[self] ): literal[string] identifier[self] . identifier[current_gen] += literal[int] identifier[self] . identifier[change_gen] [ identifier[self] . identifier[current_gen] % literal[int] ]= identifier[copy] . identifier[copy] ( identifier[self] . identifier[grid] ) identifier[grid_cp] = identifier[copy] . identifier[copy] ( identifier[self] . identifier[grid] ) keyword[for] identifier[cell] keyword[in] identifier[self] . identifier[grid] : identifier[y] , identifier[x] = identifier[cell] identifier[y1] =( identifier[y] - literal[int] )% identifier[self] . identifier[y_grid] identifier[y2] =( identifier[y] + literal[int] )% identifier[self] . identifier[y_grid] identifier[x1] =( identifier[x] - literal[int] )% identifier[self] . identifier[x_grid] identifier[x2] =( identifier[x] + literal[int] )% identifier[self] . identifier[x_grid] identifier[n] = identifier[self] . identifier[countNeighbours] ( identifier[cell] ) keyword[if] identifier[n] < literal[int] keyword[or] identifier[n] > literal[int] : keyword[del] identifier[grid_cp] [ identifier[cell] ] identifier[self] . identifier[addchar] ( identifier[y] + identifier[self] . identifier[y_pad] , identifier[x] + identifier[self] . identifier[x_pad] , literal[string] ) keyword[else] : identifier[grid_cp] [ identifier[cell] ]= identifier[min] ( identifier[self] . identifier[grid] [ identifier[cell] ]+ literal[int] , identifier[self] . identifier[color_max] ) keyword[for] identifier[neighbour] keyword[in] identifier[product] ([ identifier[y1] , identifier[y] , identifier[y2] ],[ identifier[x1] , identifier[x] , identifier[x2] ]): keyword[if] keyword[not] identifier[self] . identifier[grid] . identifier[get] ( identifier[neighbour] ): keyword[if] identifier[self] . identifier[countNeighbours] ( identifier[neighbour] )== literal[int] : identifier[y] , identifier[x] = identifier[neighbour] identifier[y] = identifier[y] % identifier[self] . identifier[y_grid] identifier[x] = identifier[x] % identifier[self] . identifier[x_grid] identifier[neighbour] = identifier[y] , identifier[x] identifier[grid_cp] [ identifier[neighbour] ]= literal[int] identifier[self] . identifier[grid] = identifier[grid_cp]
def nextGen(self): """ Decide the fate of the cells """ self.current_gen += 1 self.change_gen[self.current_gen % 3] = copy.copy(self.grid) grid_cp = copy.copy(self.grid) for cell in self.grid: (y, x) = cell y1 = (y - 1) % self.y_grid y2 = (y + 1) % self.y_grid x1 = (x - 1) % self.x_grid x2 = (x + 1) % self.x_grid n = self.countNeighbours(cell) if n < 2 or n > 3: del grid_cp[cell] self.addchar(y + self.y_pad, x + self.x_pad, ' ') # depends on [control=['if'], data=[]] else: grid_cp[cell] = min(self.grid[cell] + 1, self.color_max) for neighbour in product([y1, y, y2], [x1, x, x2]): if not self.grid.get(neighbour): if self.countNeighbours(neighbour) == 3: (y, x) = neighbour y = y % self.y_grid x = x % self.x_grid neighbour = (y, x) grid_cp[neighbour] = 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['neighbour']] # depends on [control=['for'], data=['cell']] self.grid = grid_cp
def _nbtt(self):
    """Increment the tick and return branch, turn, tick

    Unless we're viewing the past, in which case raise HistoryError.

    Idea is you use this when you want to advance time, which you can
    only do once per branch, turn, tick.

    """
    from .cache import HistoryError
    branch, turn, tick = self._btt()
    tick += 1
    if (branch, turn) in self._turn_end_plan:
        if tick > self._turn_end_plan[branch, turn]:
            self._turn_end_plan[branch, turn] = tick
        else:
            tick = self._turn_end_plan[branch, turn] + 1
    self._turn_end_plan[branch, turn] = tick
    if self._turn_end[branch, turn] > tick:
        raise HistoryError(
            "You're not at the end of turn {}. Go to tick {} to change things".format(
                turn, self._turn_end[branch, turn]
            )
        )
    parent, turn_start, tick_start, turn_end, tick_end = self._branches[branch]
    if turn < turn_end or (
        turn == turn_end and tick < tick_end
    ):
        raise HistoryError(
            "You're in the past. Go to turn {}, tick {} to change things".format(turn_end, tick_end)
        )
    if self._planning:
        if (turn, tick) in self._plan_ticks[self._last_plan]:
            raise HistoryError(
                "Trying to make a plan at {}, but that time already happened".format((branch, turn, tick))
            )
        self._plan_ticks[self._last_plan][turn].append(tick)
        self._plan_ticks_uncommitted.append((self._last_plan, turn, tick))
        self._time_plan[branch, turn, tick] = self._last_plan
    self._otick = tick
    return branch, turn, tick
def function[_nbtt, parameter[self]]: constant[Increment the tick and return branch, turn, tick Unless we're viewing the past, in which case raise HistoryError. Idea is you use this when you want to advance time, which you can only do once per branch, turn, tick. ] from relative_module[cache] import module[HistoryError] <ast.Tuple object at 0x7da1b0b42200> assign[=] call[name[self]._btt, parameter[]] <ast.AugAssign object at 0x7da1b0b41a20> if compare[tuple[[<ast.Name object at 0x7da1b0b40be0>, <ast.Name object at 0x7da1b0b41f90>]] in name[self]._turn_end_plan] begin[:] if compare[name[tick] greater[>] call[name[self]._turn_end_plan][tuple[[<ast.Name object at 0x7da1b0b425f0>, <ast.Name object at 0x7da1b0b421a0>]]]] begin[:] call[name[self]._turn_end_plan][tuple[[<ast.Name object at 0x7da1b0b414e0>, <ast.Name object at 0x7da1b0b401f0>]]] assign[=] name[tick] call[name[self]._turn_end_plan][tuple[[<ast.Name object at 0x7da1b0b400d0>, <ast.Name object at 0x7da1b0b405b0>]]] assign[=] name[tick] if compare[call[name[self]._turn_end][tuple[[<ast.Name object at 0x7da1b0b409a0>, <ast.Name object at 0x7da1b0b43fd0>]]] greater[>] name[tick]] begin[:] <ast.Raise object at 0x7da1b0b40790> <ast.Tuple object at 0x7da1b0ba7580> assign[=] call[name[self]._branches][name[branch]] if <ast.BoolOp object at 0x7da1b0ba6da0> begin[:] <ast.Raise object at 0x7da1b0ba7490> if name[self]._planning begin[:] if compare[tuple[[<ast.Name object at 0x7da1b0c53b50>, <ast.Name object at 0x7da1b0c535e0>]] in call[name[self]._plan_ticks][name[self]._last_plan]] begin[:] <ast.Raise object at 0x7da1b0c51a80> call[call[call[name[self]._plan_ticks][name[self]._last_plan]][name[turn]].append, parameter[name[tick]]] call[name[self]._plan_ticks_uncommitted.append, parameter[tuple[[<ast.Attribute object at 0x7da1b0c512a0>, <ast.Name object at 0x7da1b0c51d20>, <ast.Name object at 0x7da1b0c53eb0>]]]] call[name[self]._time_plan][tuple[[<ast.Name object at 0x7da1b0c51db0>, <ast.Name object at 0x7da1b0c534f0>, <ast.Name object at 0x7da1b0c536d0>]]] assign[=] name[self]._last_plan name[self]._otick assign[=] name[tick] return[tuple[[<ast.Name object at 0x7da1b0c511e0>, <ast.Name object at 0x7da1b0c51ea0>, <ast.Name object at 0x7da1b0c538e0>]]]
keyword[def] identifier[_nbtt] ( identifier[self] ): literal[string] keyword[from] . identifier[cache] keyword[import] identifier[HistoryError] identifier[branch] , identifier[turn] , identifier[tick] = identifier[self] . identifier[_btt] () identifier[tick] += literal[int] keyword[if] ( identifier[branch] , identifier[turn] ) keyword[in] identifier[self] . identifier[_turn_end_plan] : keyword[if] identifier[tick] > identifier[self] . identifier[_turn_end_plan] [ identifier[branch] , identifier[turn] ]: identifier[self] . identifier[_turn_end_plan] [ identifier[branch] , identifier[turn] ]= identifier[tick] keyword[else] : identifier[tick] = identifier[self] . identifier[_turn_end_plan] [ identifier[branch] , identifier[turn] ]+ literal[int] identifier[self] . identifier[_turn_end_plan] [ identifier[branch] , identifier[turn] ]= identifier[tick] keyword[if] identifier[self] . identifier[_turn_end] [ identifier[branch] , identifier[turn] ]> identifier[tick] : keyword[raise] identifier[HistoryError] ( literal[string] . identifier[format] ( identifier[turn] , identifier[self] . identifier[_turn_end] [ identifier[branch] , identifier[turn] ] ) ) identifier[parent] , identifier[turn_start] , identifier[tick_start] , identifier[turn_end] , identifier[tick_end] = identifier[self] . identifier[_branches] [ identifier[branch] ] keyword[if] identifier[turn] < identifier[turn_end] keyword[or] ( identifier[turn] == identifier[turn_end] keyword[and] identifier[tick] < identifier[tick_end] ): keyword[raise] identifier[HistoryError] ( literal[string] . identifier[format] ( identifier[turn_end] , identifier[tick_end] ) ) keyword[if] identifier[self] . identifier[_planning] : keyword[if] ( identifier[turn] , identifier[tick] ) keyword[in] identifier[self] . identifier[_plan_ticks] [ identifier[self] . identifier[_last_plan] ]: keyword[raise] identifier[HistoryError] ( literal[string] . identifier[format] (( identifier[branch] , identifier[turn] , identifier[tick] )) ) identifier[self] . identifier[_plan_ticks] [ identifier[self] . identifier[_last_plan] ][ identifier[turn] ]. identifier[append] ( identifier[tick] ) identifier[self] . identifier[_plan_ticks_uncommitted] . identifier[append] (( identifier[self] . identifier[_last_plan] , identifier[turn] , identifier[tick] )) identifier[self] . identifier[_time_plan] [ identifier[branch] , identifier[turn] , identifier[tick] ]= identifier[self] . identifier[_last_plan] identifier[self] . identifier[_otick] = identifier[tick] keyword[return] identifier[branch] , identifier[turn] , identifier[tick]
def _nbtt(self): """Increment the tick and return branch, turn, tick Unless we're viewing the past, in which case raise HistoryError. Idea is you use this when you want to advance time, which you can only do once per branch, turn, tick. """ from .cache import HistoryError (branch, turn, tick) = self._btt() tick += 1 if (branch, turn) in self._turn_end_plan: if tick > self._turn_end_plan[branch, turn]: self._turn_end_plan[branch, turn] = tick # depends on [control=['if'], data=['tick']] else: tick = self._turn_end_plan[branch, turn] + 1 # depends on [control=['if'], data=[]] self._turn_end_plan[branch, turn] = tick if self._turn_end[branch, turn] > tick: raise HistoryError("You're not at the end of turn {}. Go to tick {} to change things".format(turn, self._turn_end[branch, turn])) # depends on [control=['if'], data=[]] (parent, turn_start, tick_start, turn_end, tick_end) = self._branches[branch] if turn < turn_end or (turn == turn_end and tick < tick_end): raise HistoryError("You're in the past. Go to turn {}, tick {} to change things".format(turn_end, tick_end)) # depends on [control=['if'], data=[]] if self._planning: if (turn, tick) in self._plan_ticks[self._last_plan]: raise HistoryError('Trying to make a plan at {}, but that time already happened'.format((branch, turn, tick))) # depends on [control=['if'], data=[]] self._plan_ticks[self._last_plan][turn].append(tick) self._plan_ticks_uncommitted.append((self._last_plan, turn, tick)) self._time_plan[branch, turn, tick] = self._last_plan # depends on [control=['if'], data=[]] self._otick = tick return (branch, turn, tick)
def base36(value):
    """
    Encode int to base 36.
    """
    result = ''
    while value:
        value, i = divmod(value, 36)
        result = BASE36_ALPHABET[i] + result
    return result
def function[base36, parameter[value]]: constant[ Encode int to base 36. ] variable[result] assign[=] constant[] while name[value] begin[:] <ast.Tuple object at 0x7da2044c1ab0> assign[=] call[name[divmod], parameter[name[value], constant[36]]] variable[result] assign[=] binary_operation[call[name[BASE36_ALPHABET]][name[i]] + name[result]] return[name[result]]
keyword[def] identifier[base36] ( identifier[value] ): literal[string] identifier[result] = literal[string] keyword[while] identifier[value] : identifier[value] , identifier[i] = identifier[divmod] ( identifier[value] , literal[int] ) identifier[result] = identifier[BASE36_ALPHABET] [ identifier[i] ]+ identifier[result] keyword[return] identifier[result]
def base36(value): """ Encode int to base 36. """ result = '' while value: (value, i) = divmod(value, 36) result = BASE36_ALPHABET[i] + result # depends on [control=['while'], data=[]] return result
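The base36 record leaves BASE36_ALPHABET to module scope. A minimal usage sketch, assuming the conventional digits-then-lowercase alphabet (the actual constant in the source module is not shown in the row):

# Assumption: the usual base-36 digit set; the dataset row does not
# include the module-level constant.
BASE36_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'

def base36(value):
    result = ''
    while value:
        value, i = divmod(value, 36)
        result = BASE36_ALPHABET[i] + result
    return result  # note: returns '' for 0 as written

print(base36(1000000))   # 'lfls'
print(int('lfls', 36))   # 1000000, round-trips with int()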
def _on_complete_hook(self, my_task):
    """
    Runs the task. Should not be called directly.
    Returns True if completed, False otherwise.
    """
    # Find all matching conditions.
    outputs = []
    for condition, output in self.cond_task_specs:
        if self.choice is not None and output not in self.choice:
            continue
        if condition is None:
            outputs.append(self._wf_spec.get_task_spec_from_name(output))
            continue
        if not condition._matches(my_task):
            continue
        outputs.append(self._wf_spec.get_task_spec_from_name(output))

    my_task._sync_children(outputs, Task.FUTURE)
    for child in my_task.children:
        child.task_spec._update(child)
def function[_on_complete_hook, parameter[self, my_task]]: constant[ Runs the task. Should not be called directly. Returns True if completed, False otherwise. ] variable[outputs] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b01c3b50>, <ast.Name object at 0x7da1b01c2e00>]]] in starred[name[self].cond_task_specs] begin[:] if <ast.BoolOp object at 0x7da1b01c1b70> begin[:] continue if compare[name[condition] is constant[None]] begin[:] call[name[outputs].append, parameter[call[name[self]._wf_spec.get_task_spec_from_name, parameter[name[output]]]]] continue if <ast.UnaryOp object at 0x7da1b01c3d00> begin[:] continue call[name[outputs].append, parameter[call[name[self]._wf_spec.get_task_spec_from_name, parameter[name[output]]]]] call[name[my_task]._sync_children, parameter[name[outputs], name[Task].FUTURE]] for taget[name[child]] in starred[name[my_task].children] begin[:] call[name[child].task_spec._update, parameter[name[child]]]
keyword[def] identifier[_on_complete_hook] ( identifier[self] , identifier[my_task] ): literal[string] identifier[outputs] =[] keyword[for] identifier[condition] , identifier[output] keyword[in] identifier[self] . identifier[cond_task_specs] : keyword[if] identifier[self] . identifier[choice] keyword[is] keyword[not] keyword[None] keyword[and] identifier[output] keyword[not] keyword[in] identifier[self] . identifier[choice] : keyword[continue] keyword[if] identifier[condition] keyword[is] keyword[None] : identifier[outputs] . identifier[append] ( identifier[self] . identifier[_wf_spec] . identifier[get_task_spec_from_name] ( identifier[output] )) keyword[continue] keyword[if] keyword[not] identifier[condition] . identifier[_matches] ( identifier[my_task] ): keyword[continue] identifier[outputs] . identifier[append] ( identifier[self] . identifier[_wf_spec] . identifier[get_task_spec_from_name] ( identifier[output] )) identifier[my_task] . identifier[_sync_children] ( identifier[outputs] , identifier[Task] . identifier[FUTURE] ) keyword[for] identifier[child] keyword[in] identifier[my_task] . identifier[children] : identifier[child] . identifier[task_spec] . identifier[_update] ( identifier[child] )
def _on_complete_hook(self, my_task): """ Runs the task. Should not be called directly. Returns True if completed, False otherwise. """ # Find all matching conditions. outputs = [] for (condition, output) in self.cond_task_specs: if self.choice is not None and output not in self.choice: continue # depends on [control=['if'], data=[]] if condition is None: outputs.append(self._wf_spec.get_task_spec_from_name(output)) continue # depends on [control=['if'], data=[]] if not condition._matches(my_task): continue # depends on [control=['if'], data=[]] outputs.append(self._wf_spec.get_task_spec_from_name(output)) # depends on [control=['for'], data=[]] my_task._sync_children(outputs, Task.FUTURE) for child in my_task.children: child.task_spec._update(child) # depends on [control=['for'], data=['child']]
def detect_interval(
        self,
        min_head_length=None,
        max_head_length=None,
        min_tail_length=None,
        max_tail_length=None
):
    """
    Detect the interval of the audio file containing the fragments
    in the text file.

    Return the audio interval as a tuple of two
    :class:`~aeneas.exacttiming.TimeValue` objects, representing
    the begin and end time, in seconds, with respect to the full
    wave duration.

    If one of the parameters is ``None``, the default value
    (``0.0`` for min, ``10.0`` for max) will be used.

    :param min_head_length: estimated minimum head length
    :type  min_head_length: :class:`~aeneas.exacttiming.TimeValue`
    :param max_head_length: estimated maximum head length
    :type  max_head_length: :class:`~aeneas.exacttiming.TimeValue`
    :param min_tail_length: estimated minimum tail length
    :type  min_tail_length: :class:`~aeneas.exacttiming.TimeValue`
    :param max_tail_length: estimated maximum tail length
    :type  max_tail_length: :class:`~aeneas.exacttiming.TimeValue`
    :rtype: (:class:`~aeneas.exacttiming.TimeValue`, :class:`~aeneas.exacttiming.TimeValue`)
    :raises: TypeError: if one of the parameters is not ``None`` or a number
    :raises: ValueError: if one of the parameters is negative
    """
    head = self.detect_head(min_head_length, max_head_length)
    tail = self.detect_tail(min_tail_length, max_tail_length)
    begin = head
    end = self.real_wave_mfcc.audio_length - tail
    self.log([u"Audio length: %.3f", self.real_wave_mfcc.audio_length])
    self.log([u"Head length: %.3f", head])
    self.log([u"Tail length: %.3f", tail])
    self.log([u"Begin: %.3f", begin])
    self.log([u"End: %.3f", end])
    if (begin >= TimeValue("0.000")) and (end > begin):
        self.log([u"Returning %.3f %.3f", begin, end])
        return (begin, end)
    self.log(u"Returning (0.000, 0.000)")
    return (TimeValue("0.000"), TimeValue("0.000"))
def function[detect_interval, parameter[self, min_head_length, max_head_length, min_tail_length, max_tail_length]]: constant[ Detect the interval of the audio file containing the fragments in the text file. Return the audio interval as a tuple of two :class:`~aeneas.exacttiming.TimeValue` objects, representing the begin and end time, in seconds, with respect to the full wave duration. If one of the parameters is ``None``, the default value (``0.0`` for min, ``10.0`` for max) will be used. :param min_head_length: estimated minimum head length :type min_head_length: :class:`~aeneas.exacttiming.TimeValue` :param max_head_length: estimated maximum head length :type max_head_length: :class:`~aeneas.exacttiming.TimeValue` :param min_tail_length: estimated minimum tail length :type min_tail_length: :class:`~aeneas.exacttiming.TimeValue` :param max_tail_length: estimated maximum tail length :type max_tail_length: :class:`~aeneas.exacttiming.TimeValue` :rtype: (:class:`~aeneas.exacttiming.TimeValue`, :class:`~aeneas.exacttiming.TimeValue`) :raises: TypeError: if one of the parameters is not ``None`` or a number :raises: ValueError: if one of the parameters is negative ] variable[head] assign[=] call[name[self].detect_head, parameter[name[min_head_length], name[max_head_length]]] variable[tail] assign[=] call[name[self].detect_tail, parameter[name[min_tail_length], name[max_tail_length]]] variable[begin] assign[=] name[head] variable[end] assign[=] binary_operation[name[self].real_wave_mfcc.audio_length - name[tail]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da207f99900>, <ast.Attribute object at 0x7da207f9b640>]]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da207f99390>, <ast.Name object at 0x7da207f9bb20>]]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da207f98760>, <ast.Name object at 0x7da207f98520>]]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da207f98040>, <ast.Name object at 0x7da207f99090>]]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da207f9bfd0>, <ast.Name object at 0x7da207f9a140>]]]] if <ast.BoolOp object at 0x7da207f9ab00> begin[:] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da207f9a890>, <ast.Name object at 0x7da207f9bca0>, <ast.Name object at 0x7da207f9bb80>]]]] return[tuple[[<ast.Name object at 0x7da207f9add0>, <ast.Name object at 0x7da207f98c10>]]] call[name[self].log, parameter[constant[Returning (0.000, 0.000)]]] return[tuple[[<ast.Call object at 0x7da207f98880>, <ast.Call object at 0x7da18f8133d0>]]]
keyword[def] identifier[detect_interval] ( identifier[self] , identifier[min_head_length] = keyword[None] , identifier[max_head_length] = keyword[None] , identifier[min_tail_length] = keyword[None] , identifier[max_tail_length] = keyword[None] ): literal[string] identifier[head] = identifier[self] . identifier[detect_head] ( identifier[min_head_length] , identifier[max_head_length] ) identifier[tail] = identifier[self] . identifier[detect_tail] ( identifier[min_tail_length] , identifier[max_tail_length] ) identifier[begin] = identifier[head] identifier[end] = identifier[self] . identifier[real_wave_mfcc] . identifier[audio_length] - identifier[tail] identifier[self] . identifier[log] ([ literal[string] , identifier[self] . identifier[real_wave_mfcc] . identifier[audio_length] ]) identifier[self] . identifier[log] ([ literal[string] , identifier[head] ]) identifier[self] . identifier[log] ([ literal[string] , identifier[tail] ]) identifier[self] . identifier[log] ([ literal[string] , identifier[begin] ]) identifier[self] . identifier[log] ([ literal[string] , identifier[end] ]) keyword[if] ( identifier[begin] >= identifier[TimeValue] ( literal[string] )) keyword[and] ( identifier[end] > identifier[begin] ): identifier[self] . identifier[log] ([ literal[string] , identifier[begin] , identifier[end] ]) keyword[return] ( identifier[begin] , identifier[end] ) identifier[self] . identifier[log] ( literal[string] ) keyword[return] ( identifier[TimeValue] ( literal[string] ), identifier[TimeValue] ( literal[string] ))
def detect_interval(self, min_head_length=None, max_head_length=None, min_tail_length=None, max_tail_length=None): """ Detect the interval of the audio file containing the fragments in the text file. Return the audio interval as a tuple of two :class:`~aeneas.exacttiming.TimeValue` objects, representing the begin and end time, in seconds, with respect to the full wave duration. If one of the parameters is ``None``, the default value (``0.0`` for min, ``10.0`` for max) will be used. :param min_head_length: estimated minimum head length :type min_head_length: :class:`~aeneas.exacttiming.TimeValue` :param max_head_length: estimated maximum head length :type max_head_length: :class:`~aeneas.exacttiming.TimeValue` :param min_tail_length: estimated minimum tail length :type min_tail_length: :class:`~aeneas.exacttiming.TimeValue` :param max_tail_length: estimated maximum tail length :type max_tail_length: :class:`~aeneas.exacttiming.TimeValue` :rtype: (:class:`~aeneas.exacttiming.TimeValue`, :class:`~aeneas.exacttiming.TimeValue`) :raises: TypeError: if one of the parameters is not ``None`` or a number :raises: ValueError: if one of the parameters is negative """ head = self.detect_head(min_head_length, max_head_length) tail = self.detect_tail(min_tail_length, max_tail_length) begin = head end = self.real_wave_mfcc.audio_length - tail self.log([u'Audio length: %.3f', self.real_wave_mfcc.audio_length]) self.log([u'Head length: %.3f', head]) self.log([u'Tail length: %.3f', tail]) self.log([u'Begin: %.3f', begin]) self.log([u'End: %.3f', end]) if begin >= TimeValue('0.000') and end > begin: self.log([u'Returning %.3f %.3f', begin, end]) return (begin, end) # depends on [control=['if'], data=[]] self.log(u'Returning (0.000, 0.000)') return (TimeValue('0.000'), TimeValue('0.000'))
def expand_dimension(self, newdim, dimension, maps={}, relations={}):
    '''
    When we expand we need to provide new maps and relations as those
    can't be inferred
    '''
    for name, attr in self.__attributes__.items():
        if attr.dim == dimension:
            newattr = attr.copy()
            newattr.empty(newdim - attr.size)
            self.__attributes__[name] = concatenate_attributes([attr, newattr])

    for name, rel in self.__relations__.items():
        if dimension == rel.dim:
            # We need the new relation from the user
            if not rel.name in relations:
                raise ValueError('You need to provide the relation {} for this resize'.format(rel.name))
            else:
                if len(relations[name]) != newdim:
                    raise ValueError('New relation {} should be of size {}'.format(rel.name, newdim))
                else:
                    self.__relations__[name].value = relations[name]
        elif dimension == rel.map:
            # Extend the index
            rel.index = range(newdim)

    for (a, b), rel in self.maps.items():
        if dimension == rel.dim:
            # We need the new relation from the user
            if not (a, b) in maps:
                raise ValueError('You need to provide the map {}->{} for this resize'.format(a, b))
            else:
                if len(maps[a, b]) != newdim:
                    raise ValueError('New map {} should be of size {}'.format(rel.name, newdim))
                else:
                    rel.value = maps[a, b]
        elif dimension == rel.map:
            # Extend the index
            rel.index = range(newdim)

    # Update dimensions
    self.dimensions[dimension] = newdim
    return self
def function[expand_dimension, parameter[self, newdim, dimension, maps, relations]]: constant[ When we expand we need to provide new maps and relations as those can't be inferred ] for taget[tuple[[<ast.Name object at 0x7da18dc04f10>, <ast.Name object at 0x7da18dc06620>]]] in starred[call[name[self].__attributes__.items, parameter[]]] begin[:] if compare[name[attr].dim equal[==] name[dimension]] begin[:] variable[newattr] assign[=] call[name[attr].copy, parameter[]] call[name[newattr].empty, parameter[binary_operation[name[newdim] - name[attr].size]]] call[name[self].__attributes__][name[name]] assign[=] call[name[concatenate_attributes], parameter[list[[<ast.Name object at 0x7da18dc07b80>, <ast.Name object at 0x7da18dc07af0>]]]] for taget[tuple[[<ast.Name object at 0x7da18dc05c00>, <ast.Name object at 0x7da18dc05a50>]]] in starred[call[name[self].__relations__.items, parameter[]]] begin[:] if compare[name[dimension] equal[==] name[rel].dim] begin[:] if <ast.UnaryOp object at 0x7da18dc07eb0> begin[:] <ast.Raise object at 0x7da18dc05d20> for taget[tuple[[<ast.Tuple object at 0x7da18dc04550>, <ast.Name object at 0x7da18dc07d90>]]] in starred[call[name[self].maps.items, parameter[]]] begin[:] if compare[name[dimension] equal[==] name[rel].dim] begin[:] if <ast.UnaryOp object at 0x7da20c6c4cd0> begin[:] <ast.Raise object at 0x7da20c6c7820> call[name[self].dimensions][name[dimension]] assign[=] name[newdim] return[name[self]]
keyword[def] identifier[expand_dimension] ( identifier[self] , identifier[newdim] , identifier[dimension] , identifier[maps] ={}, identifier[relations] ={}): literal[string] keyword[for] identifier[name] , identifier[attr] keyword[in] identifier[self] . identifier[__attributes__] . identifier[items] (): keyword[if] identifier[attr] . identifier[dim] == identifier[dimension] : identifier[newattr] = identifier[attr] . identifier[copy] () identifier[newattr] . identifier[empty] ( identifier[newdim] - identifier[attr] . identifier[size] ) identifier[self] . identifier[__attributes__] [ identifier[name] ]= identifier[concatenate_attributes] ([ identifier[attr] , identifier[newattr] ]) keyword[for] identifier[name] , identifier[rel] keyword[in] identifier[self] . identifier[__relations__] . identifier[items] (): keyword[if] identifier[dimension] == identifier[rel] . identifier[dim] : keyword[if] keyword[not] identifier[rel] . identifier[name] keyword[in] identifier[relations] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[rel] . identifier[name] )) keyword[else] : keyword[if] identifier[len] ( identifier[relations] [ identifier[name] ])!= identifier[newdim] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[rel] . identifier[name] , identifier[newdim] )) keyword[else] : identifier[self] . identifier[__relations__] [ identifier[name] ]. identifier[value] = identifier[relations] [ identifier[name] ] keyword[elif] identifier[dimension] == identifier[rel] . identifier[map] : identifier[rel] . identifier[index] = identifier[range] ( identifier[newdim] ) keyword[for] ( identifier[a] , identifier[b] ), identifier[rel] keyword[in] identifier[self] . identifier[maps] . identifier[items] (): keyword[if] identifier[dimension] == identifier[rel] . identifier[dim] : keyword[if] keyword[not] ( identifier[a] , identifier[b] ) keyword[in] identifier[maps] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[a] , identifier[b] )) keyword[else] : keyword[if] identifier[len] ( identifier[maps] [ identifier[a] , identifier[b] ])!= identifier[newdim] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[rel] . identifier[name] , identifier[newdim] )) keyword[else] : identifier[rel] . identifier[value] = identifier[maps] [ identifier[a] , identifier[b] ] keyword[elif] identifier[dimension] == identifier[rel] . identifier[map] : identifier[rel] . identifier[index] = identifier[range] ( identifier[newdim] ) identifier[self] . identifier[dimensions] [ identifier[dimension] ]= identifier[newdim] keyword[return] identifier[self]
def expand_dimension(self, newdim, dimension, maps={}, relations={}): """ When we expand we need to provide new maps and relations as those can't be inferred """ for (name, attr) in self.__attributes__.items(): if attr.dim == dimension: newattr = attr.copy() newattr.empty(newdim - attr.size) self.__attributes__[name] = concatenate_attributes([attr, newattr]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] for (name, rel) in self.__relations__.items(): if dimension == rel.dim: # We need the new relation from the user if not rel.name in relations: raise ValueError('You need to provide the relation {} for this resize'.format(rel.name)) # depends on [control=['if'], data=[]] elif len(relations[name]) != newdim: raise ValueError('New relation {} should be of size {}'.format(rel.name, newdim)) # depends on [control=['if'], data=['newdim']] else: self.__relations__[name].value = relations[name] # depends on [control=['if'], data=[]] elif dimension == rel.map: # Extend the index rel.index = range(newdim) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] for ((a, b), rel) in self.maps.items(): if dimension == rel.dim: # We need the new relation from the user if not (a, b) in maps: raise ValueError('You need to provide the map {}->{} for this resize'.format(a, b)) # depends on [control=['if'], data=[]] elif len(maps[a, b]) != newdim: raise ValueError('New map {} should be of size {}'.format(rel.name, newdim)) # depends on [control=['if'], data=['newdim']] else: rel.value = maps[a, b] # depends on [control=['if'], data=[]] elif dimension == rel.map: # Extend the index rel.index = range(newdim) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # Update dimensions self.dimensions[dimension] = newdim return self
def report_estimation_accuracy(request):
    """
    Idea from Software Estimation, Demystifying the Black Art, McConnell 2006
    Fig 3-3.
    """
    contracts = ProjectContract.objects.filter(
        status=ProjectContract.STATUS_COMPLETE,
        type=ProjectContract.PROJECT_FIXED
    )
    data = [('Target (hrs)', 'Actual (hrs)', 'Point Label')]
    for c in contracts:
        if c.contracted_hours() == 0:
            continue
        pt_label = "%s (%.2f%%)" % (c.name,
                                    c.hours_worked / c.contracted_hours() * 100)
        data.append((c.contracted_hours(), c.hours_worked, pt_label))

    chart_max = max([max(x[0], x[1]) for x in data[1:]])  # max of all targets & actuals
    return render(request, 'timepiece/reports/estimation_accuracy.html', {
        'data': json.dumps(data, cls=DecimalEncoder),
        'chart_max': chart_max,
    })
def function[report_estimation_accuracy, parameter[request]]: constant[ Idea from Software Estimation, Demystifying the Black Art, McConnel 2006 Fig 3-3. ] variable[contracts] assign[=] call[name[ProjectContract].objects.filter, parameter[]] variable[data] assign[=] list[[<ast.Tuple object at 0x7da1b1080970>]] for taget[name[c]] in starred[name[contracts]] begin[:] if compare[call[name[c].contracted_hours, parameter[]] equal[==] constant[0]] begin[:] continue variable[pt_label] assign[=] binary_operation[constant[%s (%.2f%%)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1080c40>, <ast.BinOp object at 0x7da1b10808e0>]]] call[name[data].append, parameter[tuple[[<ast.Call object at 0x7da1b1083eb0>, <ast.Attribute object at 0x7da1b10818a0>, <ast.Name object at 0x7da1b1080730>]]]] variable[chart_max] assign[=] call[name[max], parameter[<ast.ListComp object at 0x7da1b1083a30>]] return[call[name[render], parameter[name[request], constant[timepiece/reports/estimation_accuracy.html], dictionary[[<ast.Constant object at 0x7da1b106d540>, <ast.Constant object at 0x7da1b106fe50>], [<ast.Call object at 0x7da1b106f970>, <ast.Name object at 0x7da1b106c0a0>]]]]]
keyword[def] identifier[report_estimation_accuracy] ( identifier[request] ): literal[string] identifier[contracts] = identifier[ProjectContract] . identifier[objects] . identifier[filter] ( identifier[status] = identifier[ProjectContract] . identifier[STATUS_COMPLETE] , identifier[type] = identifier[ProjectContract] . identifier[PROJECT_FIXED] ) identifier[data] =[( literal[string] , literal[string] , literal[string] )] keyword[for] identifier[c] keyword[in] identifier[contracts] : keyword[if] identifier[c] . identifier[contracted_hours] ()== literal[int] : keyword[continue] identifier[pt_label] = literal[string] %( identifier[c] . identifier[name] , identifier[c] . identifier[hours_worked] / identifier[c] . identifier[contracted_hours] ()* literal[int] ) identifier[data] . identifier[append] (( identifier[c] . identifier[contracted_hours] (), identifier[c] . identifier[hours_worked] , identifier[pt_label] )) identifier[chart_max] = identifier[max] ([ identifier[max] ( identifier[x] [ literal[int] ], identifier[x] [ literal[int] ]) keyword[for] identifier[x] keyword[in] identifier[data] [ literal[int] :]]) keyword[return] identifier[render] ( identifier[request] , literal[string] ,{ literal[string] : identifier[json] . identifier[dumps] ( identifier[data] , identifier[cls] = identifier[DecimalEncoder] ), literal[string] : identifier[chart_max] , })
def report_estimation_accuracy(request):
    """
    Idea from Software Estimation, Demystifying the Black Art, McConnell 2006
    Fig 3-3.
    """
    contracts = ProjectContract.objects.filter(status=ProjectContract.STATUS_COMPLETE, type=ProjectContract.PROJECT_FIXED)
    data = [('Target (hrs)', 'Actual (hrs)', 'Point Label')]
    for c in contracts:
        if c.contracted_hours() == 0:
            continue # depends on [control=['if'], data=[]]
        pt_label = '%s (%.2f%%)' % (c.name, c.hours_worked / c.contracted_hours() * 100)
        data.append((c.contracted_hours(), c.hours_worked, pt_label)) # depends on [control=['for'], data=['c']]
    chart_max = max([max(x[0], x[1]) for x in data[1:]]) # max of all targets & actuals
    return render(request, 'timepiece/reports/estimation_accuracy.html', {'data': json.dumps(data, cls=DecimalEncoder), 'chart_max': chart_max})
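The view leans on a custom JSON encoder because ORM hours arrive as Decimal. A self-contained sketch of the same data shape and chart scaling (this DecimalEncoder is a plausible stand-in, not necessarily django-timepiece's exact class):

import json
from decimal import Decimal

class DecimalEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, Decimal):
            return float(obj)
        return super().default(obj)

data = [('Target (hrs)', 'Actual (hrs)', 'Point Label')]
for name, target, actual in [('A', Decimal('100'), Decimal('130')),
                             ('B', Decimal('80'), Decimal('75'))]:
    pct = float(actual) / float(target) * 100
    data.append((target, actual, '%s (%.2f%%)' % (name, pct)))

chart_max = max(max(x[0], x[1]) for x in data[1:])  # square plot bounds
print(json.dumps(data, cls=DecimalEncoder), chart_max)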
def construct_tpb_graph(experiments: TomographyExperiment): """ Construct a graph where an edge signifies two experiments are diagonal in a TPB. """ g = nx.Graph() for expt in experiments: assert len(expt) == 1, 'already grouped?' expt = expt[0] if expt not in g: g.add_node(expt, count=1) else: g.nodes[expt]['count'] += 1 for expt1, expt2 in itertools.combinations(experiments, r=2): expt1 = expt1[0] expt2 = expt2[0] if expt1 == expt2: continue max_weight_in = _max_weight_state([expt1.in_state, expt2.in_state]) max_weight_out = _max_weight_operator([expt1.out_operator, expt2.out_operator]) if max_weight_in is not None and max_weight_out is not None: g.add_edge(expt1, expt2) return g
def function[construct_tpb_graph, parameter[experiments]]: constant[ Construct a graph where an edge signifies two experiments are diagonal in a TPB. ] variable[g] assign[=] call[name[nx].Graph, parameter[]] for taget[name[expt]] in starred[name[experiments]] begin[:] assert[compare[call[name[len], parameter[name[expt]]] equal[==] constant[1]]] variable[expt] assign[=] call[name[expt]][constant[0]] if compare[name[expt] <ast.NotIn object at 0x7da2590d7190> name[g]] begin[:] call[name[g].add_node, parameter[name[expt]]] for taget[tuple[[<ast.Name object at 0x7da1b1b34a90>, <ast.Name object at 0x7da1b1b36410>]]] in starred[call[name[itertools].combinations, parameter[name[experiments]]]] begin[:] variable[expt1] assign[=] call[name[expt1]][constant[0]] variable[expt2] assign[=] call[name[expt2]][constant[0]] if compare[name[expt1] equal[==] name[expt2]] begin[:] continue variable[max_weight_in] assign[=] call[name[_max_weight_state], parameter[list[[<ast.Attribute object at 0x7da1b1c5a320>, <ast.Attribute object at 0x7da1b1c587c0>]]]] variable[max_weight_out] assign[=] call[name[_max_weight_operator], parameter[list[[<ast.Attribute object at 0x7da1b1c5ba90>, <ast.Attribute object at 0x7da1b1c5a4d0>]]]] if <ast.BoolOp object at 0x7da1b1c5a650> begin[:] call[name[g].add_edge, parameter[name[expt1], name[expt2]]] return[name[g]]
keyword[def] identifier[construct_tpb_graph] ( identifier[experiments] : identifier[TomographyExperiment] ): literal[string] identifier[g] = identifier[nx] . identifier[Graph] () keyword[for] identifier[expt] keyword[in] identifier[experiments] : keyword[assert] identifier[len] ( identifier[expt] )== literal[int] , literal[string] identifier[expt] = identifier[expt] [ literal[int] ] keyword[if] identifier[expt] keyword[not] keyword[in] identifier[g] : identifier[g] . identifier[add_node] ( identifier[expt] , identifier[count] = literal[int] ) keyword[else] : identifier[g] . identifier[nodes] [ identifier[expt] ][ literal[string] ]+= literal[int] keyword[for] identifier[expt1] , identifier[expt2] keyword[in] identifier[itertools] . identifier[combinations] ( identifier[experiments] , identifier[r] = literal[int] ): identifier[expt1] = identifier[expt1] [ literal[int] ] identifier[expt2] = identifier[expt2] [ literal[int] ] keyword[if] identifier[expt1] == identifier[expt2] : keyword[continue] identifier[max_weight_in] = identifier[_max_weight_state] ([ identifier[expt1] . identifier[in_state] , identifier[expt2] . identifier[in_state] ]) identifier[max_weight_out] = identifier[_max_weight_operator] ([ identifier[expt1] . identifier[out_operator] , identifier[expt2] . identifier[out_operator] ]) keyword[if] identifier[max_weight_in] keyword[is] keyword[not] keyword[None] keyword[and] identifier[max_weight_out] keyword[is] keyword[not] keyword[None] : identifier[g] . identifier[add_edge] ( identifier[expt1] , identifier[expt2] ) keyword[return] identifier[g]
def construct_tpb_graph(experiments: TomographyExperiment): """ Construct a graph where an edge signifies two experiments are diagonal in a TPB. """ g = nx.Graph() for expt in experiments: assert len(expt) == 1, 'already grouped?' expt = expt[0] if expt not in g: g.add_node(expt, count=1) # depends on [control=['if'], data=['expt', 'g']] else: g.nodes[expt]['count'] += 1 # depends on [control=['for'], data=['expt']] for (expt1, expt2) in itertools.combinations(experiments, r=2): expt1 = expt1[0] expt2 = expt2[0] if expt1 == expt2: continue # depends on [control=['if'], data=[]] max_weight_in = _max_weight_state([expt1.in_state, expt2.in_state]) max_weight_out = _max_weight_operator([expt1.out_operator, expt2.out_operator]) if max_weight_in is not None and max_weight_out is not None: g.add_edge(expt1, expt2) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return g
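construct_tpb_graph builds a compatibility graph: experiments are nodes, and an edge says two of them are simultaneously measurable in one tensor product basis. The same shape with a toy predicate in place of the Pauli-specific _max_weight_state/_max_weight_operator checks:

import itertools
import networkx as nx

def shares_a_basis(a, b):
    # Stand-in predicate: strings of per-qubit bases are compatible
    # when, position by position, they agree or one side is idle ('I').
    return all(x == y or 'I' in (x, y) for x, y in zip(a, b))

experiments = ['XI', 'XZ', 'ZI', 'IZ']
g = nx.Graph()
g.add_nodes_from(experiments)
for e1, e2 in itertools.combinations(experiments, r=2):
    if shares_a_basis(e1, e2):
        g.add_edge(e1, e2)

# Cliques of this graph are groups measurable with one setting.
print(list(nx.find_cliques(g)))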
def add_dir2pypath(path): """Add given directory to PYTHONPATH, e.g. for pylint.""" py_path = os.environ.get('PYTHONPATH', '') if path not in py_path.split(os.pathsep): py_path = ''.join([path, os.pathsep if py_path else '', py_path]) os.environ['PYTHONPATH'] = py_path
def function[add_dir2pypath, parameter[path]]: constant[Add given directory to PYTHONPATH, e.g. for pylint.] variable[py_path] assign[=] call[name[os].environ.get, parameter[constant[PYTHONPATH], constant[]]] if compare[name[path] <ast.NotIn object at 0x7da2590d7190> call[name[py_path].split, parameter[name[os].pathsep]]] begin[:] variable[py_path] assign[=] call[constant[].join, parameter[list[[<ast.Name object at 0x7da18f00f310>, <ast.IfExp object at 0x7da18f00cbb0>, <ast.Name object at 0x7da18f00d6f0>]]]] call[name[os].environ][constant[PYTHONPATH]] assign[=] name[py_path]
keyword[def] identifier[add_dir2pypath] ( identifier[path] ): literal[string] identifier[py_path] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[path] keyword[not] keyword[in] identifier[py_path] . identifier[split] ( identifier[os] . identifier[pathsep] ): identifier[py_path] = literal[string] . identifier[join] ([ identifier[path] , identifier[os] . identifier[pathsep] keyword[if] identifier[py_path] keyword[else] literal[string] , identifier[py_path] ]) identifier[os] . identifier[environ] [ literal[string] ]= identifier[py_path]
def add_dir2pypath(path): """Add given directory to PYTHONPATH, e.g. for pylint.""" py_path = os.environ.get('PYTHONPATH', '') if path not in py_path.split(os.pathsep): py_path = ''.join([path, os.pathsep if py_path else '', py_path]) os.environ['PYTHONPATH'] = py_path # depends on [control=['if'], data=['path']]
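Usage is a one-liner; the ''.join trick only inserts a separator when PYTHONPATH was non-empty, and the membership test keeps repeated calls idempotent:

import os

os.environ['PYTHONPATH'] = '/opt/tools'
add_dir2pypath('/src/myproj')
add_dir2pypath('/src/myproj')    # no-op: already present
print(os.environ['PYTHONPATH'])  # '/src/myproj:/opt/tools' on POSIX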
def read_features(self, tol=1e-3):
        """Reads the features from a file and stores them in the current
        object.

        Parameters
        ----------
        tol: float
            Tolerance level to detect duration of audio.
        """
        try:
            # Read JSON file
            with open(self.file_struct.features_file) as f:
                feats = json.load(f)

            # Store duration
            if self.dur is None:
                self.dur = float(feats["globals"]["dur"])

            # Check that we have the correct global parameters
            assert(np.isclose(
                self.dur, float(feats["globals"]["dur"]), rtol=tol))
            assert(self.sr == int(feats["globals"]["sample_rate"]))
            assert(self.hop_length == int(feats["globals"]["hop_length"]))
            assert(os.path.basename(self.file_struct.audio_file) ==
                   os.path.basename(feats["globals"]["audio_file"]))

            # Check for specific features params
            feat_params_err = FeatureParamsError(
                "Couldn't find features for %s id in file %s" %
                (self.get_id(), self.file_struct.features_file))
            if self.get_id() not in feats.keys():
                raise feat_params_err
            for param_name in self.get_param_names():
                value = getattr(self, param_name)
                if hasattr(value, '__call__'):
                    # Special case of functions
                    if value.__name__ != \
                            feats[self.get_id()]["params"][param_name]:
                        raise feat_params_err
                else:
                    if str(value) != \
                            feats[self.get_id()]["params"][param_name]:
                        raise feat_params_err

            # Store actual features
            self._est_beats_times = np.array(feats["est_beats"])
            self._est_beatsync_times = np.array(feats["est_beatsync_times"])
            self._est_beats_frames = librosa.core.time_to_frames(
                self._est_beats_times, sr=self.sr,
                hop_length=self.hop_length)
            self._framesync_features = \
                np.array(feats[self.get_id()]["framesync"])
            self._est_beatsync_features = \
                np.array(feats[self.get_id()]["est_beatsync"])

            # Read annotated beats if available
            if "ann_beats" in feats.keys():
                self._ann_beats_times = np.array(feats["ann_beats"])
                self._ann_beatsync_times = np.array(feats["ann_beatsync_times"])
                self._ann_beats_frames = librosa.core.time_to_frames(
                    self._ann_beats_times, sr=self.sr,
                    hop_length=self.hop_length)
                self._ann_beatsync_features = \
                    np.array(feats[self.get_id()]["ann_beatsync"])
        except KeyError:
            raise WrongFeaturesFormatError(
                "The features file %s is not correctly formatted" %
                self.file_struct.features_file)
        except AssertionError:
            raise FeaturesNotFound(
                "The features for the given parameters were not found in "
                "features file %s" % self.file_struct.features_file)
        except IOError:
            raise NoFeaturesFileError("Could not find features file %s" %
                                      self.file_struct.features_file)
def function[read_features, parameter[self, tol]]: constant[Reads the features from a file and stores them in the current object. Parameters ---------- tol: float Tolerance level to detect duration of audio. ] <ast.Try object at 0x7da1b02a7e20>
keyword[def] identifier[read_features] ( identifier[self] , identifier[tol] = literal[int] ): literal[string] keyword[try] : keyword[with] identifier[open] ( identifier[self] . identifier[file_struct] . identifier[features_file] ) keyword[as] identifier[f] : identifier[feats] = identifier[json] . identifier[load] ( identifier[f] ) keyword[if] identifier[self] . identifier[dur] keyword[is] keyword[None] : identifier[self] . identifier[dur] = identifier[float] ( identifier[feats] [ literal[string] ][ literal[string] ]) keyword[assert] ( identifier[np] . identifier[isclose] ( identifier[self] . identifier[dur] , identifier[float] ( identifier[feats] [ literal[string] ][ literal[string] ]), identifier[rtol] = identifier[tol] )) keyword[assert] ( identifier[self] . identifier[sr] == identifier[int] ( identifier[feats] [ literal[string] ][ literal[string] ])) keyword[assert] ( identifier[self] . identifier[hop_length] == identifier[int] ( identifier[feats] [ literal[string] ][ literal[string] ])) keyword[assert] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[self] . identifier[file_struct] . identifier[audio_file] )== identifier[os] . identifier[path] . identifier[basename] ( identifier[feats] [ literal[string] ][ literal[string] ])) identifier[feat_params_err] = identifier[FeatureParamsError] ( literal[string] % ( identifier[self] . identifier[get_id] (), identifier[self] . identifier[file_struct] . identifier[features_file] )) keyword[if] identifier[self] . identifier[get_id] () keyword[not] keyword[in] identifier[feats] . identifier[keys] (): keyword[raise] identifier[feat_params_err] keyword[for] identifier[param_name] keyword[in] identifier[self] . identifier[get_param_names] (): identifier[value] = identifier[getattr] ( identifier[self] , identifier[param_name] ) keyword[if] identifier[hasattr] ( identifier[value] , literal[string] ): keyword[if] identifier[value] . identifier[__name__] != identifier[feats] [ identifier[self] . identifier[get_id] ()][ literal[string] ][ identifier[param_name] ]: keyword[raise] identifier[feat_params_err] keyword[else] : keyword[if] identifier[str] ( identifier[value] )!= identifier[feats] [ identifier[self] . identifier[get_id] ()][ literal[string] ][ identifier[param_name] ]: keyword[raise] identifier[feat_params_err] identifier[self] . identifier[_est_beats_times] = identifier[np] . identifier[array] ( identifier[feats] [ literal[string] ]) identifier[self] . identifier[_est_beatsync_times] = identifier[np] . identifier[array] ( identifier[feats] [ literal[string] ]) identifier[self] . identifier[_est_beats_frames] = identifier[librosa] . identifier[core] . identifier[time_to_frames] ( identifier[self] . identifier[_est_beats_times] , identifier[sr] = identifier[self] . identifier[sr] , identifier[hop_length] = identifier[self] . identifier[hop_length] ) identifier[self] . identifier[_framesync_features] = identifier[np] . identifier[array] ( identifier[feats] [ identifier[self] . identifier[get_id] ()][ literal[string] ]) identifier[self] . identifier[_est_beatsync_features] = identifier[np] . identifier[array] ( identifier[feats] [ identifier[self] . identifier[get_id] ()][ literal[string] ]) keyword[if] literal[string] keyword[in] identifier[feats] . identifier[keys] (): identifier[self] . identifier[_ann_beats_times] = identifier[np] . identifier[array] ( identifier[feats] [ literal[string] ]) identifier[self] . identifier[_ann_beatsync_times] = identifier[np] . identifier[array] ( identifier[feats] [ literal[string] ]) identifier[self] . identifier[_ann_beats_frames] = identifier[librosa] . identifier[core] . identifier[time_to_frames] ( identifier[self] . identifier[_ann_beats_times] , identifier[sr] = identifier[self] . identifier[sr] , identifier[hop_length] = identifier[self] . identifier[hop_length] ) identifier[self] . identifier[_ann_beatsync_features] = identifier[np] . identifier[array] ( identifier[feats] [ identifier[self] . identifier[get_id] ()][ literal[string] ]) keyword[except] identifier[KeyError] : keyword[raise] identifier[WrongFeaturesFormatError] ( literal[string] % identifier[self] . identifier[file_struct] . identifier[features_file] ) keyword[except] identifier[AssertionError] : keyword[raise] identifier[FeaturesNotFound] ( literal[string] literal[string] % identifier[self] . identifier[file_struct] . identifier[features_file] ) keyword[except] identifier[IOError] : keyword[raise] identifier[NoFeaturesFileError] ( literal[string] % identifier[self] . identifier[file_struct] . identifier[features_file] )
def read_features(self, tol=0.001):
    """Reads the features from a file and stores them in the current object.

    Parameters
    ----------
    tol: float
        Tolerance level to detect duration of audio.
    """
    try:
        # Read JSON file
        with open(self.file_struct.features_file) as f:
            feats = json.load(f) # depends on [control=['with'], data=['f']]
        # Store duration
        if self.dur is None:
            self.dur = float(feats['globals']['dur']) # depends on [control=['if'], data=[]]
        # Check that we have the correct global parameters
        assert np.isclose(self.dur, float(feats['globals']['dur']), rtol=tol)
        assert self.sr == int(feats['globals']['sample_rate'])
        assert self.hop_length == int(feats['globals']['hop_length'])
        assert os.path.basename(self.file_struct.audio_file) == os.path.basename(feats['globals']['audio_file'])
        # Check for specific features params
        feat_params_err = FeatureParamsError("Couldn't find features for %s id in file %s" % (self.get_id(), self.file_struct.features_file))
        if self.get_id() not in feats.keys():
            raise feat_params_err # depends on [control=['if'], data=[]]
        for param_name in self.get_param_names():
            value = getattr(self, param_name)
            if hasattr(value, '__call__'):
                # Special case of functions
                if value.__name__ != feats[self.get_id()]['params'][param_name]:
                    raise feat_params_err # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
            elif str(value) != feats[self.get_id()]['params'][param_name]:
                raise feat_params_err # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['param_name']]
        # Store actual features
        self._est_beats_times = np.array(feats['est_beats'])
        self._est_beatsync_times = np.array(feats['est_beatsync_times'])
        self._est_beats_frames = librosa.core.time_to_frames(self._est_beats_times, sr=self.sr, hop_length=self.hop_length)
        self._framesync_features = np.array(feats[self.get_id()]['framesync'])
        self._est_beatsync_features = np.array(feats[self.get_id()]['est_beatsync'])
        # Read annotated beats if available
        if 'ann_beats' in feats.keys():
            self._ann_beats_times = np.array(feats['ann_beats'])
            self._ann_beatsync_times = np.array(feats['ann_beatsync_times'])
            self._ann_beats_frames = librosa.core.time_to_frames(self._ann_beats_times, sr=self.sr, hop_length=self.hop_length)
            self._ann_beatsync_features = np.array(feats[self.get_id()]['ann_beatsync']) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
    except KeyError:
        raise WrongFeaturesFormatError('The features file %s is not correctly formatted' % self.file_struct.features_file) # depends on [control=['except'], data=[]]
    except AssertionError:
        raise FeaturesNotFound('The features for the given parameters were not found in features file %s' % self.file_struct.features_file) # depends on [control=['except'], data=[]]
    except IOError:
        raise NoFeaturesFileError('Could not find features file %s' % self.file_struct.features_file) # depends on [control=['except'], data=[]]
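Read together, the assertions and lookups in read_features imply a features file shaped roughly like the following (a minimal inferred example; the 'pcp' feature id and 'norm_type' parameter are placeholders, not the full msaf schema):

feats = {
    'globals': {'dur': 215.3, 'sample_rate': 22050,
                'hop_length': 512, 'audio_file': 'track.mp3'},
    'est_beats': [0.5, 1.0, 1.5],
    'est_beatsync_times': [0.5, 1.0],
    'pcp': {                            # one block per feature id
        'params': {'norm_type': 'max'},
        'framesync': [[0.1, 0.2]],
        'est_beatsync': [[0.1, 0.2]],
    },
}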
def save(self): """This function is called by the parent dialog window when the user selects to save the settings.""" if self.path is None: # Delete requested, so remove the current path from sys.path, if present if self.config_manager.userCodeDir is not None: sys.path.remove(self.config_manager.userCodeDir) self.config_manager.userCodeDir = None logger.info("Removed custom module search path from configuration and sys.path.") else: if self.path != self.config_manager.userCodeDir: if self.config_manager.userCodeDir is not None: sys.path.remove(self.config_manager.userCodeDir) sys.path.append(self.path) self.config_manager.userCodeDir = self.path logger.info("Saved custom module search path and added it to sys.path: {}".format(self.path))
def function[save, parameter[self]]: constant[This function is called by the parent dialog window when the user selects to save the settings.] if compare[name[self].path is constant[None]] begin[:] if compare[name[self].config_manager.userCodeDir is_not constant[None]] begin[:] call[name[sys].path.remove, parameter[name[self].config_manager.userCodeDir]] name[self].config_manager.userCodeDir assign[=] constant[None] call[name[logger].info, parameter[constant[Removed custom module search path from configuration and sys.path.]]]
keyword[def] identifier[save] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[path] keyword[is] keyword[None] : keyword[if] identifier[self] . identifier[config_manager] . identifier[userCodeDir] keyword[is] keyword[not] keyword[None] : identifier[sys] . identifier[path] . identifier[remove] ( identifier[self] . identifier[config_manager] . identifier[userCodeDir] ) identifier[self] . identifier[config_manager] . identifier[userCodeDir] = keyword[None] identifier[logger] . identifier[info] ( literal[string] ) keyword[else] : keyword[if] identifier[self] . identifier[path] != identifier[self] . identifier[config_manager] . identifier[userCodeDir] : keyword[if] identifier[self] . identifier[config_manager] . identifier[userCodeDir] keyword[is] keyword[not] keyword[None] : identifier[sys] . identifier[path] . identifier[remove] ( identifier[self] . identifier[config_manager] . identifier[userCodeDir] ) identifier[sys] . identifier[path] . identifier[append] ( identifier[self] . identifier[path] ) identifier[self] . identifier[config_manager] . identifier[userCodeDir] = identifier[self] . identifier[path] identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[path] ))
def save(self): """This function is called by the parent dialog window when the user selects to save the settings.""" if self.path is None: # Delete requested, so remove the current path from sys.path, if present if self.config_manager.userCodeDir is not None: sys.path.remove(self.config_manager.userCodeDir) self.config_manager.userCodeDir = None logger.info('Removed custom module search path from configuration and sys.path.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif self.path != self.config_manager.userCodeDir: if self.config_manager.userCodeDir is not None: sys.path.remove(self.config_manager.userCodeDir) # depends on [control=['if'], data=[]] sys.path.append(self.path) self.config_manager.userCodeDir = self.path logger.info('Saved custom module search path and added it to sys.path: {}'.format(self.path)) # depends on [control=['if'], data=[]]
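The remove-then-append dance keeps at most one managed entry on sys.path at a time. Isolated from the Qt dialog, it reduces to this sketch (names are illustrative):

import sys

def swap_search_path(old, new):
    # Replace a single managed entry in sys.path.
    if old is not None and old in sys.path:
        sys.path.remove(old)
    if new is not None:
        sys.path.append(new)
    return new

current = swap_search_path(None, '/home/me/autokey-modules')
current = swap_search_path(current, '/tmp/other-modules')
print([p for p in sys.path if 'modules' in p])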
def _run(self, tree): """ Run a query from a parse tree """ if tree.throttle: limiter = self._parse_throttle(tree.table, tree.throttle) self._query_rate_limit = limiter del tree["throttle"] return self._run(tree) if tree.action == "SELECT": return self._select(tree, self.allow_select_scan) elif tree.action == "SCAN": return self._scan(tree) elif tree.action == "DELETE": return self._delete(tree) elif tree.action == "UPDATE": return self._update(tree) elif tree.action == "CREATE": return self._create(tree) elif tree.action == "INSERT": return self._insert(tree) elif tree.action == "DROP": return self._drop(tree) elif tree.action == "ALTER": return self._alter(tree) elif tree.action == "DUMP": return self._dump(tree) elif tree.action == "LOAD": return self._load(tree) elif tree.action == "EXPLAIN": return self._explain(tree) elif tree.action == "ANALYZE": self._analyzing = True self.connection.default_return_capacity = True return self._run(tree[1]) else: raise SyntaxError("Unrecognized action '%s'" % tree.action)
def function[_run, parameter[self, tree]]: constant[ Run a query from a parse tree ] if name[tree].throttle begin[:] variable[limiter] assign[=] call[name[self]._parse_throttle, parameter[name[tree].table, name[tree].throttle]] name[self]._query_rate_limit assign[=] name[limiter] <ast.Delete object at 0x7da1b0cb7970> return[call[name[self]._run, parameter[name[tree]]]] if compare[name[tree].action equal[==] constant[SELECT]] begin[:] return[call[name[self]._select, parameter[name[tree], name[self].allow_select_scan]]]
keyword[def] identifier[_run] ( identifier[self] , identifier[tree] ): literal[string] keyword[if] identifier[tree] . identifier[throttle] : identifier[limiter] = identifier[self] . identifier[_parse_throttle] ( identifier[tree] . identifier[table] , identifier[tree] . identifier[throttle] ) identifier[self] . identifier[_query_rate_limit] = identifier[limiter] keyword[del] identifier[tree] [ literal[string] ] keyword[return] identifier[self] . identifier[_run] ( identifier[tree] ) keyword[if] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_select] ( identifier[tree] , identifier[self] . identifier[allow_select_scan] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_scan] ( identifier[tree] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_delete] ( identifier[tree] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_update] ( identifier[tree] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_create] ( identifier[tree] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_insert] ( identifier[tree] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_drop] ( identifier[tree] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_alter] ( identifier[tree] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_dump] ( identifier[tree] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_load] ( identifier[tree] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : keyword[return] identifier[self] . identifier[_explain] ( identifier[tree] ) keyword[elif] identifier[tree] . identifier[action] == literal[string] : identifier[self] . identifier[_analyzing] = keyword[True] identifier[self] . identifier[connection] . identifier[default_return_capacity] = keyword[True] keyword[return] identifier[self] . identifier[_run] ( identifier[tree] [ literal[int] ]) keyword[else] : keyword[raise] identifier[SyntaxError] ( literal[string] % identifier[tree] . identifier[action] )
def _run(self, tree): """ Run a query from a parse tree """ if tree.throttle: limiter = self._parse_throttle(tree.table, tree.throttle) self._query_rate_limit = limiter del tree['throttle'] return self._run(tree) # depends on [control=['if'], data=[]] if tree.action == 'SELECT': return self._select(tree, self.allow_select_scan) # depends on [control=['if'], data=[]] elif tree.action == 'SCAN': return self._scan(tree) # depends on [control=['if'], data=[]] elif tree.action == 'DELETE': return self._delete(tree) # depends on [control=['if'], data=[]] elif tree.action == 'UPDATE': return self._update(tree) # depends on [control=['if'], data=[]] elif tree.action == 'CREATE': return self._create(tree) # depends on [control=['if'], data=[]] elif tree.action == 'INSERT': return self._insert(tree) # depends on [control=['if'], data=[]] elif tree.action == 'DROP': return self._drop(tree) # depends on [control=['if'], data=[]] elif tree.action == 'ALTER': return self._alter(tree) # depends on [control=['if'], data=[]] elif tree.action == 'DUMP': return self._dump(tree) # depends on [control=['if'], data=[]] elif tree.action == 'LOAD': return self._load(tree) # depends on [control=['if'], data=[]] elif tree.action == 'EXPLAIN': return self._explain(tree) # depends on [control=['if'], data=[]] elif tree.action == 'ANALYZE': self._analyzing = True self.connection.default_return_capacity = True return self._run(tree[1]) # depends on [control=['if'], data=[]] else: raise SyntaxError("Unrecognized action '%s'" % tree.action)
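The long if/elif ladder is a dispatch table written out by hand; a dict shrinks the boilerplate while the throttle and ANALYZE special cases stay explicit. A sketch of the pattern, with dicts standing in for dql's parse-tree objects:

class Engine:
    def _select(self, tree):
        return ('select', tree)

    def _scan(self, tree):
        return ('scan', tree)

    _HANDLERS = {'SELECT': _select, 'SCAN': _scan}

    def _run(self, tree):
        try:
            handler = self._HANDLERS[tree['action']]
        except KeyError:
            raise SyntaxError("Unrecognized action '%s'" % tree['action'])
        return handler(self, tree)

print(Engine()._run({'action': 'SCAN'}))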
def topNBottomN(self, column=0, nPercent=10, grabTopN=-1): """ Given a column name or one column index, a percent N, this function will return the top or bottom N% of the values of the column of a frame. The column must be a numerical column. :param column: a string for column name or an integer index :param nPercent: a top or bottom percentage of the column values to return :param grabTopN: -1 to grab bottom N percent and 1 to grab top N percent :returns: a H2OFrame containing two columns. The first column contains the original row indices where the top/bottom values are extracted from. The second column contains the values. """ assert (nPercent >= 0) and (nPercent<=100.0), "nPercent must be between 0.0 and 100.0" assert round(nPercent*0.01*self.nrows)>0, "Increase nPercent. Current value will result in top 0 row." if isinstance(column, int): if (column < 0) or (column>=self.ncols): raise H2OValueError("Invalid column index H2OFrame") else: colIndex = column else: # column is a column name col_names = self.names if column not in col_names: raise H2OValueError("Column name not found H2OFrame") else: colIndex = col_names.index(column) if not(self[colIndex].isnumeric()): raise H2OValueError("Wrong column type! Selected column must be numeric.") return H2OFrame._expr(expr=ExprNode("topn", self, colIndex, nPercent, grabTopN))
def function[topNBottomN, parameter[self, column, nPercent, grabTopN]]: constant[ Given a column name or one column index, a percent N, this function will return the top or bottom N% of the values of the column of a frame. The column must be a numerical column. :param column: a string for column name or an integer index :param nPercent: a top or bottom percentage of the column values to return :param grabTopN: -1 to grab bottom N percent and 1 to grab top N percent :returns: a H2OFrame containing two columns. The first column contains the original row indices where the top/bottom values are extracted from. The second column contains the values. ] assert[<ast.BoolOp object at 0x7da1b05bdea0>] assert[compare[call[name[round], parameter[binary_operation[binary_operation[name[nPercent] * constant[0.01]] * name[self].nrows]]] greater[>] constant[0]]] if call[name[isinstance], parameter[name[column], name[int]]] begin[:] if <ast.BoolOp object at 0x7da1b05bc0a0> begin[:] <ast.Raise object at 0x7da1b05bd7b0> if <ast.UnaryOp object at 0x7da1b03736d0> begin[:] <ast.Raise object at 0x7da1b03713f0> return[call[name[H2OFrame]._expr, parameter[]]]
keyword[def] identifier[topNBottomN] ( identifier[self] , identifier[column] = literal[int] , identifier[nPercent] = literal[int] , identifier[grabTopN] =- literal[int] ): literal[string] keyword[assert] ( identifier[nPercent] >= literal[int] ) keyword[and] ( identifier[nPercent] <= literal[int] ), literal[string] keyword[assert] identifier[round] ( identifier[nPercent] * literal[int] * identifier[self] . identifier[nrows] )> literal[int] , literal[string] keyword[if] identifier[isinstance] ( identifier[column] , identifier[int] ): keyword[if] ( identifier[column] < literal[int] ) keyword[or] ( identifier[column] >= identifier[self] . identifier[ncols] ): keyword[raise] identifier[H2OValueError] ( literal[string] ) keyword[else] : identifier[colIndex] = identifier[column] keyword[else] : identifier[col_names] = identifier[self] . identifier[names] keyword[if] identifier[column] keyword[not] keyword[in] identifier[col_names] : keyword[raise] identifier[H2OValueError] ( literal[string] ) keyword[else] : identifier[colIndex] = identifier[col_names] . identifier[index] ( identifier[column] ) keyword[if] keyword[not] ( identifier[self] [ identifier[colIndex] ]. identifier[isnumeric] ()): keyword[raise] identifier[H2OValueError] ( literal[string] ) keyword[return] identifier[H2OFrame] . identifier[_expr] ( identifier[expr] = identifier[ExprNode] ( literal[string] , identifier[self] , identifier[colIndex] , identifier[nPercent] , identifier[grabTopN] ))
def topNBottomN(self, column=0, nPercent=10, grabTopN=-1): """ Given a column name or one column index, a percent N, this function will return the top or bottom N% of the values of the column of a frame. The column must be a numerical column. :param column: a string for column name or an integer index :param nPercent: a top or bottom percentage of the column values to return :param grabTopN: -1 to grab bottom N percent and 1 to grab top N percent :returns: a H2OFrame containing two columns. The first column contains the original row indices where the top/bottom values are extracted from. The second column contains the values. """ assert nPercent >= 0 and nPercent <= 100.0, 'nPercent must be between 0.0 and 100.0' assert round(nPercent * 0.01 * self.nrows) > 0, 'Increase nPercent. Current value will result in top 0 row.' if isinstance(column, int): if column < 0 or column >= self.ncols: raise H2OValueError('Invalid column index H2OFrame') # depends on [control=['if'], data=[]] else: colIndex = column # depends on [control=['if'], data=[]] else: # column is a column name col_names = self.names if column not in col_names: raise H2OValueError('Column name not found H2OFrame') # depends on [control=['if'], data=[]] else: colIndex = col_names.index(column) if not self[colIndex].isnumeric(): raise H2OValueError('Wrong column type! Selected column must be numeric.') # depends on [control=['if'], data=[]] return H2OFrame._expr(expr=ExprNode('topn', self, colIndex, nPercent, grabTopN))
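Calling the method returns a two-column frame of original row indices and values. A typical session looks like this (it requires a running H2O cluster, so the snippet is illustrative rather than something to paste into a test):

import h2o

h2o.init()
fr = h2o.H2OFrame({'x': [5, 1, 9, 3, 7, 2, 8, 4, 6, 0]})
top = fr.topNBottomN(column='x', nPercent=20, grabTopN=1)
print(top)  # indices and values of the two largest entries of x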
def get_stargazers(self, url, headers={}): """ Return a list of the stargazers of a GitHub repo Includes both the 'starred_at' and 'user' data. param: url url is the 'stargazers_url' of the form: https://api.github.com/repos/LLNL/spack/stargazers """ url = url + '/stargazers?per_page=100&page=%s' page = 1 gazers = [] json_data = requests.get(url % page, headers=headers).json() while json_data: gazers.extend(json_data) page += 1 json_data = requests.get(url % page, headers=headers).json() return gazers
def function[get_stargazers, parameter[self, url, headers]]: constant[ Return a list of the stargazers of a GitHub repo Includes both the 'starred_at' and 'user' data. param: url url is the 'stargazers_url' of the form: https://api.github.com/repos/LLNL/spack/stargazers ] variable[url] assign[=] binary_operation[name[url] + constant[/stargazers?per_page=100&page=%s]] variable[page] assign[=] constant[1] variable[gazers] assign[=] list[[]] variable[json_data] assign[=] call[call[name[requests].get, parameter[binary_operation[name[url] <ast.Mod object at 0x7da2590d6920> name[page]]]].json, parameter[]] while name[json_data] begin[:] call[name[gazers].extend, parameter[name[json_data]]] <ast.AugAssign object at 0x7da1b02c6ec0> variable[json_data] assign[=] call[call[name[requests].get, parameter[binary_operation[name[url] <ast.Mod object at 0x7da2590d6920> name[page]]]].json, parameter[]] return[name[gazers]]
keyword[def] identifier[get_stargazers] ( identifier[self] , identifier[url] , identifier[headers] ={}): literal[string] identifier[url] = identifier[url] + literal[string] identifier[page] = literal[int] identifier[gazers] =[] identifier[json_data] = identifier[requests] . identifier[get] ( identifier[url] % identifier[page] , identifier[headers] = identifier[headers] ). identifier[json] () keyword[while] identifier[json_data] : identifier[gazers] . identifier[extend] ( identifier[json_data] ) identifier[page] += literal[int] identifier[json_data] = identifier[requests] . identifier[get] ( identifier[url] % identifier[page] , identifier[headers] = identifier[headers] ). identifier[json] () keyword[return] identifier[gazers]
def get_stargazers(self, url, headers={}): """ Return a list of the stargazers of a GitHub repo Includes both the 'starred_at' and 'user' data. param: url url is the 'stargazers_url' of the form: https://api.github.com/repos/LLNL/spack/stargazers """ url = url + '/stargazers?per_page=100&page=%s' page = 1 gazers = [] json_data = requests.get(url % page, headers=headers).json() while json_data: gazers.extend(json_data) page += 1 json_data = requests.get(url % page, headers=headers).json() # depends on [control=['while'], data=[]] return gazers
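The loop pages until GitHub returns an empty list; the same increment-until-empty pattern works for any offset-paged endpoint. A freestanding version (the Accept header shown is GitHub's documented way to get 'starred_at'; passing it here is an assumption about how callers supply headers):

import requests

def fetch_all_pages(url, headers={}):
    # Collect items from a paged endpoint until a page comes back empty.
    items, page = [], 1
    while True:
        batch = requests.get(url, params={'per_page': 100, 'page': page},
                             headers=headers).json()
        if not batch:
            return items
        items.extend(batch)
        page += 1

stars = fetch_all_pages('https://api.github.com/repos/LLNL/spack/stargazers',
                        headers={'Accept': 'application/vnd.github.star+json'})
print(len(stars))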
def _integrate(self, time_steps, capture_elements, return_timestamps):
        """ Performs Euler integration

        Parameters
        ----------
        time_steps: iterable
            the time steps that the integrator progresses over

        capture_elements: list
            which model elements to capture - uses pysafe names

        return_timestamps:
            the subset of 'time_steps' for which values should be returned

        Returns
        -------
        outputs: list of dictionaries

        """
        # Todo: consider adding the timestamp to the return elements, and using that as the index
        outputs = []

        for t2 in time_steps[1:]:
            if self.time() in return_timestamps:
                outputs.append({key: getattr(self.components, key)()
                                for key in capture_elements})
            self._euler_step(t2 - self.time())
            self.time.update(t2)  # this will clear the stepwise caches

        # need to add one more time step, because we run only the state updates in the previous
        # loop and thus may be one short.
        if self.time() in return_timestamps:
            outputs.append({key: getattr(self.components, key)()
                            for key in capture_elements})

        return outputs
def function[_integrate, parameter[self, time_steps, capture_elements, return_timestamps]]: constant[ Performs euler integration Parameters ---------- time_steps: iterable the time steps that the integrator progresses over capture_elements: list which model elements to capture - uses pysafe names return_timestamps: which subset of 'timesteps' should be values be returned? Returns ------- outputs: list of dictionaries ] variable[outputs] assign[=] list[[]] for taget[name[t2]] in starred[call[name[time_steps]][<ast.Slice object at 0x7da18bcc8340>]] begin[:] if compare[call[name[self].time, parameter[]] in name[return_timestamps]] begin[:] call[name[outputs].append, parameter[<ast.DictComp object at 0x7da18bccb3d0>]] call[name[self]._euler_step, parameter[binary_operation[name[t2] - call[name[self].time, parameter[]]]]] call[name[self].time.update, parameter[name[t2]]] if compare[call[name[self].time, parameter[]] in name[return_timestamps]] begin[:] call[name[outputs].append, parameter[<ast.DictComp object at 0x7da18bcc9210>]] return[name[outputs]]
keyword[def] identifier[_integrate] ( identifier[self] , identifier[time_steps] , identifier[capture_elements] , identifier[return_timestamps] ): literal[string] identifier[outputs] =[] keyword[for] identifier[t2] keyword[in] identifier[time_steps] [ literal[int] :]: keyword[if] identifier[self] . identifier[time] () keyword[in] identifier[return_timestamps] : identifier[outputs] . identifier[append] ({ identifier[key] : identifier[getattr] ( identifier[self] . identifier[components] , identifier[key] )() keyword[for] identifier[key] keyword[in] identifier[capture_elements] }) identifier[self] . identifier[_euler_step] ( identifier[t2] - identifier[self] . identifier[time] ()) identifier[self] . identifier[time] . identifier[update] ( identifier[t2] ) keyword[if] identifier[self] . identifier[time] () keyword[in] identifier[return_timestamps] : identifier[outputs] . identifier[append] ({ identifier[key] : identifier[getattr] ( identifier[self] . identifier[components] , identifier[key] )() keyword[for] identifier[key] keyword[in] identifier[capture_elements] }) keyword[return] identifier[outputs]
def _integrate(self, time_steps, capture_elements, return_timestamps):
    """ Performs Euler integration

    Parameters
    ----------
    time_steps: iterable
        the time steps that the integrator progresses over

    capture_elements: list
        which model elements to capture - uses pysafe names

    return_timestamps:
        the subset of 'time_steps' for which values should be returned

    Returns
    -------
    outputs: list of dictionaries

    """
    # Todo: consider adding the timestamp to the return elements, and using that as the index
    outputs = []
    for t2 in time_steps[1:]:
        if self.time() in return_timestamps:
            outputs.append({key: getattr(self.components, key)() for key in capture_elements}) # depends on [control=['if'], data=[]]
        self._euler_step(t2 - self.time())
        self.time.update(t2) # this will clear the stepwise caches # depends on [control=['for'], data=['t2']]
    # need to add one more time step, because we run only the state updates in the previous
    # loop and thus may be one short.
    if self.time() in return_timestamps:
        outputs.append({key: getattr(self.components, key)() for key in capture_elements}) # depends on [control=['if'], data=[]]
    return outputs
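The integration loop is plain forward Euler: sample if the clock sits on a requested timestamp, step the state, advance the clock, and sample once more at the end. Stripped of the model plumbing it reduces to this freestanding sketch for dx/dt = -x:

def euler(deriv, x0, time_steps, return_timestamps):
    t, x, outputs = time_steps[0], x0, []
    if t in return_timestamps:
        outputs.append((t, x))
    for t2 in time_steps[1:]:
        x += deriv(x) * (t2 - t)  # one Euler step over dt = t2 - t
        t = t2
        if t in return_timestamps:
            outputs.append((t, x))
    return outputs

steps = [i * 0.25 for i in range(9)]  # 0.0 .. 2.0
print(euler(lambda x: -x, 1.0, steps, {1.0, 2.0}))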
def get_referrers(self, url='', headers={}, repo_name=''):
        """
        Retrieves the total referrers and unique referrers
        of all repos in JSON and then stores them in a dict.
        """
        # JSON payload from the API
        url_referrers = (url + '/traffic/popular/referrers')
        r1 = requests.get(url_referrers, headers=headers)
        referrers_json = r1.json()
        self.referrers_json[repo_name] = referrers_json
        # Aggregated totals for CSV output
        for referrer in referrers_json:
            ref_name = referrer['referrer']
            try:
                tuple_in = (referrer['count'], referrer['uniques'])  # current values
                tuple = (self.referrers[ref_name][0] + tuple_in[0],  # calculate new values
                         self.referrers[ref_name][1] + tuple_in[1])
                self.referrers[ref_name] = tuple  # record new values
            except KeyError:
                tuple = self.referrers[ref_name] = (referrer['count'],
                                                    referrer['uniques'])
                self.referrers_lower[ref_name.lower()] = ref_name
def function[get_referrers, parameter[self, url, headers, repo_name]]: constant[ Retrieves the total referrers and unique referrers of all repos in json and then stores it in a dict. ] variable[url_referrers] assign[=] binary_operation[name[url] + constant[/traffic/popular/referrers]] variable[r1] assign[=] call[name[requests].get, parameter[name[url_referrers]]] variable[referrers_json] assign[=] call[name[r1].json, parameter[]] call[name[self].referrers_json][name[repo_name]] assign[=] name[referrers_json] for taget[name[referrer]] in starred[name[referrers_json]] begin[:] variable[ref_name] assign[=] call[name[referrer]][constant[referrer]] <ast.Try object at 0x7da1b02d8d00>
keyword[def] identifier[get_referrers] ( identifier[self] , identifier[url] = literal[string] , identifier[headers] ={}, identifier[repo_name] = literal[string] ): literal[string] identifier[url_referrers] =( identifier[url] + literal[string] ) identifier[r1] = identifier[requests] . identifier[get] ( identifier[url_referrers] , identifier[headers] = identifier[headers] ) identifier[referrers_json] = identifier[r1] . identifier[json] () identifier[self] . identifier[referrers_json] [ identifier[repo_name] ]= identifier[referrers_json] keyword[for] identifier[referrer] keyword[in] identifier[referrers_json] : identifier[ref_name] = identifier[referrer] [ literal[string] ] keyword[try] : identifier[tuple_in] =( identifier[referrer] [ literal[string] ], identifier[referrer] [ literal[string] ]) identifier[tuple] =( identifier[self] . identifier[referrers] [ identifier[ref_name] ][ literal[int] ]+ identifier[tuple_in] [ literal[int] ], identifier[self] . identifier[referrers] [ identifier[ref_name] ][ literal[int] ]+ identifier[tuple_in] [ literal[int] ]) identifier[self] . identifier[referrers] [ identifier[ref_name] ]= identifier[tuple] keyword[except] identifier[KeyError] : identifier[tuple] = identifier[self] . identifier[referrers] [ identifier[ref_name] ]=( identifier[referrer] [ literal[string] ], identifier[referrer] [ literal[string] ]) identifier[self] . identifier[referrers_lower] [ identifier[ref_name] . identifier[lower] ()]= identifier[ref_name]
def get_referrers(self, url='', headers={}, repo_name=''):
    """
    Retrieves the total referrers and unique referrers
    of all repos in JSON and then stores them in a dict.
    """
    # JSON payload from the API
    url_referrers = url + '/traffic/popular/referrers'
    r1 = requests.get(url_referrers, headers=headers)
    referrers_json = r1.json()
    self.referrers_json[repo_name] = referrers_json
    # Aggregated totals for CSV output
    for referrer in referrers_json:
        ref_name = referrer['referrer']
        try:
            tuple_in = (referrer['count'], referrer['uniques'])  # current values
            # calculate new values
            tuple = (self.referrers[ref_name][0] + tuple_in[0], self.referrers[ref_name][1] + tuple_in[1])
            self.referrers[ref_name] = tuple  # record new values # depends on [control=['try'], data=[]]
        except KeyError:
            tuple = self.referrers[ref_name] = (referrer['count'], referrer['uniques'])
            self.referrers_lower[ref_name.lower()] = ref_name # depends on [control=['except'], data=[]]
    # depends on [control=['for'], data=['referrer']]
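The try/except KeyError around the running totals is the classic initialize-on-first-sight pattern; collections.Counter expresses it directly (a sketch over a hand-made payload):

from collections import Counter

referrers_json = [
    {'referrer': 'Google', 'count': 4, 'uniques': 3},
    {'referrer': 'Google', 'count': 2, 'uniques': 1},
    {'referrer': 'news.ycombinator.com', 'count': 5, 'uniques': 5},
]

totals, uniques = Counter(), Counter()
for ref in referrers_json:
    totals[ref['referrer']] += ref['count']
    uniques[ref['referrer']] += ref['uniques']

print(totals['Google'], uniques['Google'])  # 6 4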
def thread_exists(self, thread_id): """Check if a thread exists or has 404'd. Args: thread_id (int): Thread ID Returns: bool: Whether the given thread exists on this board. """ return self._requests_session.head( self._url.thread_api_url( thread_id=thread_id ) ).ok
def function[thread_exists, parameter[self, thread_id]]: constant[Check if a thread exists or has 404'd. Args: thread_id (int): Thread ID Returns: bool: Whether the given thread exists on this board. ] return[call[name[self]._requests_session.head, parameter[call[name[self]._url.thread_api_url, parameter[]]]].ok]
keyword[def] identifier[thread_exists] ( identifier[self] , identifier[thread_id] ): literal[string] keyword[return] identifier[self] . identifier[_requests_session] . identifier[head] ( identifier[self] . identifier[_url] . identifier[thread_api_url] ( identifier[thread_id] = identifier[thread_id] ) ). identifier[ok]
def thread_exists(self, thread_id): """Check if a thread exists or has 404'd. Args: thread_id (int): Thread ID Returns: bool: Whether the given thread exists on this board. """ return self._requests_session.head(self._url.thread_api_url(thread_id=thread_id)).ok
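A HEAD request suffices because only the status code matters, and requests exposes it as .ok (True for 2xx). Standalone, against the 4chan JSON API that the library wraps (treat the thread number as an example):

import requests

def thread_exists(board, thread_id):
    url = 'https://a.4cdn.org/%s/thread/%d.json' % (board, thread_id)
    return requests.head(url).ok  # False on 404

print(thread_exists('g', 51971506))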
def xray_botocore_api_call(wrapped, instance, args, kwargs): """Wrapper around botocore's base client API call method.""" return generic_xray_wrapper( wrapped, instance, args, kwargs, name=get_service_name, namespace='aws', metadata_extractor=extract_aws_metadata, error_handling_type=ERROR_HANDLING_BOTOCORE, )
def function[xray_botocore_api_call, parameter[wrapped, instance, args, kwargs]]: constant[Wrapper around botocore's base client API call method.] return[call[name[generic_xray_wrapper], parameter[name[wrapped], name[instance], name[args], name[kwargs]]]]
keyword[def] identifier[xray_botocore_api_call] ( identifier[wrapped] , identifier[instance] , identifier[args] , identifier[kwargs] ): literal[string] keyword[return] identifier[generic_xray_wrapper] ( identifier[wrapped] , identifier[instance] , identifier[args] , identifier[kwargs] , identifier[name] = identifier[get_service_name] , identifier[namespace] = literal[string] , identifier[metadata_extractor] = identifier[extract_aws_metadata] , identifier[error_handling_type] = identifier[ERROR_HANDLING_BOTOCORE] , )
def xray_botocore_api_call(wrapped, instance, args, kwargs): """Wrapper around botocore's base client API call method.""" return generic_xray_wrapper(wrapped, instance, args, kwargs, name=get_service_name, namespace='aws', metadata_extractor=extract_aws_metadata, error_handling_type=ERROR_HANDLING_BOTOCORE)
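The (wrapped, instance, args, kwargs) signature is the wrapt convention, which lets a single wrapper serve plain functions and bound methods alike. A bare-bones wrapper in the same shape, with generic_xray_wrapper's segment bookkeeping elided:

import wrapt

@wrapt.decorator
def traced(wrapped, instance, args, kwargs):
    print('entering', wrapped.__name__)
    try:
        return wrapped(*args, **kwargs)
    finally:
        print('leaving', wrapped.__name__)

@traced
def make_api_call(operation):
    return {'operation': operation}

print(make_api_call('ListBuckets'))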
def _parse_supybot_timestamp(self, line): """Parse timestamp section""" m = self.SUPYBOT_TIMESTAMP_REGEX.match(line) if not m: msg = "date expected on line %s" % (str(self.nline)) raise ParseError(cause=msg) ts = m.group('ts') msg = m.group('msg') return ts, msg
def function[_parse_supybot_timestamp, parameter[self, line]]: constant[Parse timestamp section] variable[m] assign[=] call[name[self].SUPYBOT_TIMESTAMP_REGEX.match, parameter[name[line]]] if <ast.UnaryOp object at 0x7da1b0286cb0> begin[:] variable[msg] assign[=] binary_operation[constant[date expected on line %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[self].nline]]] <ast.Raise object at 0x7da1b0286650> variable[ts] assign[=] call[name[m].group, parameter[constant[ts]]] variable[msg] assign[=] call[name[m].group, parameter[constant[msg]]] return[tuple[[<ast.Name object at 0x7da1b0380df0>, <ast.Name object at 0x7da1b0381360>]]]
keyword[def] identifier[_parse_supybot_timestamp] ( identifier[self] , identifier[line] ): literal[string] identifier[m] = identifier[self] . identifier[SUPYBOT_TIMESTAMP_REGEX] . identifier[match] ( identifier[line] ) keyword[if] keyword[not] identifier[m] : identifier[msg] = literal[string] %( identifier[str] ( identifier[self] . identifier[nline] )) keyword[raise] identifier[ParseError] ( identifier[cause] = identifier[msg] ) identifier[ts] = identifier[m] . identifier[group] ( literal[string] ) identifier[msg] = identifier[m] . identifier[group] ( literal[string] ) keyword[return] identifier[ts] , identifier[msg]
def _parse_supybot_timestamp(self, line): """Parse timestamp section""" m = self.SUPYBOT_TIMESTAMP_REGEX.match(line) if not m: msg = 'date expected on line %s' % str(self.nline) raise ParseError(cause=msg) # depends on [control=['if'], data=[]] ts = m.group('ts') msg = m.group('msg') return (ts, msg)
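SUPYBOT_TIMESTAMP_REGEX is defined elsewhere in the class; a pattern with the same two named groups, ts and msg, matching Supybot's ISO-style log lines would look like this (the exact pattern is an assumption, not perceval's):

import re

SUPYBOT_TIMESTAMP_REGEX = re.compile(
    r'^(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4})\s+(?P<msg>.+)$')

line = '2016-06-27T14:15:59+0000  <user> hello world'
m = SUPYBOT_TIMESTAMP_REGEX.match(line)
print(m.group('ts'), '|', m.group('msg'))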
def moveEvent(self, event): """Reimplement Qt method""" if not self.isMaximized() and not self.fullscreen_flag: self.window_position = self.pos() QMainWindow.moveEvent(self, event) # To be used by the tour to be able to move self.sig_moved.emit(event)
def function[moveEvent, parameter[self, event]]: constant[Reimplement Qt method] if <ast.BoolOp object at 0x7da18bcc8a30> begin[:] name[self].window_position assign[=] call[name[self].pos, parameter[]] call[name[QMainWindow].moveEvent, parameter[name[self], name[event]]] call[name[self].sig_moved.emit, parameter[name[event]]]
keyword[def] identifier[moveEvent] ( identifier[self] , identifier[event] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[isMaximized] () keyword[and] keyword[not] identifier[self] . identifier[fullscreen_flag] : identifier[self] . identifier[window_position] = identifier[self] . identifier[pos] () identifier[QMainWindow] . identifier[moveEvent] ( identifier[self] , identifier[event] ) identifier[self] . identifier[sig_moved] . identifier[emit] ( identifier[event] )
def moveEvent(self, event): """Reimplement Qt method""" if not self.isMaximized() and (not self.fullscreen_flag): self.window_position = self.pos() # depends on [control=['if'], data=[]] QMainWindow.moveEvent(self, event) # To be used by the tour to be able to move self.sig_moved.emit(event)
def CreateClass(self, *args, **kwargs):
        """
        Override the CreateClass method in MOFWBEMConnection

        For a description of the parameters, see
        :meth:`pywbem.WBEMConnection.CreateClass`.
        """

        cc = args[0] if args else kwargs['NewClass']
        namespace = self.getns()
        try:
            self.compile_ordered_classnames.append(cc.classname)

            # The following generates an exception for each new ns
            self.classes[self.default_namespace][cc.classname] = cc
        except KeyError:
            self.classes[namespace] = \
                NocaseDict({cc.classname: cc})

        # Validate that references and embedded instance properties, methods,
        # etc. have classes that exist in repo. This also initiates the
        # mechanism that ensures that prerequisite classes are inserted
        # into the repo.
        objects = list(cc.properties.values())
        for meth in cc.methods.values():
            objects += list(meth.parameters.values())
        for obj in objects:
            # Validate that reference_class exists in repo
            if obj.type == 'reference':
                try:
                    self.GetClass(obj.reference_class, LocalOnly=True,
                                  IncludeQualifiers=True)
                except CIMError as ce:
                    if ce.status_code == CIM_ERR_NOT_FOUND:
                        raise CIMError(
                            CIM_ERR_INVALID_PARAMETER,
                            _format("Class {0!A} referenced by element {1!A} "
                                    "of class {2!A} in namespace {3!A} does "
                                    "not exist",
                                    obj.reference_class, obj.name,
                                    cc.classname, self.getns()),
                            conn_id=self.conn_id)
                    raise
            elif obj.type == 'string':
                if 'EmbeddedInstance' in obj.qualifiers:
                    eiqualifier = obj.qualifiers['EmbeddedInstance']
                    try:
                        self.GetClass(eiqualifier.value, LocalOnly=True,
                                      IncludeQualifiers=False)
                    except CIMError as ce:
                        if ce.status_code == CIM_ERR_NOT_FOUND:
                            raise CIMError(
                                CIM_ERR_INVALID_PARAMETER,
                                _format("Class {0!A} specified by "
                                        "EmbeddedInstance qualifier on element "
                                        "{1!A} of class {2!A} in namespace "
                                        "{3!A} does not exist",
                                        eiqualifier.value, obj.name,
                                        cc.classname, self.getns()),
                                conn_id=self.conn_id)
                        raise

        ccr = self.conn._resolve_class(  # pylint: disable=protected-access
            cc, namespace, self.qualifiers[namespace])

        if namespace not in self.classes:
            self.classes[namespace] = NocaseDict()
        self.classes[namespace][ccr.classname] = ccr

        try:
            self.class_names[namespace].append(ccr.classname)
        except KeyError:
            self.class_names[namespace] = [ccr.classname]
def function[CreateClass, parameter[self]]: constant[ Override the CreateClass method in MOFWBEMConnection For a description of the parameters, see :meth:`pywbem.WBEMConnection.CreateClass`. ] variable[cc] assign[=] <ast.IfExp object at 0x7da207f9ac80> variable[namespace] assign[=] call[name[self].getns, parameter[]] <ast.Try object at 0x7da207f98400> variable[objects] assign[=] call[name[list], parameter[call[name[cc].properties.values, parameter[]]]] for taget[name[meth]] in starred[call[name[cc].methods.values, parameter[]]] begin[:] <ast.AugAssign object at 0x7da207f9ab00> for taget[name[obj]] in starred[name[objects]] begin[:] if compare[name[obj].type equal[==] constant[reference]] begin[:] <ast.Try object at 0x7da207f9b730> variable[ccr] assign[=] call[name[self].conn._resolve_class, parameter[name[cc], name[namespace], call[name[self].qualifiers][name[namespace]]]] if compare[name[namespace] <ast.NotIn object at 0x7da2590d7190> name[self].classes] begin[:] call[name[self].classes][name[namespace]] assign[=] call[name[NocaseDict], parameter[]] call[call[name[self].classes][name[namespace]]][name[ccr].classname] assign[=] name[ccr] <ast.Try object at 0x7da207f9b460>
keyword[def] identifier[CreateClass] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[cc] = identifier[args] [ literal[int] ] keyword[if] identifier[args] keyword[else] identifier[kwargs] [ literal[string] ] identifier[namespace] = identifier[self] . identifier[getns] () keyword[try] : identifier[self] . identifier[compile_ordered_classnames] . identifier[append] ( identifier[cc] . identifier[classname] ) identifier[self] . identifier[classes] [ identifier[self] . identifier[default_namespace] ][ identifier[cc] . identifier[classname] ]= identifier[cc] keyword[except] identifier[KeyError] : identifier[self] . identifier[classes] [ identifier[namespace] ]= identifier[NocaseDict] ({ identifier[cc] . identifier[classname] : identifier[cc] }) identifier[objects] = identifier[list] ( identifier[cc] . identifier[properties] . identifier[values] ()) keyword[for] identifier[meth] keyword[in] identifier[cc] . identifier[methods] . identifier[values] (): identifier[objects] += identifier[list] ( identifier[meth] . identifier[parameters] . identifier[values] ()) keyword[for] identifier[obj] keyword[in] identifier[objects] : keyword[if] identifier[obj] . identifier[type] == literal[string] : keyword[try] : identifier[self] . identifier[GetClass] ( identifier[obj] . identifier[reference_class] , identifier[LocalOnly] = keyword[True] , identifier[IncludeQualifiers] = keyword[True] ) keyword[except] identifier[CIMError] keyword[as] identifier[ce] : keyword[if] identifier[ce] . identifier[status_code] == identifier[CIM_ERR_NOT_FOUND] : keyword[raise] identifier[CIMError] ( identifier[CIM_ERR_INVALID_PARAMETER] , identifier[_format] ( literal[string] literal[string] literal[string] , identifier[obj] . identifier[reference_class] , identifier[obj] . identifier[name] , identifier[cc] . identifier[classname] , identifier[self] . identifier[getns] ()), identifier[conn_id] = identifier[self] . identifier[conn_id] ) keyword[raise] keyword[elif] identifier[obj] . identifier[type] == literal[string] : keyword[if] literal[string] keyword[in] identifier[obj] . identifier[qualifiers] : identifier[eiqualifier] = identifier[obj] . identifier[qualifiers] [ literal[string] ] keyword[try] : identifier[self] . identifier[GetClass] ( identifier[eiqualifier] . identifier[value] , identifier[LocalOnly] = keyword[True] , identifier[IncludeQualifiers] = keyword[False] ) keyword[except] identifier[CIMError] keyword[as] identifier[ce] : keyword[if] identifier[ce] . identifier[status_code] == identifier[CIM_ERR_NOT_FOUND] : keyword[raise] identifier[CIMError] ( identifier[CIM_ERR_INVALID_PARAMETER] , identifier[_format] ( literal[string] literal[string] literal[string] literal[string] , identifier[eiqualifier] . identifier[value] , identifier[obj] . identifier[name] , identifier[cc] . identifier[classname] , identifier[self] . identifier[getns] ()), identifier[conn_id] = identifier[self] . identifier[conn_id] ) keyword[raise] identifier[ccr] = identifier[self] . identifier[conn] . identifier[_resolve_class] ( identifier[cc] , identifier[namespace] , identifier[self] . identifier[qualifiers] [ identifier[namespace] ]) keyword[if] identifier[namespace] keyword[not] keyword[in] identifier[self] . identifier[classes] : identifier[self] . identifier[classes] [ identifier[namespace] ]= identifier[NocaseDict] () identifier[self] . identifier[classes] [ identifier[namespace] ][ identifier[ccr] . identifier[classname] ]= identifier[ccr] keyword[try] : identifier[self] . identifier[class_names] [ identifier[namespace] ]. identifier[append] ( identifier[ccr] . identifier[classname] ) keyword[except] identifier[KeyError] : identifier[self] . identifier[class_names] [ identifier[namespace] ]=[ identifier[ccr] . identifier[classname] ]
def CreateClass(self, *args, **kwargs):
    """
    Override the CreateClass method in MOFWBEMConnection

    For a description of the parameters, see
    :meth:`pywbem.WBEMConnection.CreateClass`.
    """
    cc = args[0] if args else kwargs['NewClass']
    namespace = self.getns()
    try:
        self.compile_ordered_classnames.append(cc.classname)
        # The following generates an exception for each new ns
        self.classes[self.default_namespace][cc.classname] = cc # depends on [control=['try'], data=[]]
    except KeyError:
        self.classes[namespace] = NocaseDict({cc.classname: cc}) # depends on [control=['except'], data=[]]
    # Validate that references and embedded instance properties, methods,
    # etc. have classes that exist in repo. This also initiates the
    # mechanism that ensures that prerequisite classes are inserted
    # into the repo.
    objects = list(cc.properties.values())
    for meth in cc.methods.values():
        objects += list(meth.parameters.values()) # depends on [control=['for'], data=['meth']]
    for obj in objects:
        # Validate that reference_class exists in repo
        if obj.type == 'reference':
            try:
                self.GetClass(obj.reference_class, LocalOnly=True, IncludeQualifiers=True) # depends on [control=['try'], data=[]]
            except CIMError as ce:
                if ce.status_code == CIM_ERR_NOT_FOUND:
                    raise CIMError(CIM_ERR_INVALID_PARAMETER, _format('Class {0!A} referenced by element {1!A} of class {2!A} in namespace {3!A} does not exist', obj.reference_class, obj.name, cc.classname, self.getns()), conn_id=self.conn_id) # depends on [control=['if'], data=[]]
                raise # depends on [control=['except'], data=['ce']] # depends on [control=['if'], data=[]]
        elif obj.type == 'string':
            if 'EmbeddedInstance' in obj.qualifiers:
                eiqualifier = obj.qualifiers['EmbeddedInstance']
                try:
                    self.GetClass(eiqualifier.value, LocalOnly=True, IncludeQualifiers=False) # depends on [control=['try'], data=[]]
                except CIMError as ce:
                    if ce.status_code == CIM_ERR_NOT_FOUND:
                        raise CIMError(CIM_ERR_INVALID_PARAMETER, _format('Class {0!A} specified by EmbeddedInstance qualifier on element {1!A} of class {2!A} in namespace {3!A} does not exist', eiqualifier.value, obj.name, cc.classname, self.getns()), conn_id=self.conn_id) # depends on [control=['if'], data=[]]
                    raise # depends on [control=['except'], data=['ce']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj']]
    # pylint: disable=protected-access
    ccr = self.conn._resolve_class(cc, namespace, self.qualifiers[namespace])
    if namespace not in self.classes:
        self.classes[namespace] = NocaseDict() # depends on [control=['if'], data=['namespace']]
    self.classes[namespace][ccr.classname] = ccr
    try:
        self.class_names[namespace].append(ccr.classname) # depends on [control=['try'], data=[]]
    except KeyError:
        self.class_names[namespace] = [ccr.classname] # depends on [control=['except'], data=[]]
def clean_package_cache(self, cache_name='com.gliffy.cache.gon'): """ Clean caches from cache management e.g. com.gliffy.cache.gon org.hibernate.cache.internal.StandardQueryCache_v5 """ headers = self.form_token_headers data = {'cacheName': cache_name} return self.delete('rest/cacheManagement/1.0/cacheEntries', data=data, headers=headers)
def function[clean_package_cache, parameter[self, cache_name]]: constant[ Clean caches from cache management e.g. com.gliffy.cache.gon org.hibernate.cache.internal.StandardQueryCache_v5 ] variable[headers] assign[=] name[self].form_token_headers variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20c6ab850>], [<ast.Name object at 0x7da20c6a9900>]] return[call[name[self].delete, parameter[constant[rest/cacheManagement/1.0/cacheEntries]]]]
keyword[def] identifier[clean_package_cache] ( identifier[self] , identifier[cache_name] = literal[string] ): literal[string] identifier[headers] = identifier[self] . identifier[form_token_headers] identifier[data] ={ literal[string] : identifier[cache_name] } keyword[return] identifier[self] . identifier[delete] ( literal[string] , identifier[data] = identifier[data] , identifier[headers] = identifier[headers] )
def clean_package_cache(self, cache_name='com.gliffy.cache.gon'): """ Clean caches from cache management e.g. com.gliffy.cache.gon org.hibernate.cache.internal.StandardQueryCache_v5 """ headers = self.form_token_headers data = {'cacheName': cache_name} return self.delete('rest/cacheManagement/1.0/cacheEntries', data=data, headers=headers)
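A minimal usage sketch for the cache-cleaning call above; the Confluence class name, constructor, and URL are assumptions standing in for whichever Atlassian-style client defines this method — only the method and its cache_name argument come from the record.

# Sketch only: `Confluence` and its constructor are assumed, not confirmed
# by this record.
client = Confluence(url='https://wiki.example.com', username='admin', password='secret')
client.clean_package_cache(cache_name='org.hibernate.cache.internal.StandardQueryCache_v5')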
def search_media(self, series, query_string): """Search for media from a series starting with query_string, case-sensitive @param crunchyroll.models.Series series the series to search in @param str query_string the search query, same restrictions as `search_anime_series` @return list<crunchyroll.models.Media> """ params = { 'sort': ANDROID.FILTER_PREFIX + query_string, } params.update(self._get_series_query_dict(series)) result = self._android_api.list_media(**params) return result
def function[search_media, parameter[self, series, query_string]]: constant[Search for media from a series starting with query_string, case-sensitive @param crunchyroll.models.Series series the series to search in @param str query_string the search query, same restrictions as `search_anime_series` @return list<crunchyroll.models.Media> ] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c76da20>], [<ast.BinOp object at 0x7da20c76f5b0>]] call[name[params].update, parameter[call[name[self]._get_series_query_dict, parameter[name[series]]]]] variable[result] assign[=] call[name[self]._android_api.list_media, parameter[]] return[name[result]]
keyword[def] identifier[search_media] ( identifier[self] , identifier[series] , identifier[query_string] ): literal[string] identifier[params] ={ literal[string] : identifier[ANDROID] . identifier[FILTER_PREFIX] + identifier[query_string] , } identifier[params] . identifier[update] ( identifier[self] . identifier[_get_series_query_dict] ( identifier[series] )) identifier[result] = identifier[self] . identifier[_android_api] . identifier[list_media] (** identifier[params] ) keyword[return] identifier[result]
def search_media(self, series, query_string): """Search for media from a series starting with query_string, case-sensitive @param crunchyroll.models.Series series the series to search in @param str query_string the search query, same restrictions as `search_anime_series` @return list<crunchyroll.models.Media> """ params = {'sort': ANDROID.FILTER_PREFIX + query_string} params.update(self._get_series_query_dict(series)) result = self._android_api.list_media(**params) return result
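A usage sketch under the docstring's own references: `client` is a hypothetical API object exposing both search_anime_series (named in the docstring) and this search_media; the titles are illustrative.

# Sketch: find a series, then prefix-search its media (case-sensitive).
series = client.search_anime_series('Cowboy Bebop')[0]
for media in client.search_media(series, 'Session'):
    print(media)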
def download(self, url, listener=None, path_or_fd=None, chunk_size_bytes=None, timeout_secs=None): """Downloads data from the given URL. By default data is downloaded to a temporary file. :param string url: the url to GET data from :param listener: an optional listener to notify of all download lifecycle events :param path_or_fd: an optional file path or open file descriptor to write data to :param chunk_size_bytes: the chunk size to use for buffering data :param timeout_secs: the maximum time to wait for data to be available :returns: the path to the file data was downloaded to. :raises: Fetcher.Error if there was a problem downloading all data from the given url. """ @contextmanager def download_fp(_path_or_fd): if _path_or_fd and not isinstance(_path_or_fd, six.string_types): yield _path_or_fd, _path_or_fd.name else: if not _path_or_fd: fd, _path_or_fd = tempfile.mkstemp() os.close(fd) with safe_open(_path_or_fd, 'wb') as fp: yield fp, _path_or_fd with download_fp(path_or_fd) as (fp, path): listener = self.DownloadListener(fp).wrap(listener) self.fetch(url, listener, chunk_size_bytes=chunk_size_bytes, timeout_secs=timeout_secs) return path
def function[download, parameter[self, url, listener, path_or_fd, chunk_size_bytes, timeout_secs]]: constant[Downloads data from the given URL. By default data is downloaded to a temporary file. :param string url: the url to GET data from :param listener: an optional listener to notify of all download lifecycle events :param path_or_fd: an optional file path or open file descriptor to write data to :param chunk_size_bytes: the chunk size to use for buffering data :param timeout_secs: the maximum time to wait for data to be available :returns: the path to the file data was downloaded to. :raises: Fetcher.Error if there was a problem downloading all data from the given url. ] def function[download_fp, parameter[_path_or_fd]]: if <ast.BoolOp object at 0x7da1b22b9f30> begin[:] <ast.Yield object at 0x7da1b22b97b0> with call[name[download_fp], parameter[name[path_or_fd]]] begin[:] variable[listener] assign[=] call[call[name[self].DownloadListener, parameter[name[fp]]].wrap, parameter[name[listener]]] call[name[self].fetch, parameter[name[url], name[listener]]] return[name[path]]
keyword[def] identifier[download] ( identifier[self] , identifier[url] , identifier[listener] = keyword[None] , identifier[path_or_fd] = keyword[None] , identifier[chunk_size_bytes] = keyword[None] , identifier[timeout_secs] = keyword[None] ): literal[string] @ identifier[contextmanager] keyword[def] identifier[download_fp] ( identifier[_path_or_fd] ): keyword[if] identifier[_path_or_fd] keyword[and] keyword[not] identifier[isinstance] ( identifier[_path_or_fd] , identifier[six] . identifier[string_types] ): keyword[yield] identifier[_path_or_fd] , identifier[_path_or_fd] . identifier[name] keyword[else] : keyword[if] keyword[not] identifier[_path_or_fd] : identifier[fd] , identifier[_path_or_fd] = identifier[tempfile] . identifier[mkstemp] () identifier[os] . identifier[close] ( identifier[fd] ) keyword[with] identifier[safe_open] ( identifier[_path_or_fd] , literal[string] ) keyword[as] identifier[fp] : keyword[yield] identifier[fp] , identifier[_path_or_fd] keyword[with] identifier[download_fp] ( identifier[path_or_fd] ) keyword[as] ( identifier[fp] , identifier[path] ): identifier[listener] = identifier[self] . identifier[DownloadListener] ( identifier[fp] ). identifier[wrap] ( identifier[listener] ) identifier[self] . identifier[fetch] ( identifier[url] , identifier[listener] , identifier[chunk_size_bytes] = identifier[chunk_size_bytes] , identifier[timeout_secs] = identifier[timeout_secs] ) keyword[return] identifier[path]
def download(self, url, listener=None, path_or_fd=None, chunk_size_bytes=None, timeout_secs=None): """Downloads data from the given URL. By default data is downloaded to a temporary file. :param string url: the url to GET data from :param listener: an optional listener to notify of all download lifecycle events :param path_or_fd: an optional file path or open file descriptor to write data to :param chunk_size_bytes: the chunk size to use for buffering data :param timeout_secs: the maximum time to wait for data to be available :returns: the path to the file data was downloaded to. :raises: Fetcher.Error if there was a problem downloading all data from the given url. """ @contextmanager def download_fp(_path_or_fd): if _path_or_fd and (not isinstance(_path_or_fd, six.string_types)): yield (_path_or_fd, _path_or_fd.name) # depends on [control=['if'], data=[]] else: if not _path_or_fd: (fd, _path_or_fd) = tempfile.mkstemp() os.close(fd) # depends on [control=['if'], data=[]] with safe_open(_path_or_fd, 'wb') as fp: yield (fp, _path_or_fd) # depends on [control=['with'], data=['fp']] with download_fp(path_or_fd) as (fp, path): listener = self.DownloadListener(fp).wrap(listener) self.fetch(url, listener, chunk_size_bytes=chunk_size_bytes, timeout_secs=timeout_secs) return path # depends on [control=['with'], data=[]]
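Two usage sketches for the download targets; `fetcher` is assumed to be an instance of the Fetcher class named in the docstring's raises clause, and the URL is a placeholder.

# Sketch: with no path_or_fd, a temp file is created and its path returned.
path = fetcher.download('https://example.com/artifact.tgz', timeout_secs=30)

# Sketch: an open binary file object also works; the same path (fp.name)
# is returned.
with open('/tmp/artifact.tgz', 'wb') as fp:
    path = fetcher.download('https://example.com/artifact.tgz', path_or_fd=fp)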
def set_integration_time(self, integration_time): """Sets the integration time for the TC34725. Provide one of these constants: - TCS34725_INTEGRATIONTIME_2_4MS = 2.4ms - 1 cycle - Max Count: 1024 - TCS34725_INTEGRATIONTIME_24MS = 24ms - 10 cycles - Max Count: 10240 - TCS34725_INTEGRATIONTIME_50MS = 50ms - 20 cycles - Max Count: 20480 - TCS34725_INTEGRATIONTIME_101MS = 101ms - 42 cycles - Max Count: 43008 - TCS34725_INTEGRATIONTIME_154MS = 154ms - 64 cycles - Max Count: 65535 - TCS34725_INTEGRATIONTIME_700MS = 700ms - 256 cycles - Max Count: 65535 """ self._integration_time = integration_time self._write8(TCS34725_ATIME, integration_time)
def function[set_integration_time, parameter[self, integration_time]]: constant[Sets the integration time for the TC34725. Provide one of these constants: - TCS34725_INTEGRATIONTIME_2_4MS = 2.4ms - 1 cycle - Max Count: 1024 - TCS34725_INTEGRATIONTIME_24MS = 24ms - 10 cycles - Max Count: 10240 - TCS34725_INTEGRATIONTIME_50MS = 50ms - 20 cycles - Max Count: 20480 - TCS34725_INTEGRATIONTIME_101MS = 101ms - 42 cycles - Max Count: 43008 - TCS34725_INTEGRATIONTIME_154MS = 154ms - 64 cycles - Max Count: 65535 - TCS34725_INTEGRATIONTIME_700MS = 700ms - 256 cycles - Max Count: 65535 ] name[self]._integration_time assign[=] name[integration_time] call[name[self]._write8, parameter[name[TCS34725_ATIME], name[integration_time]]]
keyword[def] identifier[set_integration_time] ( identifier[self] , identifier[integration_time] ): literal[string] identifier[self] . identifier[_integration_time] = identifier[integration_time] identifier[self] . identifier[_write8] ( identifier[TCS34725_ATIME] , identifier[integration_time] )
def set_integration_time(self, integration_time): """Sets the integration time for the TC34725. Provide one of these constants: - TCS34725_INTEGRATIONTIME_2_4MS = 2.4ms - 1 cycle - Max Count: 1024 - TCS34725_INTEGRATIONTIME_24MS = 24ms - 10 cycles - Max Count: 10240 - TCS34725_INTEGRATIONTIME_50MS = 50ms - 20 cycles - Max Count: 20480 - TCS34725_INTEGRATIONTIME_101MS = 101ms - 42 cycles - Max Count: 43008 - TCS34725_INTEGRATIONTIME_154MS = 154ms - 64 cycles - Max Count: 65535 - TCS34725_INTEGRATIONTIME_700MS = 700ms - 256 cycles - Max Count: 65535 """ self._integration_time = integration_time self._write8(TCS34725_ATIME, integration_time)
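A usage sketch; `tcs` is a hypothetical sensor instance, while the constant comes straight from the docstring's list.

# Sketch: 154ms integration - 64 cycles - max count 65535.
tcs.set_integration_time(TCS34725_INTEGRATIONTIME_154MS)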
def eventsource_connect(url, io_loop=None, callback=None, connect_timeout=None):
    """Client-side eventsource support.

    Takes a url and returns a Future whose result is an
    `EventSourceClient`.
    """
    if io_loop is None:
        io_loop = IOLoop.current()

    if isinstance(url, httpclient.HTTPRequest):
        assert connect_timeout is None
        request = url
        # Copy and convert the headers dict/object (see comments in
        # AsyncHTTPClient.fetch)
        request.headers = httputil.HTTPHeaders(request.headers)
    else:
        request = httpclient.HTTPRequest(
            url,
            connect_timeout=connect_timeout,
            headers=httputil.HTTPHeaders({
                "Accept-Encoding": "identity"
            })
        )

    request = httpclient._RequestProxy(
        request, httpclient.HTTPRequest._DEFAULTS)
    conn = EventSourceClient(io_loop, request)
    if callback is not None:
        io_loop.add_future(conn.connect_future, callback)
    return conn.connect_future
def function[eventsource_connect, parameter[url, io_loop, callback, connect_timeout]]:
    constant[Client-side eventsource support.

    Takes a url and returns a Future whose result is an
    `EventSourceClient`.
    ]
    if compare[name[io_loop] is constant[None]] begin[:]
    variable[io_loop] assign[=] call[name[IOLoop].current, parameter[]]
    if call[name[isinstance], parameter[name[url], name[httpclient].HTTPRequest]] begin[:]
    assert[compare[name[connect_timeout] is constant[None]]]
    variable[request] assign[=] name[url]
    name[request].headers assign[=] call[name[httputil].HTTPHeaders, parameter[name[request].headers]]
    variable[request] assign[=] call[name[httpclient]._RequestProxy, parameter[name[request], name[httpclient].HTTPRequest._DEFAULTS]]
    variable[conn] assign[=] call[name[EventSourceClient], parameter[name[io_loop], name[request]]]
    if compare[name[callback] is_not constant[None]] begin[:]
    call[name[io_loop].add_future, parameter[name[conn].connect_future, name[callback]]]
    return[name[conn].connect_future]
keyword[def] identifier[eventsource_connect] ( identifier[url] , identifier[io_loop] = keyword[None] , identifier[callback] = keyword[None] , identifier[connect_timeout] = keyword[None] ): literal[string] keyword[if] identifier[io_loop] keyword[is] keyword[None] : identifier[io_loop] = identifier[IOLoop] . identifier[current] () keyword[if] identifier[isinstance] ( identifier[url] , identifier[httpclient] . identifier[HTTPRequest] ): keyword[assert] identifier[connect_timeout] keyword[is] keyword[None] identifier[request] = identifier[url] identifier[request] . identifier[headers] = identifier[httputil] . identifier[HTTPHeaders] ( identifier[request] . identifier[headers] ) keyword[else] : identifier[request] = identifier[httpclient] . identifier[HTTPRequest] ( identifier[url] , identifier[connect_timeout] = identifier[connect_timeout] , identifier[headers] = identifier[httputil] . identifier[HTTPHeaders] ({ literal[string] : literal[string] }) ) identifier[request] = identifier[httpclient] . identifier[_RequestProxy] ( identifier[request] , identifier[httpclient] . identifier[HTTPRequest] . identifier[_DEFAULTS] ) identifier[conn] = identifier[EventSourceClient] ( identifier[io_loop] , identifier[request] ) keyword[if] identifier[callback] keyword[is] keyword[not] keyword[None] : identifier[io_loop] . identifier[add_future] ( identifier[conn] . identifier[connect_future] , identifier[callback] ) keyword[return] identifier[conn] . identifier[connect_future]
def eventsource_connect(url, io_loop=None, callback=None, connect_timeout=None):
    """Client-side eventsource support.

    Takes a url and returns a Future whose result is an
    `EventSourceClient`.
    """
    if io_loop is None:
        io_loop = IOLoop.current() # depends on [control=['if'], data=['io_loop']]
    if isinstance(url, httpclient.HTTPRequest):
        assert connect_timeout is None
        request = url
        # Copy and convert the headers dict/object (see comments in
        # AsyncHTTPClient.fetch)
        request.headers = httputil.HTTPHeaders(request.headers) # depends on [control=['if'], data=[]]
    else:
        request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout, headers=httputil.HTTPHeaders({'Accept-Encoding': 'identity'}))
    request = httpclient._RequestProxy(request, httpclient.HTTPRequest._DEFAULTS)
    conn = EventSourceClient(io_loop, request)
    if callback is not None:
        io_loop.add_future(conn.connect_future, callback) # depends on [control=['if'], data=['callback']]
    return conn.connect_future
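A usage sketch resolving the returned Future inside a Tornado coroutine; the URL is a placeholder and eventsource_connect is the function defined in this record.

from tornado import gen, ioloop

@gen.coroutine
def main():
    # The Future returned by eventsource_connect resolves to an
    # EventSourceClient once the connection is established.
    conn = yield eventsource_connect('http://example.com/events', connect_timeout=5)

ioloop.IOLoop.current().run_sync(main)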
def set_custom_colorset(self): """Defines a colorset with matching colors. Provided by Joachim.""" cmd.set_color('myorange', '[253, 174, 97]') cmd.set_color('mygreen', '[171, 221, 164]') cmd.set_color('myred', '[215, 25, 28]') cmd.set_color('myblue', '[43, 131, 186]') cmd.set_color('mylightblue', '[158, 202, 225]') cmd.set_color('mylightgreen', '[229, 245, 224]')
def function[set_custom_colorset, parameter[self]]: constant[Defines a colorset with matching colors. Provided by Joachim.] call[name[cmd].set_color, parameter[constant[myorange], constant[[253, 174, 97]]]] call[name[cmd].set_color, parameter[constant[mygreen], constant[[171, 221, 164]]]] call[name[cmd].set_color, parameter[constant[myred], constant[[215, 25, 28]]]] call[name[cmd].set_color, parameter[constant[myblue], constant[[43, 131, 186]]]] call[name[cmd].set_color, parameter[constant[mylightblue], constant[[158, 202, 225]]]] call[name[cmd].set_color, parameter[constant[mylightgreen], constant[[229, 245, 224]]]]
keyword[def] identifier[set_custom_colorset] ( identifier[self] ): literal[string] identifier[cmd] . identifier[set_color] ( literal[string] , literal[string] ) identifier[cmd] . identifier[set_color] ( literal[string] , literal[string] ) identifier[cmd] . identifier[set_color] ( literal[string] , literal[string] ) identifier[cmd] . identifier[set_color] ( literal[string] , literal[string] ) identifier[cmd] . identifier[set_color] ( literal[string] , literal[string] ) identifier[cmd] . identifier[set_color] ( literal[string] , literal[string] )
def set_custom_colorset(self): """Defines a colorset with matching colors. Provided by Joachim.""" cmd.set_color('myorange', '[253, 174, 97]') cmd.set_color('mygreen', '[171, 221, 164]') cmd.set_color('myred', '[215, 25, 28]') cmd.set_color('myblue', '[43, 131, 186]') cmd.set_color('mylightblue', '[158, 202, 225]') cmd.set_color('mylightgreen', '[229, 245, 224]')
def radiance2tb(self, rad): """ Get the Tb from the radiance using the Planck function and the central wavelength of the band rad: Radiance in SI units """ return radiance2tb(rad, self.rsr[self.bandname][self.detector]['central_wavelength'] * 1e-6)
def function[radiance2tb, parameter[self, rad]]: constant[ Get the Tb from the radiance using the Planck function and the central wavelength of the band rad: Radiance in SI units ] return[call[name[radiance2tb], parameter[name[rad], binary_operation[call[call[call[name[self].rsr][name[self].bandname]][name[self].detector]][constant[central_wavelength]] * constant[1e-06]]]]]
keyword[def] identifier[radiance2tb] ( identifier[self] , identifier[rad] ): literal[string] keyword[return] identifier[radiance2tb] ( identifier[rad] , identifier[self] . identifier[rsr] [ identifier[self] . identifier[bandname] ][ identifier[self] . identifier[detector] ][ literal[string] ]* literal[int] )
def radiance2tb(self, rad): """ Get the Tb from the radiance using the Planck function and the central wavelength of the band rad: Radiance in SI units """ return radiance2tb(rad, self.rsr[self.bandname][self.detector]['central_wavelength'] * 1e-06)
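The 1e-6 factor converts the central wavelength stored in the RSR structure from micrometres to the metres expected by the SI Planck inversion; a one-line worked example (the wavelength value is illustrative):

central_wavelength_um = 10.8                         # assumed example value
central_wavelength_m = central_wavelength_um * 1e-6  # 1.08e-05 m, SI units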
def refresh_decorations(self, force=False):
    """
    Refresh decoration colors. This function is called by the syntax
    highlighter when the style changes so that we may update our
    decoration colors according to the new style.
    """
    cursor = self.editor.textCursor()
    if (self._prev_cursor is None or force or
            self._prev_cursor.blockNumber() != cursor.blockNumber()):
        for deco in self._block_decos:
            self.editor.decorations.remove(deco)
        for deco in self._block_decos:
            deco.set_outline(drift_color(
                self._get_scope_highlight_color(), 110))
            deco.set_background(self._get_scope_highlight_color())
            self.editor.decorations.add(deco)
        self._prev_cursor = cursor
def function[refresh_decorations, parameter[self, force]]:
    constant[
    Refresh decoration colors. This function is called by the syntax
    highlighter when the style changes so that we may update our
    decoration colors according to the new style.
    ]
    variable[cursor] assign[=] call[name[self].editor.textCursor, parameter[]]
    if <ast.BoolOp object at 0x7da18f09e170> begin[:]
    for taget[name[deco]] in starred[name[self]._block_decos] begin[:]
    call[name[self].editor.decorations.remove, parameter[name[deco]]]
    for taget[name[deco]] in starred[name[self]._block_decos] begin[:]
    call[name[deco].set_outline, parameter[call[name[drift_color], parameter[call[name[self]._get_scope_highlight_color, parameter[]], constant[110]]]]]
    call[name[deco].set_background, parameter[call[name[self]._get_scope_highlight_color, parameter[]]]]
    call[name[self].editor.decorations.add, parameter[name[deco]]]
    name[self]._prev_cursor assign[=] name[cursor]
keyword[def] identifier[refresh_decorations] ( identifier[self] , identifier[force] = keyword[False] ): literal[string] identifier[cursor] = identifier[self] . identifier[editor] . identifier[textCursor] () keyword[if] ( identifier[self] . identifier[_prev_cursor] keyword[is] keyword[None] keyword[or] identifier[force] keyword[or] identifier[self] . identifier[_prev_cursor] . identifier[blockNumber] ()!= identifier[cursor] . identifier[blockNumber] ()): keyword[for] identifier[deco] keyword[in] identifier[self] . identifier[_block_decos] : identifier[self] . identifier[editor] . identifier[decorations] . identifier[remove] ( identifier[deco] ) keyword[for] identifier[deco] keyword[in] identifier[self] . identifier[_block_decos] : identifier[deco] . identifier[set_outline] ( identifier[drift_color] ( identifier[self] . identifier[_get_scope_highlight_color] (), literal[int] )) identifier[deco] . identifier[set_background] ( identifier[self] . identifier[_get_scope_highlight_color] ()) identifier[self] . identifier[editor] . identifier[decorations] . identifier[add] ( identifier[deco] ) identifier[self] . identifier[_prev_cursor] = identifier[cursor]
def refresh_decorations(self, force=False):
    """
    Refresh decoration colors. This function is called by the syntax
    highlighter when the style changes so that we may update our
    decoration colors according to the new style.
    """
    cursor = self.editor.textCursor()
    if self._prev_cursor is None or force or self._prev_cursor.blockNumber() != cursor.blockNumber():
        for deco in self._block_decos:
            self.editor.decorations.remove(deco) # depends on [control=['for'], data=['deco']]
        for deco in self._block_decos:
            deco.set_outline(drift_color(self._get_scope_highlight_color(), 110))
            deco.set_background(self._get_scope_highlight_color())
            self.editor.decorations.add(deco) # depends on [control=['for'], data=['deco']] # depends on [control=['if'], data=[]]
    self._prev_cursor = cursor
def get_witness_for_key_prefix(db, node_hash, key):
    """
    Get all witnesses for a given keypath prefix.
    Includes

    1. witnesses along the keypath and
    2. witnesses in the subtrie of the last node in the keypath
    """
    validate_is_bytes(key)

    return tuple(_get_witness_for_key_prefix(db, node_hash, encode_to_bin(key)))
def function[get_witness_for_key_prefix, parameter[db, node_hash, key]]:
    constant[
    Get all witnesses for a given keypath prefix.
    Includes

    1. witnesses along the keypath and
    2. witnesses in the subtrie of the last node in the keypath
    ]
    call[name[validate_is_bytes], parameter[name[key]]]
    return[call[name[tuple], parameter[call[name[_get_witness_for_key_prefix], parameter[name[db], name[node_hash], call[name[encode_to_bin], parameter[name[key]]]]]]]]
keyword[def] identifier[get_witness_for_key_prefix] ( identifier[db] , identifier[node_hash] , identifier[key] ): literal[string] identifier[validate_is_bytes] ( identifier[key] ) keyword[return] identifier[tuple] ( identifier[_get_witness_for_key_prefix] ( identifier[db] , identifier[node_hash] , identifier[encode_to_bin] ( identifier[key] )))
def get_witness_for_key_prefix(db, node_hash, key): """ Get all witnesses for a given keypath prefix. Includes 1. witnesses along the keypath and 2. witnesses in the subtrie of the last node in the keypath """ validate_is_bytes(key) return tuple(_get_witness_for_key_prefix(db, node_hash, encode_to_bin(key)))
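A usage sketch, assuming `db` and `root_hash` name a binary-trie backing store and its root node hash; the key prefix is illustrative.

# Sketch: returns witness nodes along the keypath for b'\x12\x34' plus
# the subtrie under the last keypath node.
for node in get_witness_for_key_prefix(db, root_hash, b'\x12\x34'):
    print(node)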
def add_emission(self, chunksize=2**19, comp_filter=default_compression, overwrite=False, params=dict(), chunkslice='bytes'): """Add the `emission` array in '/trajectories'. """ nparams = self.numeric_params num_particles = nparams['np'] return self.add_trajectory('emission', shape=(num_particles, 0), overwrite=overwrite, chunksize=chunksize, comp_filter=comp_filter, atom=tables.Float32Atom(), title='Emission trace of each particle', params=params)
def function[add_emission, parameter[self, chunksize, comp_filter, overwrite, params, chunkslice]]: constant[Add the `emission` array in '/trajectories'. ] variable[nparams] assign[=] name[self].numeric_params variable[num_particles] assign[=] call[name[nparams]][constant[np]] return[call[name[self].add_trajectory, parameter[constant[emission]]]]
keyword[def] identifier[add_emission] ( identifier[self] , identifier[chunksize] = literal[int] ** literal[int] , identifier[comp_filter] = identifier[default_compression] , identifier[overwrite] = keyword[False] , identifier[params] = identifier[dict] (), identifier[chunkslice] = literal[string] ): literal[string] identifier[nparams] = identifier[self] . identifier[numeric_params] identifier[num_particles] = identifier[nparams] [ literal[string] ] keyword[return] identifier[self] . identifier[add_trajectory] ( literal[string] , identifier[shape] =( identifier[num_particles] , literal[int] ), identifier[overwrite] = identifier[overwrite] , identifier[chunksize] = identifier[chunksize] , identifier[comp_filter] = identifier[comp_filter] , identifier[atom] = identifier[tables] . identifier[Float32Atom] (), identifier[title] = literal[string] , identifier[params] = identifier[params] )
def add_emission(self, chunksize=2 ** 19, comp_filter=default_compression, overwrite=False, params=dict(), chunkslice='bytes'): """Add the `emission` array in '/trajectories'. """ nparams = self.numeric_params num_particles = nparams['np'] return self.add_trajectory('emission', shape=(num_particles, 0), overwrite=overwrite, chunksize=chunksize, comp_filter=comp_filter, atom=tables.Float32Atom(), title='Emission trace of each particle', params=params)
def rgb2cmy(self, img, whitebg=False): """transforms image from RGB to CMY""" tmp = img*1.0 if whitebg: tmp = (1.0 - (img - img.min())/(img.max() - img.min())) out = tmp*0.0 out[:,:,0] = (tmp[:,:,1] + tmp[:,:,2])/2.0 out[:,:,1] = (tmp[:,:,0] + tmp[:,:,2])/2.0 out[:,:,2] = (tmp[:,:,0] + tmp[:,:,1])/2.0 return out
def function[rgb2cmy, parameter[self, img, whitebg]]: constant[transforms image from RGB to CMY] variable[tmp] assign[=] binary_operation[name[img] * constant[1.0]] if name[whitebg] begin[:] variable[tmp] assign[=] binary_operation[constant[1.0] - binary_operation[binary_operation[name[img] - call[name[img].min, parameter[]]] / binary_operation[call[name[img].max, parameter[]] - call[name[img].min, parameter[]]]]] variable[out] assign[=] binary_operation[name[tmp] * constant[0.0]] call[name[out]][tuple[[<ast.Slice object at 0x7da207f00b20>, <ast.Slice object at 0x7da207f01570>, <ast.Constant object at 0x7da207f019c0>]]] assign[=] binary_operation[binary_operation[call[name[tmp]][tuple[[<ast.Slice object at 0x7da207f02020>, <ast.Slice object at 0x7da207f02800>, <ast.Constant object at 0x7da207f01660>]]] + call[name[tmp]][tuple[[<ast.Slice object at 0x7da207f02fb0>, <ast.Slice object at 0x7da207f013c0>, <ast.Constant object at 0x7da207f03370>]]]] / constant[2.0]] call[name[out]][tuple[[<ast.Slice object at 0x7da207f002b0>, <ast.Slice object at 0x7da207f02b30>, <ast.Constant object at 0x7da207f011b0>]]] assign[=] binary_operation[binary_operation[call[name[tmp]][tuple[[<ast.Slice object at 0x7da207f01780>, <ast.Slice object at 0x7da207f01960>, <ast.Constant object at 0x7da207f01b70>]]] + call[name[tmp]][tuple[[<ast.Slice object at 0x7da207f01a80>, <ast.Slice object at 0x7da207f015a0>, <ast.Constant object at 0x7da207f03340>]]]] / constant[2.0]] call[name[out]][tuple[[<ast.Slice object at 0x7da207f039a0>, <ast.Slice object at 0x7da207f03160>, <ast.Constant object at 0x7da207f001f0>]]] assign[=] binary_operation[binary_operation[call[name[tmp]][tuple[[<ast.Slice object at 0x7da207f03be0>, <ast.Slice object at 0x7da207f03460>, <ast.Constant object at 0x7da207f01d50>]]] + call[name[tmp]][tuple[[<ast.Slice object at 0x7da207f015d0>, <ast.Slice object at 0x7da207f03310>, <ast.Constant object at 0x7da207f03f40>]]]] / constant[2.0]] return[name[out]]
keyword[def] identifier[rgb2cmy] ( identifier[self] , identifier[img] , identifier[whitebg] = keyword[False] ): literal[string] identifier[tmp] = identifier[img] * literal[int] keyword[if] identifier[whitebg] : identifier[tmp] =( literal[int] -( identifier[img] - identifier[img] . identifier[min] ())/( identifier[img] . identifier[max] ()- identifier[img] . identifier[min] ())) identifier[out] = identifier[tmp] * literal[int] identifier[out] [:,:, literal[int] ]=( identifier[tmp] [:,:, literal[int] ]+ identifier[tmp] [:,:, literal[int] ])/ literal[int] identifier[out] [:,:, literal[int] ]=( identifier[tmp] [:,:, literal[int] ]+ identifier[tmp] [:,:, literal[int] ])/ literal[int] identifier[out] [:,:, literal[int] ]=( identifier[tmp] [:,:, literal[int] ]+ identifier[tmp] [:,:, literal[int] ])/ literal[int] keyword[return] identifier[out]
def rgb2cmy(self, img, whitebg=False): """transforms image from RGB to CMY""" tmp = img * 1.0 if whitebg: tmp = 1.0 - (img - img.min()) / (img.max() - img.min()) # depends on [control=['if'], data=[]] out = tmp * 0.0 out[:, :, 0] = (tmp[:, :, 1] + tmp[:, :, 2]) / 2.0 out[:, :, 1] = (tmp[:, :, 0] + tmp[:, :, 2]) / 2.0 out[:, :, 2] = (tmp[:, :, 0] + tmp[:, :, 1]) / 2.0 return out
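A worked check of the channel arithmetic above, runnable on its own: each output channel is the mean of the two other input channels, so pure red yields (0, 0.5, 0.5).

import numpy as np

img = np.zeros((1, 1, 3))
img[0, 0] = [1.0, 0.0, 0.0]               # pure red, whitebg=False case
c = (img[:, :, 1] + img[:, :, 2]) / 2.0   # (G + B) / 2 -> 0.0
m = (img[:, :, 0] + img[:, :, 2]) / 2.0   # (R + B) / 2 -> 0.5
y = (img[:, :, 0] + img[:, :, 1]) / 2.0   # (R + G) / 2 -> 0.5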
def clip(self, lower=0, upper=127): """ Clip the pianoroll by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianoroll. Defaults to 0. upper : int or float The upper bound to clip the pianoroll. Defaults to 127. """ self.pianoroll = self.pianoroll.clip(lower, upper)
def function[clip, parameter[self, lower, upper]]: constant[ Clip the pianoroll by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianoroll. Defaults to 0. upper : int or float The upper bound to clip the pianoroll. Defaults to 127. ] name[self].pianoroll assign[=] call[name[self].pianoroll.clip, parameter[name[lower], name[upper]]]
keyword[def] identifier[clip] ( identifier[self] , identifier[lower] = literal[int] , identifier[upper] = literal[int] ): literal[string] identifier[self] . identifier[pianoroll] = identifier[self] . identifier[pianoroll] . identifier[clip] ( identifier[lower] , identifier[upper] )
def clip(self, lower=0, upper=127): """ Clip the pianoroll by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianoroll. Defaults to 0. upper : int or float The upper bound to clip the pianoroll. Defaults to 127. """ self.pianoroll = self.pianoroll.clip(lower, upper)
def _update_slider_length_horizontal(self):
    """
    Measure the length of the slider and update the value of
    self._sliderlength.

    self.scale.identify(x, y) is used to find the first and last pixels of
    the slider. Indeed, self.scale.identify(x, y) returns the element
    of the ttk.Scale to which the pixel (x, y) belongs. So, the length of
    the slider is determined by scanning horizontally the pixels of the
    scale.
    """
    if not self.scale.identify(2, 2):
        # if self.scale.identify(2, 2) is an empty string it means that the scale
        # is not displayed yet, so we cannot measure the length of the slider,
        # so wait for the scale to be properly displayed.
        # binding to <Map> event does not work, it can still be too soon to
        # get any result from identify
        self.after(10, self._update_slider_length_horizontal)
    else:
        w = self.scale.winfo_width()
        i = 0
        # find the first pixel of the slider
        while i < w and 'slider' not in self.scale.identify(i, 2):
            # increment i until the pixel (i, 2) belongs to the slider
            i += 1
        j = i
        # find the last pixel of the slider
        while j < w and 'slider' in self.scale.identify(j, 2):
            # increment j until the pixel (2, j) no longer belongs to the slider
            j += 1
        if j == i:
            # the length of the slider was not determined properly,
            # so the value of the sliderlength from the style is used
            self._sliderlength = self.style.lookup(self._style_name, 'sliderlength', default=30)
        else:
            # update ticks and label placement
            self._sliderlength = j - i
            self._update_display()
def function[_update_slider_length_horizontal, parameter[self]]: constant[ Measure the length of the slider and update the value of self._sliderlength. self.scale.identify(x, y) is used to find the first and last pixels of the slider. Indeed, self.scale.identify(x, y) returns the element of the ttk.Scale to which the pixel (x, y) belongs. So, the length of the slider is determined by scanning horizontally the pixels of the scale. ] if <ast.UnaryOp object at 0x7da1b231e590> begin[:] call[name[self].after, parameter[constant[10], name[self]._update_slider_length_horizontal]]
keyword[def] identifier[_update_slider_length_horizontal] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[scale] . identifier[identify] ( literal[int] , literal[int] ): identifier[self] . identifier[after] ( literal[int] , identifier[self] . identifier[_update_slider_length_horizontal] ) keyword[else] : identifier[w] = identifier[self] . identifier[scale] . identifier[winfo_width] () identifier[i] = literal[int] keyword[while] identifier[i] < identifier[w] keyword[and] literal[string] keyword[not] keyword[in] identifier[self] . identifier[scale] . identifier[identify] ( identifier[i] , literal[int] ): identifier[i] += literal[int] identifier[j] = identifier[i] keyword[while] identifier[j] < identifier[w] keyword[and] literal[string] keyword[in] identifier[self] . identifier[scale] . identifier[identify] ( identifier[j] , literal[int] ): identifier[j] += literal[int] keyword[if] identifier[j] == identifier[i] : identifier[self] . identifier[_sliderlength] = identifier[self] . identifier[style] . identifier[lookup] ( identifier[self] . identifier[_style_name] , literal[string] , identifier[default] = literal[int] ) keyword[else] : identifier[self] . identifier[_sliderlength] = identifier[j] - identifier[i] identifier[self] . identifier[_update_display] ()
def _update_slider_length_horizontal(self):
    """
    Measure the length of the slider and update the value of
    self._sliderlength.

    self.scale.identify(x, y) is used to find the first and last pixels of
    the slider. Indeed, self.scale.identify(x, y) returns the element
    of the ttk.Scale to which the pixel (x, y) belongs. So, the length of
    the slider is determined by scanning horizontally the pixels of the
    scale.
    """
    if not self.scale.identify(2, 2):
        # if self.scale.identify(2, 2) is an empty string it means that the scale
        # is not displayed yet, so we cannot measure the length of the slider,
        # so wait for the scale to be properly displayed.
        # binding to <Map> event does not work, it can still be too soon to
        # get any result from identify
        self.after(10, self._update_slider_length_horizontal) # depends on [control=['if'], data=[]]
    else:
        w = self.scale.winfo_width()
        i = 0
        # find the first pixel of the slider
        while i < w and 'slider' not in self.scale.identify(i, 2):
            # increment i until the pixel (i, 2) belongs to the slider
            i += 1 # depends on [control=['while'], data=[]]
        j = i
        # find the last pixel of the slider
        while j < w and 'slider' in self.scale.identify(j, 2):
            # increment j until the pixel (2, j) no longer belongs to the slider
            j += 1 # depends on [control=['while'], data=[]]
        if j == i:
            # the length of the slider was not determined properly,
            # so the value of the sliderlength from the style is used
            self._sliderlength = self.style.lookup(self._style_name, 'sliderlength', default=30) # depends on [control=['if'], data=[]]
        else:
            # update ticks and label placement
            self._sliderlength = j - i
            self._update_display()
def get_composition_search_session_for_repository(self, repository_id, proxy): """Gets a composition search session for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionSearchSession) - a CompositionSearchSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_search() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_composition_search() and supports_visible_federation() are true. """ if repository_id is None: raise NullArgument() if not self.supports_composition_search() or not self.supports_visible_federation(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.CompositionSearchSession(repository_id, proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
def function[get_composition_search_session_for_repository, parameter[self, repository_id, proxy]]: constant[Gets a composition search session for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionSearchSession) - a CompositionSearchSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_search() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_composition_search() and supports_visible_federation() are true. ] if compare[name[repository_id] is constant[None]] begin[:] <ast.Raise object at 0x7da1b0a616f0> if <ast.BoolOp object at 0x7da1b0a63400> begin[:] <ast.Raise object at 0x7da1b0a66590> <ast.Try object at 0x7da1b0a655d0> variable[proxy] assign[=] call[name[self]._convert_proxy, parameter[name[proxy]]] <ast.Try object at 0x7da1b0949060> return[name[session]]
keyword[def] identifier[get_composition_search_session_for_repository] ( identifier[self] , identifier[repository_id] , identifier[proxy] ): literal[string] keyword[if] identifier[repository_id] keyword[is] keyword[None] : keyword[raise] identifier[NullArgument] () keyword[if] keyword[not] identifier[self] . identifier[supports_composition_search] () keyword[or] keyword[not] identifier[self] . identifier[supports_visible_federation] (): keyword[raise] identifier[Unimplemented] () keyword[try] : keyword[from] . keyword[import] identifier[sessions] keyword[except] identifier[ImportError] : keyword[raise] identifier[proxy] = identifier[self] . identifier[_convert_proxy] ( identifier[proxy] ) keyword[try] : identifier[session] = identifier[sessions] . identifier[CompositionSearchSession] ( identifier[repository_id] , identifier[proxy] , identifier[runtime] = identifier[self] . identifier[_runtime] ) keyword[except] identifier[AttributeError] : keyword[raise] keyword[return] identifier[session]
def get_composition_search_session_for_repository(self, repository_id, proxy): """Gets a composition search session for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionSearchSession) - a CompositionSearchSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_search() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_composition_search() and supports_visible_federation() are true. """ if repository_id is None: raise NullArgument() # depends on [control=['if'], data=[]] if not self.supports_composition_search() or not self.supports_visible_federation(): raise Unimplemented() # depends on [control=['if'], data=[]] try: from . import sessions # depends on [control=['try'], data=[]] except ImportError: raise # OperationFailed() # depends on [control=['except'], data=[]] proxy = self._convert_proxy(proxy) try: session = sessions.CompositionSearchSession(repository_id, proxy, runtime=self._runtime) # depends on [control=['try'], data=[]] except AttributeError: raise # OperationFailed() # depends on [control=['except'], data=[]] return session
def eval_objfn(self):
    r"""Compute components of objective function as well as total
    contribution to objective function. Data fidelity term is
    :math:`\| W (\mathbf{x} - \mathbf{s}) \|_1` and regularisation term
    is :math:`(1/2) \| D \mathbf{x} \|_2^2`.
    """

    gvr = self.obfn_gvar()
    dfd = np.sum(np.abs(self.Wdf * gvr))
    reg = 0.5*np.linalg.norm(
        sl.idctii(self.Alpha*sl.dctii(self.X, axes=self.axes),
                  axes=self.axes))**2
    obj = dfd + self.lmbda*reg
    return (obj, dfd, reg)
def function[eval_objfn, parameter[self]]:
    constant[Compute components of objective function as well as total
    contribution to objective function. Data fidelity term is
    :math:`\| W (\mathbf{x} - \mathbf{s}) \|_1` and regularisation term
    is :math:`(1/2) \| D \mathbf{x} \|_2^2`.
    ]
    variable[gvr] assign[=] call[name[self].obfn_gvar, parameter[]]
    variable[dfd] assign[=] call[name[np].sum, parameter[call[name[np].abs, parameter[binary_operation[name[self].Wdf * name[gvr]]]]]]
    variable[reg] assign[=] binary_operation[constant[0.5] * binary_operation[call[name[np].linalg.norm, parameter[call[name[sl].idctii, parameter[binary_operation[name[self].Alpha * call[name[sl].dctii, parameter[name[self].X]]]]]]] ** constant[2]]]
    variable[obj] assign[=] binary_operation[name[dfd] + binary_operation[name[self].lmbda * name[reg]]]
    return[tuple[[<ast.Name object at 0x7da1b06ea2c0>, <ast.Name object at 0x7da1b06eb700>, <ast.Name object at 0x7da1b06e9750>]]]
keyword[def] identifier[eval_objfn] ( identifier[self] ): literal[string] identifier[gvr] = identifier[self] . identifier[obfn_gvar] () identifier[dfd] = identifier[np] . identifier[sum] ( identifier[np] . identifier[abs] ( identifier[self] . identifier[Wdf] * identifier[gvr] )) identifier[reg] = literal[int] * identifier[np] . identifier[linalg] . identifier[norm] ( identifier[sl] . identifier[idctii] ( identifier[self] . identifier[Alpha] * identifier[sl] . identifier[dctii] ( identifier[self] . identifier[X] , identifier[axes] = identifier[self] . identifier[axes] ), identifier[axes] = identifier[self] . identifier[axes] ))** literal[int] identifier[obj] = identifier[dfd] + identifier[self] . identifier[lmbda] * identifier[reg] keyword[return] ( identifier[obj] , identifier[dfd] , identifier[reg] )
def eval_objfn(self):
    """Compute components of objective function as well as total
    contribution to objective function. Data fidelity term is
    :math:`\\| W (\\mathbf{x} - \\mathbf{s}) \\|_1` and regularisation term
    is :math:`(1/2) \\| D \\mathbf{x} \\|_2^2`.
    """
    gvr = self.obfn_gvar()
    dfd = np.sum(np.abs(self.Wdf * gvr))
    reg = 0.5 * np.linalg.norm(sl.idctii(self.Alpha * sl.dctii(self.X, axes=self.axes), axes=self.axes)) ** 2
    obj = dfd + self.lmbda * reg
    return (obj, dfd, reg)
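Read directly off the code, the returned objective pairs a weighted l1 data term with a DCT-domain quadratic penalty. This is an interpretation of the implementation above, not a quote from its docstring: W stands for Wdf, C for the dctii operator, and the residual returned by obfn_gvar() is assumed to be x - s.

\[
J(\mathbf{x}) = \underbrace{\| W (\mathbf{x} - \mathbf{s}) \|_1}_{\texttt{dfd}}
  + \lambda \, \underbrace{\tfrac{1}{2} \big\| C^{-1} \big( \alpha \odot C \mathbf{x} \big) \big\|_2^2}_{\texttt{reg}}
\]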
def dict_trim_by_values(self, lower=None, upper=None): """ Filter dictionary values to a given range (inclusive). Trimming is only performed on values which can be compared to the bound values. Fails on SArrays whose data type is not ``dict``. Parameters ---------- lower : int or long or float, optional The lowest dictionary value that would be retained in the result. If not given, lower bound is not applied. upper : int or long or float, optional The highest dictionary value that would be retained in the result. If not given, upper bound is not applied. Returns ------- out : SArray An SArray of dictionary type, with each dict element trimmed according to the input criteria. See Also -------- dict_trim_by_keys Examples -------- >>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7}, {"this": 2, "are": 1, "cat": 5}]) >>> sa.dict_trim_by_values(2,5) dtype: dict Rows: 2 [{'is': 5}, {'this': 2, 'cat': 5}] >>> sa.dict_trim_by_values(upper=5) dtype: dict Rows: 2 [{'this': 1, 'is': 5}, {'this': 2, 'are': 1, 'cat': 5}] """ if not (lower is None or isinstance(lower, numbers.Number)): raise TypeError("lower bound has to be a numeric value") if not (upper is None or isinstance(upper, numbers.Number)): raise TypeError("upper bound has to be a numeric value") with cython_context(): return SArray(_proxy=self.__proxy__.dict_trim_by_values(lower, upper))
def function[dict_trim_by_values, parameter[self, lower, upper]]: constant[ Filter dictionary values to a given range (inclusive). Trimming is only performed on values which can be compared to the bound values. Fails on SArrays whose data type is not ``dict``. Parameters ---------- lower : int or long or float, optional The lowest dictionary value that would be retained in the result. If not given, lower bound is not applied. upper : int or long or float, optional The highest dictionary value that would be retained in the result. If not given, upper bound is not applied. Returns ------- out : SArray An SArray of dictionary type, with each dict element trimmed according to the input criteria. See Also -------- dict_trim_by_keys Examples -------- >>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7}, {"this": 2, "are": 1, "cat": 5}]) >>> sa.dict_trim_by_values(2,5) dtype: dict Rows: 2 [{'is': 5}, {'this': 2, 'cat': 5}] >>> sa.dict_trim_by_values(upper=5) dtype: dict Rows: 2 [{'this': 1, 'is': 5}, {'this': 2, 'are': 1, 'cat': 5}] ] if <ast.UnaryOp object at 0x7da1b1f0b970> begin[:] <ast.Raise object at 0x7da1b1f0be80> if <ast.UnaryOp object at 0x7da1b1f0ab00> begin[:] <ast.Raise object at 0x7da1b1f0a5c0> with call[name[cython_context], parameter[]] begin[:] return[call[name[SArray], parameter[]]]
keyword[def] identifier[dict_trim_by_values] ( identifier[self] , identifier[lower] = keyword[None] , identifier[upper] = keyword[None] ): literal[string] keyword[if] keyword[not] ( identifier[lower] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[lower] , identifier[numbers] . identifier[Number] )): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] keyword[not] ( identifier[upper] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[upper] , identifier[numbers] . identifier[Number] )): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[with] identifier[cython_context] (): keyword[return] identifier[SArray] ( identifier[_proxy] = identifier[self] . identifier[__proxy__] . identifier[dict_trim_by_values] ( identifier[lower] , identifier[upper] ))
def dict_trim_by_values(self, lower=None, upper=None): """ Filter dictionary values to a given range (inclusive). Trimming is only performed on values which can be compared to the bound values. Fails on SArrays whose data type is not ``dict``. Parameters ---------- lower : int or long or float, optional The lowest dictionary value that would be retained in the result. If not given, lower bound is not applied. upper : int or long or float, optional The highest dictionary value that would be retained in the result. If not given, upper bound is not applied. Returns ------- out : SArray An SArray of dictionary type, with each dict element trimmed according to the input criteria. See Also -------- dict_trim_by_keys Examples -------- >>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7}, {"this": 2, "are": 1, "cat": 5}]) >>> sa.dict_trim_by_values(2,5) dtype: dict Rows: 2 [{'is': 5}, {'this': 2, 'cat': 5}] >>> sa.dict_trim_by_values(upper=5) dtype: dict Rows: 2 [{'this': 1, 'is': 5}, {'this': 2, 'are': 1, 'cat': 5}] """ if not (lower is None or isinstance(lower, numbers.Number)): raise TypeError('lower bound has to be a numeric value') # depends on [control=['if'], data=[]] if not (upper is None or isinstance(upper, numbers.Number)): raise TypeError('upper bound has to be a numeric value') # depends on [control=['if'], data=[]] with cython_context(): return SArray(_proxy=self.__proxy__.dict_trim_by_values(lower, upper)) # depends on [control=['with'], data=[]]
def update(self, list_id, segment_id, data): """ Update a specific segment in a list. :param list_id: The unique id for the list. :type list_id: :py:class:`str` :param segment_id: The unique id for the segment. :type segment_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { "name": string* } """ self.list_id = list_id self.segment_id = segment_id if 'name' not in data: raise KeyError('The list segment must have a name') return self._mc_client._patch(url=self._build_path(list_id, 'segments', segment_id), data=data)
def function[update, parameter[self, list_id, segment_id, data]]: constant[ Update a specific segment in a list. :param list_id: The unique id for the list. :type list_id: :py:class:`str` :param segment_id: The unique id for the segment. :type segment_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { "name": string* } ] name[self].list_id assign[=] name[list_id] name[self].segment_id assign[=] name[segment_id] if compare[constant[name] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:] <ast.Raise object at 0x7da1b006fca0> return[call[name[self]._mc_client._patch, parameter[]]]
keyword[def] identifier[update] ( identifier[self] , identifier[list_id] , identifier[segment_id] , identifier[data] ): literal[string] identifier[self] . identifier[list_id] = identifier[list_id] identifier[self] . identifier[segment_id] = identifier[segment_id] keyword[if] literal[string] keyword[not] keyword[in] identifier[data] : keyword[raise] identifier[KeyError] ( literal[string] ) keyword[return] identifier[self] . identifier[_mc_client] . identifier[_patch] ( identifier[url] = identifier[self] . identifier[_build_path] ( identifier[list_id] , literal[string] , identifier[segment_id] ), identifier[data] = identifier[data] )
def update(self, list_id, segment_id, data): """ Update a specific segment in a list. :param list_id: The unique id for the list. :type list_id: :py:class:`str` :param segment_id: The unique id for the segment. :type segment_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { "name": string* } """ self.list_id = list_id self.segment_id = segment_id if 'name' not in data: raise KeyError('The list segment must have a name') # depends on [control=['if'], data=[]] return self._mc_client._patch(url=self._build_path(list_id, 'segments', segment_id), data=data)
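A usage sketch for the segment rename; `segments` stands for an instance of the wrapper class above and both ids are illustrative MailChimp-style identifiers.

# Sketch: ids are placeholders; a missing 'name' key raises KeyError.
segments.update(list_id='57afe96172', segment_id='49377',
                data={'name': 'Monthly newsletter readers'})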
def set_inteface_up(devid, ifindex):
    """
    Takes the devid and ifindex of a specific device and interface and issues a RESTful call to "undo shut" the
    specified interface on the target device.
    :param devid: int or str value of the target device
    :param ifindex: int or str value of the target interface
    :return: HTTP status code 204 with no values.
    """
    if auth is None or url is None:  # checks to see if the imc credentials are already available
        set_imc_creds()
    set_int_up_url = "/imcrs/plat/res/device/" + str(devid) + "/interface/" + str(ifindex) + "/up"
    f_url = url + set_int_up_url
    payload = None  # no request body is required for this call
    r = requests.put(f_url, auth=auth,
                     headers=headers)  # issues the PUT request to the IMC REST API endpoint
    print(r.status_code)
    if r.status_code == 204:
        return r.status_code
    else:
        print("An error has occurred")
def function[set_inteface_up, parameter[devid, ifindex]]:
    constant[
    Takes the devid and ifindex of a specific device and interface and issues a RESTful call to "undo shut" the
    specified interface on the target device.
    :param devid: int or str value of the target device
    :param ifindex: int or str value of the target interface
    :return: HTTP status code 204 with no values.
    ]
    if <ast.BoolOp object at 0x7da204344070> begin[:]
    call[name[set_imc_creds], parameter[]]
    variable[set_int_up_url] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[/imcrs/plat/res/device/] + call[name[str], parameter[name[devid]]]] + constant[/interface/]] + call[name[str], parameter[name[ifindex]]]] + constant[/up]]
    variable[f_url] assign[=] binary_operation[name[url] + name[set_int_up_url]]
    variable[payload] assign[=] constant[None]
    variable[r] assign[=] call[name[requests].put, parameter[name[f_url]]]
    call[name[print], parameter[name[r].status_code]]
    if compare[name[r].status_code equal[==] constant[204]] begin[:]
    return[name[r].status_code]
keyword[def] identifier[set_inteface_up] ( identifier[devid] , identifier[ifindex] ): literal[string] keyword[if] identifier[auth] keyword[is] keyword[None] keyword[or] identifier[url] keyword[is] keyword[None] : identifier[set_imc_creds] () identifier[set_int_up_url] = literal[string] + identifier[str] ( identifier[devid] )+ literal[string] + identifier[str] ( identifier[ifindex] )+ literal[string] identifier[f_url] = identifier[url] + identifier[set_int_up_url] identifier[payload] = keyword[None] identifier[r] = identifier[requests] . identifier[put] ( identifier[f_url] , identifier[auth] = identifier[auth] , identifier[headers] = identifier[headers] ) identifier[print] ( identifier[r] . identifier[status_code] ) keyword[if] identifier[r] . identifier[status_code] == literal[int] : keyword[return] identifier[r] . identifier[status_code] keyword[else] : identifier[print] ( literal[string] )
def set_inteface_up(devid, ifindex):
    """
    Takes the devid and ifindex of a specific device and interface and issues a RESTful call to "undo shut" the
    specified interface on the target device.
    :param devid: int or str value of the target device
    :param ifindex: int or str value of the target interface
    :return: HTTP status code 204 with no values.
    """
    if auth is None or url is None:  # checks to see if the imc credentials are already available
        set_imc_creds() # depends on [control=['if'], data=[]]
    set_int_up_url = '/imcrs/plat/res/device/' + str(devid) + '/interface/' + str(ifindex) + '/up'
    f_url = url + set_int_up_url
    payload = None  # no request body is required for this call
    r = requests.put(f_url, auth=auth, headers=headers)  # issues the PUT request to the IMC REST API endpoint
    print(r.status_code)
    if r.status_code == 204:
        return r.status_code # depends on [control=['if'], data=[]]
    else:
        print('An error has occurred')
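A usage sketch grounded in the function's own contract: a 204 status code signals success, anything else prints an error and returns None.

# Sketch: device id and ifindex values are placeholders.
status = set_inteface_up(10, 9)
if status == 204:
    print('interface administratively up')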
def get_plugins_of_type(self, plugin_class):
    """ Retrieve a list of plugins of the desired class; raises KeyError if none are found """
    logger.debug("Searching for plugins: %s", plugin_class)
    matches = [plugin for plugin in self.plugins.values() if isinstance(plugin, plugin_class)]
    if matches:
        return matches
    else:
        raise KeyError("Requested plugin type not found: %s" % plugin_class)
def function[get_plugins_of_type, parameter[self, plugin_class]]: constant[ Retrieve a list of plugins of desired class, KeyError raised otherwise ] call[name[logger].debug, parameter[constant[Searching for plugins: %s], name[plugin_class]]] variable[matches] assign[=] <ast.ListComp object at 0x7da1b03a86a0> if name[matches] begin[:] return[name[matches]]
keyword[def] identifier[get_plugins_of_type] ( identifier[self] , identifier[plugin_class] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] , identifier[plugin_class] ) identifier[matches] =[ identifier[plugin] keyword[for] identifier[plugin] keyword[in] identifier[self] . identifier[plugins] . identifier[values] () keyword[if] identifier[isinstance] ( identifier[plugin] , identifier[plugin_class] )] keyword[if] identifier[matches] : keyword[return] identifier[matches] keyword[else] : keyword[raise] identifier[KeyError] ( literal[string] % identifier[plugin_class] )
def get_plugins_of_type(self, plugin_class): """ Retrieve a list of plugins of desired class, KeyError raised otherwise """ logger.debug('Searching for plugins: %s', plugin_class) matches = [plugin for plugin in self.plugins.values() if isinstance(plugin, plugin_class)] if matches: return matches # depends on [control=['if'], data=[]] else: raise KeyError('Requested plugin type not found: %s' % plugin_class)
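The registry lookup is easy to exercise in isolation; a self-contained sketch in which `Registry` and the plugin classes are hypothetical stand-ins for the real plugin manager.

import logging

logger = logging.getLogger(__name__)

class BasePlugin:
    pass

class StatsPlugin(BasePlugin):
    pass

class Registry:
    get_plugins_of_type = get_plugins_of_type  # reuse the method above

    def __init__(self, plugins):
        self.plugins = plugins  # name -> plugin instance

reg = Registry({"stats": StatsPlugin(), "other": object()})
print(reg.get_plugins_of_type(BasePlugin))  # [<StatsPlugin object ...>]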
def pop(self, key, default=NotDefined):
    # type: (Hashable, Any) -> Any
    """
    Pop the last item for a list on the dict. Afterwards the
    key is removed from the dict, so additional values are discarded:
    >>> d = MultiValueDict({"foo": [1, 2, 3]})
    >>> d.pop("foo")
    3
    >>> "foo" in d
    False

    :param key: the key to pop.
    :param default: if provided the value to return if the key was
        not in the dictionary.
    """
    try:
        return dict.pop(self, key)[-1]
    except LookupError:
        if default is NotDefined:
            raise MultiValueDictKeyError(key)
        return default
def function[pop, parameter[self, key, default]]:
    constant[
    Pop the last item for a list on the dict. Afterwards the
    key is removed from the dict, so additional values are discarded:
    >>> d = MultiValueDict({"foo": [1, 2, 3]})
    >>> d.pop("foo")
    3
    >>> "foo" in d
    False

    :param key: the key to pop.
    :param default: if provided the value to return if the key was
        not in the dictionary.
    ]
    <ast.Try object at 0x7da1b26afc40>
keyword[def] identifier[pop] ( identifier[self] , identifier[key] , identifier[default] = identifier[NotDefined] ): literal[string] keyword[try] : keyword[return] identifier[dict] . identifier[pop] ( identifier[self] , identifier[key] )[- literal[int] ] keyword[except] identifier[LookupError] : keyword[if] identifier[default] keyword[is] identifier[NotDefined] : keyword[raise] identifier[MultiValueDictKeyError] ( identifier[key] ) keyword[return] identifier[default]
def pop(self, key, default=NotDefined): # type: (Hashable, Any) -> Any '\n Pop the last item for a list on the dict. Afterwards the\n key is removed from the dict, so additional values are discarded:\n >>> d = MultiValueDict({"foo": [1, 2, 3]})\n >>> d.pop("foo")\n 3\n >>> "foo" in d\n False\n\n :param key: the key to pop.\n :param default: if provided the value to return if the key was\n not in the dictionary.\n ' try: return dict.pop(self, key)[-1] # depends on [control=['try'], data=[]] except LookupError: if default is NotDefined: raise MultiValueDictKeyError(key) # depends on [control=['if'], data=[]] return default # depends on [control=['except'], data=[]]
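Both branches in one quick run, assuming the surrounding module provides MultiValueDict and MultiValueDictKeyError as the doctest implies:

d = MultiValueDict({"foo": [1, 2, 3]})
print(d.pop("foo"))             # 3, the last value wins
print(d.pop("foo", "absent"))   # 'absent', the key is gone now
try:
    d.pop("foo")                # no default: raises
except MultiValueDictKeyError as err:
    print("missing key:", err)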
def add_route(self, router, index=None):
    '''Add a new :class:`Router` to the :attr:`routes` list.
    '''
    assert isinstance(router, Router), 'Not a valid Router'
    assert router is not self, 'cannot add self to children'
    for r in self.routes:
        if r == router:
            return r
        elif r._route == router._route:
            raise ValueError('Cannot add route %s. Already available'
                             % r._route)
    # Remove from previous parent
    if router.parent:
        router.parent.remove_child(router)
    router._parent = self
    if index is None:
        self.routes.append(router)
    else:
        self.routes.insert(index, router)
    return router
def function[add_route, parameter[self, router, index]]: constant[Add a new :class:`Router` to the :attr:`routes` list. ] assert[call[name[isinstance], parameter[name[router], name[Router]]]] assert[compare[name[router] is_not name[self]]] for taget[name[r]] in starred[name[self].routes] begin[:] if compare[name[r] equal[==] name[router]] begin[:] return[name[r]] if name[router].parent begin[:] call[name[router].parent.remove_child, parameter[name[router]]] name[router]._parent assign[=] name[self] if compare[name[index] is constant[None]] begin[:] call[name[self].routes.append, parameter[name[router]]] return[name[router]]
keyword[def] identifier[add_route] ( identifier[self] , identifier[router] , identifier[index] = keyword[None] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[router] , identifier[Router] ), literal[string] keyword[assert] identifier[router] keyword[is] keyword[not] identifier[self] , literal[string] keyword[for] identifier[r] keyword[in] identifier[self] . identifier[routes] : keyword[if] identifier[r] == identifier[router] : keyword[return] identifier[r] keyword[elif] identifier[r] . identifier[_route] == identifier[router] . identifier[_route] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[r] . identifier[_route] ) keyword[if] identifier[router] . identifier[parent] : identifier[router] . identifier[parent] . identifier[remove_child] ( identifier[router] ) identifier[router] . identifier[_parent] = identifier[self] keyword[if] identifier[index] keyword[is] keyword[None] : identifier[self] . identifier[routes] . identifier[append] ( identifier[router] ) keyword[else] : identifier[self] . identifier[routes] . identifier[insert] ( identifier[index] , identifier[router] ) keyword[return] identifier[router]
def add_route(self, router, index=None):
    """Add a new :class:`Router` to the :attr:`routes` list.
    """
    assert isinstance(router, Router), 'Not a valid Router'
    assert router is not self, 'cannot add self to children'
    for r in self.routes:
        if r == router:
            return r # depends on [control=['if'], data=['r']]
        elif r._route == router._route:
            raise ValueError('Cannot add route %s. Already available' % r._route) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['r']]
    # Remove from previous parent
    if router.parent:
        router.parent.remove_child(router) # depends on [control=['if'], data=[]]
    router._parent = self
    if index is None:
        self.routes.append(router) # depends on [control=['if'], data=[]]
    else:
        self.routes.insert(index, router)
    return router
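A minimal sketch of the re-parenting behaviour above; this toy `Router` only defines the attributes the method touches and is an assumption modelled on the method body, not the real API.

class Router:  # toy stand-in with just the attributes add_route uses
    add_route = add_route  # reuse the method above

    def __init__(self, route):
        self._route = route
        self._parent = None
        self.routes = []

    @property
    def parent(self):
        return self._parent

    def remove_child(self, router):
        self.routes.remove(router)

root, v2, api = Router("/"), Router("/v2"), Router("/api")
root.add_route(api)
v2.add_route(api)                 # moves api: removed from root first
assert api.parent is v2 and api not in root.routes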
def memoizedproperty(func): """ Decorator to cause a method to cache it's results in self for each combination of inputs and return the cached result on subsequent calls. Does not support named arguments or arg values that are not hashable. >>> class Foo (object): ... _x = 1 ... @memoizedproperty ... def foo(self): ... self._x += 1 ... print('updating and returning {0}'.format(self._x)) ... return self._x ... >>> foo1 = Foo() >>> foo2 = Foo() >>> foo1.foo updating and returning 2 2 >>> foo1.foo 2 >>> foo2.foo updating and returning 2 2 >>> foo1.foo 2 """ inner_attname = '__%s' % func.__name__ def new_fget(self): if not hasattr(self, '_cache_'): self._cache_ = dict() cache = self._cache_ if inner_attname not in cache: cache[inner_attname] = func(self) return cache[inner_attname] return property(new_fget)
def function[memoizedproperty, parameter[func]]: constant[ Decorator to cause a method to cache it's results in self for each combination of inputs and return the cached result on subsequent calls. Does not support named arguments or arg values that are not hashable. >>> class Foo (object): ... _x = 1 ... @memoizedproperty ... def foo(self): ... self._x += 1 ... print('updating and returning {0}'.format(self._x)) ... return self._x ... >>> foo1 = Foo() >>> foo2 = Foo() >>> foo1.foo updating and returning 2 2 >>> foo1.foo 2 >>> foo2.foo updating and returning 2 2 >>> foo1.foo 2 ] variable[inner_attname] assign[=] binary_operation[constant[__%s] <ast.Mod object at 0x7da2590d6920> name[func].__name__] def function[new_fget, parameter[self]]: if <ast.UnaryOp object at 0x7da1b19ce710> begin[:] name[self]._cache_ assign[=] call[name[dict], parameter[]] variable[cache] assign[=] name[self]._cache_ if compare[name[inner_attname] <ast.NotIn object at 0x7da2590d7190> name[cache]] begin[:] call[name[cache]][name[inner_attname]] assign[=] call[name[func], parameter[name[self]]] return[call[name[cache]][name[inner_attname]]] return[call[name[property], parameter[name[new_fget]]]]
keyword[def] identifier[memoizedproperty] ( identifier[func] ): literal[string] identifier[inner_attname] = literal[string] % identifier[func] . identifier[__name__] keyword[def] identifier[new_fget] ( identifier[self] ): keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_cache_] = identifier[dict] () identifier[cache] = identifier[self] . identifier[_cache_] keyword[if] identifier[inner_attname] keyword[not] keyword[in] identifier[cache] : identifier[cache] [ identifier[inner_attname] ]= identifier[func] ( identifier[self] ) keyword[return] identifier[cache] [ identifier[inner_attname] ] keyword[return] identifier[property] ( identifier[new_fget] )
def memoizedproperty(func): """ Decorator to cause a method to cache it's results in self for each combination of inputs and return the cached result on subsequent calls. Does not support named arguments or arg values that are not hashable. >>> class Foo (object): ... _x = 1 ... @memoizedproperty ... def foo(self): ... self._x += 1 ... print('updating and returning {0}'.format(self._x)) ... return self._x ... >>> foo1 = Foo() >>> foo2 = Foo() >>> foo1.foo updating and returning 2 2 >>> foo1.foo 2 >>> foo2.foo updating and returning 2 2 >>> foo1.foo 2 """ inner_attname = '__%s' % func.__name__ def new_fget(self): if not hasattr(self, '_cache_'): self._cache_ = dict() # depends on [control=['if'], data=[]] cache = self._cache_ if inner_attname not in cache: cache[inner_attname] = func(self) # depends on [control=['if'], data=['inner_attname', 'cache']] return cache[inner_attname] return property(new_fget)
def _dataset_report(
        self, dataset, dataset_validation, dataset_index,
        catalog_fields, harvest='none', report=None,
        catalog_homepage=None
):
    """ Generates one line of the `catalog_report`, corresponding to
    one of the datasets that make up the analyzed catalog."""

    # runs a brief QA analysis on the dataset
    good_qa, notes = self._dataset_qa(dataset)

    dataset_report = OrderedDict(catalog_fields)
    dataset_report["valid_dataset_metadata"] = (
        1 if dataset_validation["status"] == "OK" else 0)
    dataset_report["dataset_index"] = dataset_index

    if isinstance(harvest, list):
        dataset_report["harvest"] = 1 if dataset["title"] in harvest else 0
    elif harvest == 'all':
        dataset_report["harvest"] = 1
    elif harvest == 'none':
        dataset_report["harvest"] = 0
    elif harvest == 'valid':
        dataset_report["harvest"] = (
            int(dataset_report["valid_dataset_metadata"]))
    elif harvest == 'good':
        valid_metadata = int(dataset_report["valid_dataset_metadata"]) == 1
        dataset_report["harvest"] = 1 if valid_metadata and good_qa else 0
    elif harvest == 'report':
        if not report:
            raise ValueError("""
You chose 'report' as the harvest criterion, but did not provide a value
for the 'report' argument. Please try again.""")
        datasets_to_harvest = self._extract_datasets_to_harvest(report)
        dataset_report["harvest"] = (
            1 if (dataset_report["catalog_metadata_url"],
                  dataset.get("title")) in datasets_to_harvest
            else 0)
    else:
        raise ValueError("""
{} is not a recognized harvest criterion. Try 'all', 'none', 'valid' or
'report'.""".format(harvest))

    dataset_report.update(
        self._dataset_report_helper(
            dataset, catalog_homepage=catalog_homepage)
    )

    dataset_report["notas"] = "\n\n".join(notes)

    return dataset_report.copy()
def function[_dataset_report, parameter[self, dataset, dataset_validation, dataset_index, catalog_fields, harvest, report, catalog_homepage]]:
    constant[ Generates one line of the `catalog_report`, corresponding to
    one of the datasets that make up the analyzed catalog.]
    <ast.Tuple object at 0x7da1b044e9b0> assign[=] call[name[self]._dataset_qa, parameter[name[dataset]]]
    variable[dataset_report] assign[=] call[name[OrderedDict], parameter[name[catalog_fields]]]
    call[name[dataset_report]][constant[valid_dataset_metadata]] assign[=] <ast.IfExp object at 0x7da1b044eb60>
    call[name[dataset_report]][constant[dataset_index]] assign[=] name[dataset_index]
    if call[name[isinstance], parameter[name[harvest], name[list]]] begin[:]
        call[name[dataset_report]][constant[harvest]] assign[=] <ast.IfExp object at 0x7da1b044c1c0>
    call[name[dataset_report].update, parameter[call[name[self]._dataset_report_helper, parameter[name[dataset]]]]]
    call[name[dataset_report]][constant[notas]] assign[=] call[constant[

].join, parameter[name[notes]]]
    return[call[name[dataset_report].copy, parameter[]]]
keyword[def] identifier[_dataset_report] ( identifier[self] , identifier[dataset] , identifier[dataset_validation] , identifier[dataset_index] , identifier[catalog_fields] , identifier[harvest] = literal[string] , identifier[report] = keyword[None] , identifier[catalog_homepage] = keyword[None] ): literal[string] identifier[good_qa] , identifier[notes] = identifier[self] . identifier[_dataset_qa] ( identifier[dataset] ) identifier[dataset_report] = identifier[OrderedDict] ( identifier[catalog_fields] ) identifier[dataset_report] [ literal[string] ]=( literal[int] keyword[if] identifier[dataset_validation] [ literal[string] ]== literal[string] keyword[else] literal[int] ) identifier[dataset_report] [ literal[string] ]= identifier[dataset_index] keyword[if] identifier[isinstance] ( identifier[harvest] , identifier[list] ): identifier[dataset_report] [ literal[string] ]= literal[int] keyword[if] identifier[dataset] [ literal[string] ] keyword[in] identifier[harvest] keyword[else] literal[int] keyword[elif] identifier[harvest] == literal[string] : identifier[dataset_report] [ literal[string] ]= literal[int] keyword[elif] identifier[harvest] == literal[string] : identifier[dataset_report] [ literal[string] ]= literal[int] keyword[elif] identifier[harvest] == literal[string] : identifier[dataset_report] [ literal[string] ]=( identifier[int] ( identifier[dataset_report] [ literal[string] ])) keyword[elif] identifier[harvest] == literal[string] : identifier[valid_metadata] = identifier[int] ( identifier[dataset_report] [ literal[string] ])== literal[int] identifier[dataset_report] [ literal[string] ]= literal[int] keyword[if] identifier[valid_metadata] keyword[and] identifier[good_qa] keyword[else] literal[int] keyword[elif] identifier[harvest] == literal[string] : keyword[if] keyword[not] identifier[report] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[datasets_to_harvest] = identifier[self] . identifier[_extract_datasets_to_harvest] ( identifier[report] ) identifier[dataset_report] [ literal[string] ]=( literal[int] keyword[if] ( identifier[dataset_report] [ literal[string] ], identifier[dataset] . identifier[get] ( literal[string] )) keyword[in] identifier[datasets_to_harvest] keyword[else] literal[int] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[harvest] )) identifier[dataset_report] . identifier[update] ( identifier[self] . identifier[_dataset_report_helper] ( identifier[dataset] , identifier[catalog_homepage] = identifier[catalog_homepage] ) ) identifier[dataset_report] [ literal[string] ]= literal[string] . identifier[join] ( identifier[notes] ) keyword[return] identifier[dataset_report] . identifier[copy] ()
def _dataset_report(self, dataset, dataset_validation, dataset_index, catalog_fields, harvest='none', report=None, catalog_homepage=None):
    """ Generates one line of the `catalog_report`, corresponding to
    one of the datasets that make up the analyzed catalog."""
    # runs a brief QA analysis on the dataset
    (good_qa, notes) = self._dataset_qa(dataset)
    dataset_report = OrderedDict(catalog_fields)
    dataset_report['valid_dataset_metadata'] = 1 if dataset_validation['status'] == 'OK' else 0
    dataset_report['dataset_index'] = dataset_index
    if isinstance(harvest, list):
        dataset_report['harvest'] = 1 if dataset['title'] in harvest else 0 # depends on [control=['if'], data=[]]
    elif harvest == 'all':
        dataset_report['harvest'] = 1 # depends on [control=['if'], data=[]]
    elif harvest == 'none':
        dataset_report['harvest'] = 0 # depends on [control=['if'], data=[]]
    elif harvest == 'valid':
        dataset_report['harvest'] = int(dataset_report['valid_dataset_metadata']) # depends on [control=['if'], data=[]]
    elif harvest == 'good':
        valid_metadata = int(dataset_report['valid_dataset_metadata']) == 1
        dataset_report['harvest'] = 1 if valid_metadata and good_qa else 0 # depends on [control=['if'], data=[]]
    elif harvest == 'report':
        if not report:
            raise ValueError("\nYou chose 'report' as the harvest criterion, but did not provide a value for\nthe 'report' argument. Please try again.") # depends on [control=['if'], data=[]]
        datasets_to_harvest = self._extract_datasets_to_harvest(report)
        dataset_report['harvest'] = 1 if (dataset_report['catalog_metadata_url'], dataset.get('title')) in datasets_to_harvest else 0 # depends on [control=['if'], data=[]]
    else:
        raise ValueError("\n{} is not a recognized harvest criterion. Try 'all', 'none', 'valid' or\n'report'.".format(harvest))
    dataset_report.update(self._dataset_report_helper(dataset, catalog_homepage=catalog_homepage))
    dataset_report['notas'] = '\n\n'.join(notes)
    return dataset_report.copy()
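The harvest branch ladder reduces to a small decision table; a standalone (hypothetical) helper reproducing just that logic, minus the 'report' case, which needs a full report object:

def harvest_flag(harvest, title, valid_metadata, good_qa):
    if isinstance(harvest, list):
        return 1 if title in harvest else 0
    if harvest == 'all':
        return 1
    if harvest == 'none':
        return 0
    if harvest == 'valid':
        return int(valid_metadata)
    if harvest == 'good':
        return 1 if valid_metadata and good_qa else 0
    raise ValueError('unrecognized harvest criterion: {}'.format(harvest))

assert harvest_flag(['a'], 'a', False, False) == 1
assert harvest_flag('good', 'a', True, False) == 0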
def create(self, ontology=None,subject_category=None,object_category=None,evidence=None,taxon=None,relation=None, file=None, fmt=None, skim=True): """ creates an AssociationSet Currently, this uses an eager binding to a `ontobio.golr` instance. All compact associations for the particular combination of parameters are fetched. Arguments --------- ontology: an `Ontology` object subject_category: string representing category of subjects (e.g. gene, disease, variant) object_category: string representing category of objects (e.g. function, phenotype, disease) taxon: string holding NCBITaxon:nnnn ID """ meta = AssociationSetMetadata(subject_category=subject_category, object_category=object_category, taxon=taxon) if file is not None: return self.create_from_file(file=file, fmt=fmt, ontology=ontology, meta=meta, skim=skim) logging.info("Fetching assocs from store") assocs = bulk_fetch_cached(subject_category=subject_category, object_category=object_category, evidence=evidence, taxon=taxon) logging.info("Creating map for {} subjects".format(len(assocs))) amap = {} subject_label_map = {} for a in assocs: rel = a['relation'] subj = a['subject'] subject_label_map[subj] = a['subject_label'] amap[subj] = a['objects'] aset = AssociationSet(ontology=ontology, meta=meta, subject_label_map=subject_label_map, association_map=amap) return aset
def function[create, parameter[self, ontology, subject_category, object_category, evidence, taxon, relation, file, fmt, skim]]: constant[ creates an AssociationSet Currently, this uses an eager binding to a `ontobio.golr` instance. All compact associations for the particular combination of parameters are fetched. Arguments --------- ontology: an `Ontology` object subject_category: string representing category of subjects (e.g. gene, disease, variant) object_category: string representing category of objects (e.g. function, phenotype, disease) taxon: string holding NCBITaxon:nnnn ID ] variable[meta] assign[=] call[name[AssociationSetMetadata], parameter[]] if compare[name[file] is_not constant[None]] begin[:] return[call[name[self].create_from_file, parameter[]]] call[name[logging].info, parameter[constant[Fetching assocs from store]]] variable[assocs] assign[=] call[name[bulk_fetch_cached], parameter[]] call[name[logging].info, parameter[call[constant[Creating map for {} subjects].format, parameter[call[name[len], parameter[name[assocs]]]]]]] variable[amap] assign[=] dictionary[[], []] variable[subject_label_map] assign[=] dictionary[[], []] for taget[name[a]] in starred[name[assocs]] begin[:] variable[rel] assign[=] call[name[a]][constant[relation]] variable[subj] assign[=] call[name[a]][constant[subject]] call[name[subject_label_map]][name[subj]] assign[=] call[name[a]][constant[subject_label]] call[name[amap]][name[subj]] assign[=] call[name[a]][constant[objects]] variable[aset] assign[=] call[name[AssociationSet], parameter[]] return[name[aset]]
keyword[def] identifier[create] ( identifier[self] , identifier[ontology] = keyword[None] , identifier[subject_category] = keyword[None] , identifier[object_category] = keyword[None] , identifier[evidence] = keyword[None] , identifier[taxon] = keyword[None] , identifier[relation] = keyword[None] , identifier[file] = keyword[None] , identifier[fmt] = keyword[None] , identifier[skim] = keyword[True] ): literal[string] identifier[meta] = identifier[AssociationSetMetadata] ( identifier[subject_category] = identifier[subject_category] , identifier[object_category] = identifier[object_category] , identifier[taxon] = identifier[taxon] ) keyword[if] identifier[file] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[self] . identifier[create_from_file] ( identifier[file] = identifier[file] , identifier[fmt] = identifier[fmt] , identifier[ontology] = identifier[ontology] , identifier[meta] = identifier[meta] , identifier[skim] = identifier[skim] ) identifier[logging] . identifier[info] ( literal[string] ) identifier[assocs] = identifier[bulk_fetch_cached] ( identifier[subject_category] = identifier[subject_category] , identifier[object_category] = identifier[object_category] , identifier[evidence] = identifier[evidence] , identifier[taxon] = identifier[taxon] ) identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[assocs] ))) identifier[amap] ={} identifier[subject_label_map] ={} keyword[for] identifier[a] keyword[in] identifier[assocs] : identifier[rel] = identifier[a] [ literal[string] ] identifier[subj] = identifier[a] [ literal[string] ] identifier[subject_label_map] [ identifier[subj] ]= identifier[a] [ literal[string] ] identifier[amap] [ identifier[subj] ]= identifier[a] [ literal[string] ] identifier[aset] = identifier[AssociationSet] ( identifier[ontology] = identifier[ontology] , identifier[meta] = identifier[meta] , identifier[subject_label_map] = identifier[subject_label_map] , identifier[association_map] = identifier[amap] ) keyword[return] identifier[aset]
def create(self, ontology=None, subject_category=None, object_category=None, evidence=None, taxon=None, relation=None, file=None, fmt=None, skim=True): """ creates an AssociationSet Currently, this uses an eager binding to a `ontobio.golr` instance. All compact associations for the particular combination of parameters are fetched. Arguments --------- ontology: an `Ontology` object subject_category: string representing category of subjects (e.g. gene, disease, variant) object_category: string representing category of objects (e.g. function, phenotype, disease) taxon: string holding NCBITaxon:nnnn ID """ meta = AssociationSetMetadata(subject_category=subject_category, object_category=object_category, taxon=taxon) if file is not None: return self.create_from_file(file=file, fmt=fmt, ontology=ontology, meta=meta, skim=skim) # depends on [control=['if'], data=['file']] logging.info('Fetching assocs from store') assocs = bulk_fetch_cached(subject_category=subject_category, object_category=object_category, evidence=evidence, taxon=taxon) logging.info('Creating map for {} subjects'.format(len(assocs))) amap = {} subject_label_map = {} for a in assocs: rel = a['relation'] subj = a['subject'] subject_label_map[subj] = a['subject_label'] amap[subj] = a['objects'] # depends on [control=['for'], data=['a']] aset = AssociationSet(ontology=ontology, meta=meta, subject_label_map=subject_label_map, association_map=amap) return aset
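The heart of the method is the map it builds from the fetched rows; here is that loop alone, fed one fabricated row shaped the way the code expects ('subject', 'subject_label', 'objects'):

assocs = [{'relation': 'involved_in', 'subject': 'GENE:1',
           'subject_label': 'shh', 'objects': ['GO:0007224']}]
amap, subject_label_map = {}, {}
for a in assocs:
    subject_label_map[a['subject']] = a['subject_label']
    amap[a['subject']] = a['objects']
print(amap)               # {'GENE:1': ['GO:0007224']}
print(subject_label_map)  # {'GENE:1': 'shh'}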
def emit(self, record): """ Throws an error based on the information that the logger reported, given the logging level. :param record: <logging.LogRecord> """ if not logging.raiseExceptions: return logger = logging.getLogger(record.name) # raise an exception based on the error logging if logger.level <= record.levelno: err = record.msg[0] if not isinstance(err, Exception): err = ProjexError(nstr(record.msg)) # log the traceback info data = record.__dict__.copy() data['type'] = type(err).__name__ msg = ERROR_MESSAGE % data sys.stderr.write(msg) raise err
def function[emit, parameter[self, record]]: constant[ Throws an error based on the information that the logger reported, given the logging level. :param record: <logging.LogRecord> ] if <ast.UnaryOp object at 0x7da1b28840d0> begin[:] return[None] variable[logger] assign[=] call[name[logging].getLogger, parameter[name[record].name]] if compare[name[logger].level less_or_equal[<=] name[record].levelno] begin[:] variable[err] assign[=] call[name[record].msg][constant[0]] if <ast.UnaryOp object at 0x7da1b2886320> begin[:] variable[err] assign[=] call[name[ProjexError], parameter[call[name[nstr], parameter[name[record].msg]]]] variable[data] assign[=] call[name[record].__dict__.copy, parameter[]] call[name[data]][constant[type]] assign[=] call[name[type], parameter[name[err]]].__name__ variable[msg] assign[=] binary_operation[name[ERROR_MESSAGE] <ast.Mod object at 0x7da2590d6920> name[data]] call[name[sys].stderr.write, parameter[name[msg]]] <ast.Raise object at 0x7da1b28fafe0>
keyword[def] identifier[emit] ( identifier[self] , identifier[record] ): literal[string] keyword[if] keyword[not] identifier[logging] . identifier[raiseExceptions] : keyword[return] identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[record] . identifier[name] ) keyword[if] identifier[logger] . identifier[level] <= identifier[record] . identifier[levelno] : identifier[err] = identifier[record] . identifier[msg] [ literal[int] ] keyword[if] keyword[not] identifier[isinstance] ( identifier[err] , identifier[Exception] ): identifier[err] = identifier[ProjexError] ( identifier[nstr] ( identifier[record] . identifier[msg] )) identifier[data] = identifier[record] . identifier[__dict__] . identifier[copy] () identifier[data] [ literal[string] ]= identifier[type] ( identifier[err] ). identifier[__name__] identifier[msg] = identifier[ERROR_MESSAGE] % identifier[data] identifier[sys] . identifier[stderr] . identifier[write] ( identifier[msg] ) keyword[raise] identifier[err]
def emit(self, record): """ Throws an error based on the information that the logger reported, given the logging level. :param record: <logging.LogRecord> """ if not logging.raiseExceptions: return # depends on [control=['if'], data=[]] logger = logging.getLogger(record.name) # raise an exception based on the error logging if logger.level <= record.levelno: err = record.msg[0] if not isinstance(err, Exception): err = ProjexError(nstr(record.msg)) # depends on [control=['if'], data=[]] # log the traceback info data = record.__dict__.copy() data['type'] = type(err).__name__ msg = ERROR_MESSAGE % data sys.stderr.write(msg) raise err # depends on [control=['if'], data=[]]
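The same raise-on-log idea in a compressed, stdlib-only form; `RaisingHandler` is a hypothetical reduction of the handler above (ProjexError and the module-level ERROR_MESSAGE template belong to the original project and are not reproduced here).

import logging
import sys

class RaisingHandler(logging.Handler):
    def emit(self, record):
        # exceptions raised in emit() propagate to the logging call site
        if logging.raiseExceptions and record.levelno >= self.level:
            sys.stderr.write("%s: %s\n" % (record.levelname, record.getMessage()))
            raise RuntimeError(record.getMessage())

log = logging.getLogger("strict")
log.addHandler(RaisingHandler(level=logging.ERROR))
try:
    log.error("boom")
except RuntimeError as err:
    print("caught:", err)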
def exact_match(self): """Returns the symbol under the cursor looking both directions as part of a definition lookup for an exact match. """ #We don't have to worry about grouping or anything else fancy. Just #loop through forward and back until we hit a character that can't be #part of a variable or function name. if self._exact_match is None: i = self.pos[1] - 1 start = None end = None line = self.current_line terminators = ['(', ')', '\n', ' ', '=', '%', ','] while i >= 0 and start is None: if line[i] in terminators: start = i + 1 i -= 1 i = self.pos[1] while i < len(line) and end is None: if line[i] in terminators: end = i i += 1 self._exact_match = line[start:end].lower() return self._exact_match
def function[exact_match, parameter[self]]: constant[Returns the symbol under the cursor looking both directions as part of a definition lookup for an exact match. ] if compare[name[self]._exact_match is constant[None]] begin[:] variable[i] assign[=] binary_operation[call[name[self].pos][constant[1]] - constant[1]] variable[start] assign[=] constant[None] variable[end] assign[=] constant[None] variable[line] assign[=] name[self].current_line variable[terminators] assign[=] list[[<ast.Constant object at 0x7da20e954850>, <ast.Constant object at 0x7da20e956710>, <ast.Constant object at 0x7da20e954520>, <ast.Constant object at 0x7da20e9542e0>, <ast.Constant object at 0x7da20e956c80>, <ast.Constant object at 0x7da20e957400>, <ast.Constant object at 0x7da20e9570a0>]] while <ast.BoolOp object at 0x7da20e9572e0> begin[:] if compare[call[name[line]][name[i]] in name[terminators]] begin[:] variable[start] assign[=] binary_operation[name[i] + constant[1]] <ast.AugAssign object at 0x7da20e9548b0> variable[i] assign[=] call[name[self].pos][constant[1]] while <ast.BoolOp object at 0x7da20e956380> begin[:] if compare[call[name[line]][name[i]] in name[terminators]] begin[:] variable[end] assign[=] name[i] <ast.AugAssign object at 0x7da20e954940> name[self]._exact_match assign[=] call[call[name[line]][<ast.Slice object at 0x7da20e957190>].lower, parameter[]] return[name[self]._exact_match]
keyword[def] identifier[exact_match] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_exact_match] keyword[is] keyword[None] : identifier[i] = identifier[self] . identifier[pos] [ literal[int] ]- literal[int] identifier[start] = keyword[None] identifier[end] = keyword[None] identifier[line] = identifier[self] . identifier[current_line] identifier[terminators] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[while] identifier[i] >= literal[int] keyword[and] identifier[start] keyword[is] keyword[None] : keyword[if] identifier[line] [ identifier[i] ] keyword[in] identifier[terminators] : identifier[start] = identifier[i] + literal[int] identifier[i] -= literal[int] identifier[i] = identifier[self] . identifier[pos] [ literal[int] ] keyword[while] identifier[i] < identifier[len] ( identifier[line] ) keyword[and] identifier[end] keyword[is] keyword[None] : keyword[if] identifier[line] [ identifier[i] ] keyword[in] identifier[terminators] : identifier[end] = identifier[i] identifier[i] += literal[int] identifier[self] . identifier[_exact_match] = identifier[line] [ identifier[start] : identifier[end] ]. identifier[lower] () keyword[return] identifier[self] . identifier[_exact_match]
def exact_match(self): """Returns the symbol under the cursor looking both directions as part of a definition lookup for an exact match. """ #We don't have to worry about grouping or anything else fancy. Just #loop through forward and back until we hit a character that can't be #part of a variable or function name. if self._exact_match is None: i = self.pos[1] - 1 start = None end = None line = self.current_line terminators = ['(', ')', '\n', ' ', '=', '%', ','] while i >= 0 and start is None: if line[i] in terminators: start = i + 1 # depends on [control=['if'], data=[]] i -= 1 # depends on [control=['while'], data=[]] i = self.pos[1] while i < len(line) and end is None: if line[i] in terminators: end = i # depends on [control=['if'], data=[]] i += 1 # depends on [control=['while'], data=[]] self._exact_match = line[start:end].lower() # depends on [control=['if'], data=[]] return self._exact_match
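The bidirectional scan is simple to test outside the class; a hypothetical free-function version using the same terminator set:

def word_under_cursor(line, col):
    terminators = set('()\n =%,')
    start = 0
    for i in range(col - 1, -1, -1):     # scan backwards to a terminator
        if line[i] in terminators:
            start = i + 1
            break
    end = len(line)
    for i in range(col, len(line)):      # scan forwards to a terminator
        if line[i] in terminators:
            end = i
            break
    return line[start:end].lower()

assert word_under_cursor("call MySub(x)", 7) == "mysub"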
def get_all(self, qry, tpl): ''' get all rows for a query ''' self.cur.execute(qry, tpl) result = self.cur.fetchall() return result
def function[get_all, parameter[self, qry, tpl]]: constant[ get all rows for a query ] call[name[self].cur.execute, parameter[name[qry], name[tpl]]] variable[result] assign[=] call[name[self].cur.fetchall, parameter[]] return[name[result]]
keyword[def] identifier[get_all] ( identifier[self] , identifier[qry] , identifier[tpl] ): literal[string] identifier[self] . identifier[cur] . identifier[execute] ( identifier[qry] , identifier[tpl] ) identifier[result] = identifier[self] . identifier[cur] . identifier[fetchall] () keyword[return] identifier[result]
def get_all(self, qry, tpl): """ get all rows for a query """ self.cur.execute(qry, tpl) result = self.cur.fetchall() return result
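The wrapper is easiest to see against an in-memory sqlite3 database; `Store` is a hypothetical holder providing the `cur` attribute the method expects.

import sqlite3

class Store:
    get_all = get_all  # reuse the method above

    def __init__(self):
        self.cur = sqlite3.connect(":memory:").cursor()

s = Store()
s.cur.execute("CREATE TABLE t (n INTEGER)")
s.cur.executemany("INSERT INTO t VALUES (?)", [(1,), (2,)])
print(s.get_all("SELECT n FROM t WHERE n > ?", (0,)))  # [(1,), (2,)]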
def indent_width(self, value): """ Setter for **self.__indent_width** attribute. :param value: Attribute value. :type value: int """ if value is not None: assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format("indent_width", value) self.__indent_width = value
def function[indent_width, parameter[self, value]]: constant[ Setter for **self.__indent_width** attribute. :param value: Attribute value. :type value: int ] if compare[name[value] is_not constant[None]] begin[:] assert[compare[call[name[type], parameter[name[value]]] is name[int]]] name[self].__indent_width assign[=] name[value]
keyword[def] identifier[indent_width] ( identifier[self] , identifier[value] ): literal[string] keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] : keyword[assert] identifier[type] ( identifier[value] ) keyword[is] identifier[int] , literal[string] . identifier[format] ( literal[string] , identifier[value] ) identifier[self] . identifier[__indent_width] = identifier[value]
def indent_width(self, value): """ Setter for **self.__indent_width** attribute. :param value: Attribute value. :type value: int """ if value is not None: assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format('indent_width', value) # depends on [control=['if'], data=['value']] self.__indent_width = value
def preconstrain_flag_page(self, magic_content): """ Preconstrain the data in the flag page. :param magic_content: The content of the magic page as a bytestring. """ for m, v in zip(magic_content, self.state.cgc.flag_bytes): self.preconstrain(m, v)
def function[preconstrain_flag_page, parameter[self, magic_content]]: constant[ Preconstrain the data in the flag page. :param magic_content: The content of the magic page as a bytestring. ] for taget[tuple[[<ast.Name object at 0x7da20c7ca050>, <ast.Name object at 0x7da20c7c9c00>]]] in starred[call[name[zip], parameter[name[magic_content], name[self].state.cgc.flag_bytes]]] begin[:] call[name[self].preconstrain, parameter[name[m], name[v]]]
keyword[def] identifier[preconstrain_flag_page] ( identifier[self] , identifier[magic_content] ): literal[string] keyword[for] identifier[m] , identifier[v] keyword[in] identifier[zip] ( identifier[magic_content] , identifier[self] . identifier[state] . identifier[cgc] . identifier[flag_bytes] ): identifier[self] . identifier[preconstrain] ( identifier[m] , identifier[v] )
def preconstrain_flag_page(self, magic_content): """ Preconstrain the data in the flag page. :param magic_content: The content of the magic page as a bytestring. """ for (m, v) in zip(magic_content, self.state.cgc.flag_bytes): self.preconstrain(m, v) # depends on [control=['for'], data=[]]
def dump(self, stream): """Serialize self to text stream. Matches convention of mongooplog. """ items = ( ('time', self.time), ('inc', self.inc), ) # use ordered dict to retain order ts = collections.OrderedDict(items) json.dump(dict(ts=ts), stream)
def function[dump, parameter[self, stream]]: constant[Serialize self to text stream. Matches convention of mongooplog. ] variable[items] assign[=] tuple[[<ast.Tuple object at 0x7da1b24049a0>, <ast.Tuple object at 0x7da1b24063b0>]] variable[ts] assign[=] call[name[collections].OrderedDict, parameter[name[items]]] call[name[json].dump, parameter[call[name[dict], parameter[]], name[stream]]]
keyword[def] identifier[dump] ( identifier[self] , identifier[stream] ): literal[string] identifier[items] =( ( literal[string] , identifier[self] . identifier[time] ), ( literal[string] , identifier[self] . identifier[inc] ), ) identifier[ts] = identifier[collections] . identifier[OrderedDict] ( identifier[items] ) identifier[json] . identifier[dump] ( identifier[dict] ( identifier[ts] = identifier[ts] ), identifier[stream] )
def dump(self, stream): """Serialize self to text stream. Matches convention of mongooplog. """ items = (('time', self.time), ('inc', self.inc)) # use ordered dict to retain order ts = collections.OrderedDict(items) json.dump(dict(ts=ts), stream)
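What the serialized form looks like, writing to an in-memory buffer; `Timestamp` is a hypothetical stand-in for the class this method belongs to.

import io

class Timestamp:
    dump = dump  # reuse the method above

    def __init__(self, time, inc):
        self.time, self.inc = time, inc

buf = io.StringIO()
Timestamp(1700000000, 7).dump(buf)
print(buf.getvalue())  # {"ts": {"time": 1700000000, "inc": 7}}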
def _wait_for_response(self): """ Wait until the user accepted or rejected the request """ while not self.server.response_code: time.sleep(2) time.sleep(5) self.server.shutdown()
def function[_wait_for_response, parameter[self]]: constant[ Wait until the user accepted or rejected the request ] while <ast.UnaryOp object at 0x7da1b0328340> begin[:] call[name[time].sleep, parameter[constant[2]]] call[name[time].sleep, parameter[constant[5]]] call[name[self].server.shutdown, parameter[]]
keyword[def] identifier[_wait_for_response] ( identifier[self] ): literal[string] keyword[while] keyword[not] identifier[self] . identifier[server] . identifier[response_code] : identifier[time] . identifier[sleep] ( literal[int] ) identifier[time] . identifier[sleep] ( literal[int] ) identifier[self] . identifier[server] . identifier[shutdown] ()
def _wait_for_response(self): """ Wait until the user accepted or rejected the request """ while not self.server.response_code: time.sleep(2) # depends on [control=['while'], data=[]] time.sleep(5) self.server.shutdown()
def __driver_stub(self, text, state): """Display help messages or invoke the proper completer. The interface of helper methods and completer methods are documented in the helper() decorator method and the completer() decorator method, respectively. Arguments: text: A string, that is the current completion scope. state: An integer. Returns: A string used to replace the given text, if any. None if no completion candidates are found. Raises: This method is called via the readline callback. If this method raises an error, it is silently ignored by the readline library. This behavior makes debugging very difficult. For this reason, non-driver methods are run within try-except blocks. When an error occurs, the stack trace is printed to self.stderr. """ origline = readline.get_line_buffer() line = origline.lstrip() if line and line[-1] == '?': self.__driver_helper(line) else: toks = shlex.split(line) return self.__driver_completer(toks, text, state)
def function[__driver_stub, parameter[self, text, state]]: constant[Display help messages or invoke the proper completer. The interface of helper methods and completer methods are documented in the helper() decorator method and the completer() decorator method, respectively. Arguments: text: A string, that is the current completion scope. state: An integer. Returns: A string used to replace the given text, if any. None if no completion candidates are found. Raises: This method is called via the readline callback. If this method raises an error, it is silently ignored by the readline library. This behavior makes debugging very difficult. For this reason, non-driver methods are run within try-except blocks. When an error occurs, the stack trace is printed to self.stderr. ] variable[origline] assign[=] call[name[readline].get_line_buffer, parameter[]] variable[line] assign[=] call[name[origline].lstrip, parameter[]] if <ast.BoolOp object at 0x7da2041dae90> begin[:] call[name[self].__driver_helper, parameter[name[line]]]
keyword[def] identifier[__driver_stub] ( identifier[self] , identifier[text] , identifier[state] ): literal[string] identifier[origline] = identifier[readline] . identifier[get_line_buffer] () identifier[line] = identifier[origline] . identifier[lstrip] () keyword[if] identifier[line] keyword[and] identifier[line] [- literal[int] ]== literal[string] : identifier[self] . identifier[__driver_helper] ( identifier[line] ) keyword[else] : identifier[toks] = identifier[shlex] . identifier[split] ( identifier[line] ) keyword[return] identifier[self] . identifier[__driver_completer] ( identifier[toks] , identifier[text] , identifier[state] )
def __driver_stub(self, text, state): """Display help messages or invoke the proper completer. The interface of helper methods and completer methods are documented in the helper() decorator method and the completer() decorator method, respectively. Arguments: text: A string, that is the current completion scope. state: An integer. Returns: A string used to replace the given text, if any. None if no completion candidates are found. Raises: This method is called via the readline callback. If this method raises an error, it is silently ignored by the readline library. This behavior makes debugging very difficult. For this reason, non-driver methods are run within try-except blocks. When an error occurs, the stack trace is printed to self.stderr. """ origline = readline.get_line_buffer() line = origline.lstrip() if line and line[-1] == '?': self.__driver_helper(line) # depends on [control=['if'], data=[]] else: toks = shlex.split(line) return self.__driver_completer(toks, text, state)
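The dispatch shape, a trailing '?' means help and anything else goes to the completer, can be exercised without readline by passing the line in directly; everything below is a hypothetical reduction of the driver.

import shlex

def drive(line, text, state, completer, helper):
    stripped = line.lstrip()
    if stripped and stripped[-1] == '?':
        helper(stripped)        # show help, no completion result
        return None
    return completer(shlex.split(stripped), text, state)

drive("show version?", "", 0,
      lambda toks, text, state: None,
      lambda line: print("help for:", line))   # help for: show version?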
def fullversion(): ''' Return all version info from lvm version CLI Example: .. code-block:: bash salt '*' lvm.fullversion ''' ret = {} cmd = 'lvm version' out = __salt__['cmd.run'](cmd).splitlines() for line in out: comps = line.split(':') ret[comps[0].strip()] = comps[1].strip() return ret
def function[fullversion, parameter[]]: constant[ Return all version info from lvm version CLI Example: .. code-block:: bash salt '*' lvm.fullversion ] variable[ret] assign[=] dictionary[[], []] variable[cmd] assign[=] constant[lvm version] variable[out] assign[=] call[call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]].splitlines, parameter[]] for taget[name[line]] in starred[name[out]] begin[:] variable[comps] assign[=] call[name[line].split, parameter[constant[:]]] call[name[ret]][call[call[name[comps]][constant[0]].strip, parameter[]]] assign[=] call[call[name[comps]][constant[1]].strip, parameter[]] return[name[ret]]
keyword[def] identifier[fullversion] (): literal[string] identifier[ret] ={} identifier[cmd] = literal[string] identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] ). identifier[splitlines] () keyword[for] identifier[line] keyword[in] identifier[out] : identifier[comps] = identifier[line] . identifier[split] ( literal[string] ) identifier[ret] [ identifier[comps] [ literal[int] ]. identifier[strip] ()]= identifier[comps] [ literal[int] ]. identifier[strip] () keyword[return] identifier[ret]
def fullversion(): """ Return all version info from lvm version CLI Example: .. code-block:: bash salt '*' lvm.fullversion """ ret = {} cmd = 'lvm version' out = __salt__['cmd.run'](cmd).splitlines() for line in out: comps = line.split(':') ret[comps[0].strip()] = comps[1].strip() # depends on [control=['for'], data=['line']] return ret
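The parsing step is plain 'key: value' splitting; a self-contained version fed captured `lvm version` output instead of going through __salt__. Using partition instead of split(':') keeps anything after the first colon intact, which is slightly safer for values that themselves contain colons.

sample = """LVM version:     2.03.11(2) (2021-01-08)
Library version: 1.02.175 (2021-01-08)
Driver version:  4.43.0"""
ret = {}
for line in sample.splitlines():
    key, _, val = line.partition(':')
    ret[key.strip()] = val.strip()
print(ret['Driver version'])  # 4.43.0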
def crypto_secretstream_xchacha20poly1305_push( state, m, ad=None, tag=crypto_secretstream_xchacha20poly1305_TAG_MESSAGE, ): """ Add an encrypted message to the secret stream. :param state: a secretstream state object :type state: crypto_secretstream_xchacha20poly1305_state :param m: the message to encrypt, the maximum length of an individual message is :data:`.crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX`. :type m: bytes :param ad: additional data to include in the authentication tag :type ad: bytes or None :param tag: the message tag, usually :data:`.crypto_secretstream_xchacha20poly1305_TAG_MESSAGE` or :data:`.crypto_secretstream_xchacha20poly1305_TAG_FINAL`. :type tag: int :return: ciphertext :rtype: bytes """ ensure( isinstance(state, crypto_secretstream_xchacha20poly1305_state), 'State must be a crypto_secretstream_xchacha20poly1305_state object', raising=exc.TypeError, ) ensure(isinstance(m, bytes), 'Message is not bytes', raising=exc.TypeError) ensure( len(m) <= crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX, 'Message is too long', raising=exc.ValueError, ) ensure( ad is None or isinstance(ad, bytes), 'Additional data must be bytes or None', raising=exc.TypeError, ) clen = len(m) + crypto_secretstream_xchacha20poly1305_ABYTES if state.rawbuf is None or len(state.rawbuf) < clen: state.rawbuf = ffi.new('unsigned char[]', clen) if ad is None: ad = ffi.NULL adlen = 0 else: adlen = len(ad) rc = lib.crypto_secretstream_xchacha20poly1305_push( state.statebuf, state.rawbuf, ffi.NULL, m, len(m), ad, adlen, tag, ) ensure(rc == 0, 'Unexpected failure', raising=exc.RuntimeError) return ffi.buffer(state.rawbuf, clen)[:]
def function[crypto_secretstream_xchacha20poly1305_push, parameter[state, m, ad, tag]]: constant[ Add an encrypted message to the secret stream. :param state: a secretstream state object :type state: crypto_secretstream_xchacha20poly1305_state :param m: the message to encrypt, the maximum length of an individual message is :data:`.crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX`. :type m: bytes :param ad: additional data to include in the authentication tag :type ad: bytes or None :param tag: the message tag, usually :data:`.crypto_secretstream_xchacha20poly1305_TAG_MESSAGE` or :data:`.crypto_secretstream_xchacha20poly1305_TAG_FINAL`. :type tag: int :return: ciphertext :rtype: bytes ] call[name[ensure], parameter[call[name[isinstance], parameter[name[state], name[crypto_secretstream_xchacha20poly1305_state]]], constant[State must be a crypto_secretstream_xchacha20poly1305_state object]]] call[name[ensure], parameter[call[name[isinstance], parameter[name[m], name[bytes]]], constant[Message is not bytes]]] call[name[ensure], parameter[compare[call[name[len], parameter[name[m]]] less_or_equal[<=] name[crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX]], constant[Message is too long]]] call[name[ensure], parameter[<ast.BoolOp object at 0x7da1b1da1c60>, constant[Additional data must be bytes or None]]] variable[clen] assign[=] binary_operation[call[name[len], parameter[name[m]]] + name[crypto_secretstream_xchacha20poly1305_ABYTES]] if <ast.BoolOp object at 0x7da207f01600> begin[:] name[state].rawbuf assign[=] call[name[ffi].new, parameter[constant[unsigned char[]], name[clen]]] if compare[name[ad] is constant[None]] begin[:] variable[ad] assign[=] name[ffi].NULL variable[adlen] assign[=] constant[0] variable[rc] assign[=] call[name[lib].crypto_secretstream_xchacha20poly1305_push, parameter[name[state].statebuf, name[state].rawbuf, name[ffi].NULL, name[m], call[name[len], parameter[name[m]]], name[ad], name[adlen], name[tag]]] call[name[ensure], parameter[compare[name[rc] equal[==] constant[0]], constant[Unexpected failure]]] return[call[call[name[ffi].buffer, parameter[name[state].rawbuf, name[clen]]]][<ast.Slice object at 0x7da207f01e70>]]
keyword[def] identifier[crypto_secretstream_xchacha20poly1305_push] ( identifier[state] , identifier[m] , identifier[ad] = keyword[None] , identifier[tag] = identifier[crypto_secretstream_xchacha20poly1305_TAG_MESSAGE] , ): literal[string] identifier[ensure] ( identifier[isinstance] ( identifier[state] , identifier[crypto_secretstream_xchacha20poly1305_state] ), literal[string] , identifier[raising] = identifier[exc] . identifier[TypeError] , ) identifier[ensure] ( identifier[isinstance] ( identifier[m] , identifier[bytes] ), literal[string] , identifier[raising] = identifier[exc] . identifier[TypeError] ) identifier[ensure] ( identifier[len] ( identifier[m] )<= identifier[crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX] , literal[string] , identifier[raising] = identifier[exc] . identifier[ValueError] , ) identifier[ensure] ( identifier[ad] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[ad] , identifier[bytes] ), literal[string] , identifier[raising] = identifier[exc] . identifier[TypeError] , ) identifier[clen] = identifier[len] ( identifier[m] )+ identifier[crypto_secretstream_xchacha20poly1305_ABYTES] keyword[if] identifier[state] . identifier[rawbuf] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[state] . identifier[rawbuf] )< identifier[clen] : identifier[state] . identifier[rawbuf] = identifier[ffi] . identifier[new] ( literal[string] , identifier[clen] ) keyword[if] identifier[ad] keyword[is] keyword[None] : identifier[ad] = identifier[ffi] . identifier[NULL] identifier[adlen] = literal[int] keyword[else] : identifier[adlen] = identifier[len] ( identifier[ad] ) identifier[rc] = identifier[lib] . identifier[crypto_secretstream_xchacha20poly1305_push] ( identifier[state] . identifier[statebuf] , identifier[state] . identifier[rawbuf] , identifier[ffi] . identifier[NULL] , identifier[m] , identifier[len] ( identifier[m] ), identifier[ad] , identifier[adlen] , identifier[tag] , ) identifier[ensure] ( identifier[rc] == literal[int] , literal[string] , identifier[raising] = identifier[exc] . identifier[RuntimeError] ) keyword[return] identifier[ffi] . identifier[buffer] ( identifier[state] . identifier[rawbuf] , identifier[clen] )[:]
def crypto_secretstream_xchacha20poly1305_push(state, m, ad=None, tag=crypto_secretstream_xchacha20poly1305_TAG_MESSAGE): """ Add an encrypted message to the secret stream. :param state: a secretstream state object :type state: crypto_secretstream_xchacha20poly1305_state :param m: the message to encrypt, the maximum length of an individual message is :data:`.crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX`. :type m: bytes :param ad: additional data to include in the authentication tag :type ad: bytes or None :param tag: the message tag, usually :data:`.crypto_secretstream_xchacha20poly1305_TAG_MESSAGE` or :data:`.crypto_secretstream_xchacha20poly1305_TAG_FINAL`. :type tag: int :return: ciphertext :rtype: bytes """ ensure(isinstance(state, crypto_secretstream_xchacha20poly1305_state), 'State must be a crypto_secretstream_xchacha20poly1305_state object', raising=exc.TypeError) ensure(isinstance(m, bytes), 'Message is not bytes', raising=exc.TypeError) ensure(len(m) <= crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX, 'Message is too long', raising=exc.ValueError) ensure(ad is None or isinstance(ad, bytes), 'Additional data must be bytes or None', raising=exc.TypeError) clen = len(m) + crypto_secretstream_xchacha20poly1305_ABYTES if state.rawbuf is None or len(state.rawbuf) < clen: state.rawbuf = ffi.new('unsigned char[]', clen) # depends on [control=['if'], data=[]] if ad is None: ad = ffi.NULL adlen = 0 # depends on [control=['if'], data=['ad']] else: adlen = len(ad) rc = lib.crypto_secretstream_xchacha20poly1305_push(state.statebuf, state.rawbuf, ffi.NULL, m, len(m), ad, adlen, tag) ensure(rc == 0, 'Unexpected failure', raising=exc.RuntimeError) return ffi.buffer(state.rawbuf, clen)[:]
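End-to-end usage through PyNaCl's bindings module, where this function ships; a sketch assuming PyNaCl >= 1.4, with names following the libsodium secretstream API.

from nacl import bindings as b

key = b.crypto_secretstream_xchacha20poly1305_keygen()
state = b.crypto_secretstream_xchacha20poly1305_state()
header = b.crypto_secretstream_xchacha20poly1305_init_push(state, key)
ct = b.crypto_secretstream_xchacha20poly1305_push(
    state, b"hello", tag=b.crypto_secretstream_xchacha20poly1305_TAG_FINAL)
# the ciphertext is ABYTES longer than the plaintext
assert len(ct) == 5 + b.crypto_secretstream_xchacha20poly1305_ABYTES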
def get_signature(self, signature):
    """Retrieve one signature, discriminated by name or id.

    Note that signature name is not case sensitive.

    :param: a zobjects.Signature describing the signature
           like "Signature(name='my-sig')"

    :returns: a zobjects.Signature object, filled with the signature;
              if no signature matches, returns None.
    """
    resp = self.request_list('GetSignatures')

    # GetSignature does not allow to filter the results, so we do it by
    # hand...
    if resp and (len(resp) > 0):
        for sig_dict in resp:
            sig = zobjects.Signature.from_dict(sig_dict)
            if hasattr(signature, 'id'):
                its_this_one = (sig.id == signature.id)
            elif hasattr(signature, 'name'):
                its_this_one = (sig.name.upper() == signature.name.upper())
            else:
                raise ValueError('should mention one of id,name')
            if its_this_one:
                return sig
    else:
        return None
def function[get_signature, parameter[self, signature]]:
    constant[Retrieve one signature, discriminated by name or id.

    Note that signature name is not case sensitive.

    :param: a zobjects.Signature describing the signature
           like "Signature(name='my-sig')"

    :returns: a zobjects.Signature object, filled with the signature;
              if no signature matches, returns None.
    ]
    variable[resp] assign[=] call[name[self].request_list, parameter[constant[GetSignatures]]]
    if <ast.BoolOp object at 0x7da1b253f670> begin[:]
        for taget[name[sig_dict]] in starred[name[resp]] begin[:]
            variable[sig] assign[=] call[name[zobjects].Signature.from_dict, parameter[name[sig_dict]]]
            if call[name[hasattr], parameter[name[signature], constant[id]]] begin[:]
                variable[its_this_one] assign[=] compare[name[sig].id equal[==] name[signature].id]
            if name[its_this_one] begin[:]
                return[name[sig]]
keyword[def] identifier[get_signature] ( identifier[self] , identifier[signature] ): literal[string] identifier[resp] = identifier[self] . identifier[request_list] ( literal[string] ) keyword[if] identifier[resp] keyword[and] ( identifier[len] ( identifier[resp] )> literal[int] ): keyword[for] identifier[sig_dict] keyword[in] identifier[resp] : identifier[sig] = identifier[zobjects] . identifier[Signature] . identifier[from_dict] ( identifier[sig_dict] ) keyword[if] identifier[hasattr] ( identifier[signature] , literal[string] ): identifier[its_this_one] =( identifier[sig] . identifier[id] == identifier[signature] . identifier[id] ) keyword[elif] identifier[hasattr] ( identifier[signature] , literal[string] ): identifier[its_this_one] =( identifier[sig] . identifier[name] . identifier[upper] ()== identifier[signature] . identifier[name] . identifier[upper] ()) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[its_this_one] : keyword[return] identifier[sig] keyword[else] : keyword[return] keyword[None]
def get_signature(self, signature):
    """Retrieve one signature, discriminated by name or id.

    Note that signature name is not case sensitive.

    :param: a zobjects.Signature describing the signature
           like "Signature(name='my-sig')"

    :returns: a zobjects.Signature object, filled with the signature;
              if no signature matches, returns None.
    """
    resp = self.request_list('GetSignatures')
    # GetSignature does not allow to filter the results, so we do it by
    # hand...
    if resp and len(resp) > 0:
        for sig_dict in resp:
            sig = zobjects.Signature.from_dict(sig_dict)
            if hasattr(signature, 'id'):
                its_this_one = sig.id == signature.id # depends on [control=['if'], data=[]]
            elif hasattr(signature, 'name'):
                its_this_one = sig.name.upper() == signature.name.upper() # depends on [control=['if'], data=[]]
            else:
                raise ValueError('should mention one of id,name')
            if its_this_one:
                return sig # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sig_dict']] # depends on [control=['if'], data=[]]
    else:
        return None
def adjust_jobs_priority(self, high_value_jobs, priority=1): """For every job priority determine if we need to increase or decrease the job priority Currently, high value jobs have a priority of 1 and a timeout of 0. """ # Only job priorities that don't have an expiration date (2 weeks for new jobs or year 2100 # for jobs update via load_preseed) are updated for jp in JobPriority.objects.filter(expiration_date__isnull=True): if jp.unique_identifier() not in high_value_jobs: if jp.priority != SETA_LOW_VALUE_PRIORITY: logger.warning('Decreasing priority of %s', jp.unique_identifier()) jp.priority = SETA_LOW_VALUE_PRIORITY jp.save(update_fields=['priority']) elif jp.priority != priority: logger.warning('Increasing priority of %s', jp.unique_identifier()) jp.priority = priority jp.save(update_fields=['priority'])
def function[adjust_jobs_priority, parameter[self, high_value_jobs, priority]]: constant[For every job priority determine if we need to increase or decrease the job priority Currently, high value jobs have a priority of 1 and a timeout of 0. ] for taget[name[jp]] in starred[call[name[JobPriority].objects.filter, parameter[]]] begin[:] if compare[call[name[jp].unique_identifier, parameter[]] <ast.NotIn object at 0x7da2590d7190> name[high_value_jobs]] begin[:] if compare[name[jp].priority not_equal[!=] name[SETA_LOW_VALUE_PRIORITY]] begin[:] call[name[logger].warning, parameter[constant[Decreasing priority of %s], call[name[jp].unique_identifier, parameter[]]]] name[jp].priority assign[=] name[SETA_LOW_VALUE_PRIORITY] call[name[jp].save, parameter[]]
keyword[def] identifier[adjust_jobs_priority] ( identifier[self] , identifier[high_value_jobs] , identifier[priority] = literal[int] ): literal[string] keyword[for] identifier[jp] keyword[in] identifier[JobPriority] . identifier[objects] . identifier[filter] ( identifier[expiration_date__isnull] = keyword[True] ): keyword[if] identifier[jp] . identifier[unique_identifier] () keyword[not] keyword[in] identifier[high_value_jobs] : keyword[if] identifier[jp] . identifier[priority] != identifier[SETA_LOW_VALUE_PRIORITY] : identifier[logger] . identifier[warning] ( literal[string] , identifier[jp] . identifier[unique_identifier] ()) identifier[jp] . identifier[priority] = identifier[SETA_LOW_VALUE_PRIORITY] identifier[jp] . identifier[save] ( identifier[update_fields] =[ literal[string] ]) keyword[elif] identifier[jp] . identifier[priority] != identifier[priority] : identifier[logger] . identifier[warning] ( literal[string] , identifier[jp] . identifier[unique_identifier] ()) identifier[jp] . identifier[priority] = identifier[priority] identifier[jp] . identifier[save] ( identifier[update_fields] =[ literal[string] ])
def adjust_jobs_priority(self, high_value_jobs, priority=1):
    """For every job priority, determine whether we need to increase or decrease it.

    Currently, high value jobs have a priority of 1 and a timeout of 0.
    """
    # Only job priorities that don't have an expiration date (2 weeks for new jobs or year 2100
    # for jobs updated via load_preseed) are updated
    for jp in JobPriority.objects.filter(expiration_date__isnull=True):
        if jp.unique_identifier() not in high_value_jobs:
            if jp.priority != SETA_LOW_VALUE_PRIORITY:
                logger.warning('Decreasing priority of %s', jp.unique_identifier())
                jp.priority = SETA_LOW_VALUE_PRIORITY
                jp.save(update_fields=['priority']) # depends on [control=['if'], data=['SETA_LOW_VALUE_PRIORITY']] # depends on [control=['if'], data=[]]
        elif jp.priority != priority:
            logger.warning('Increasing priority of %s', jp.unique_identifier())
            jp.priority = priority
            jp.save(update_fields=['priority']) # depends on [control=['if'], data=['priority']] # depends on [control=['for'], data=['jp']]
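# Standalone sketch of the decision rule above, minus the Django ORM. The
# SETA_LOW_VALUE_PRIORITY value of 5 is an assumption for this demo only;
# treeherder defines its own constant.
SETA_LOW_VALUE_PRIORITY = 5

def new_priority(identifier, current, high_value_jobs, priority=1):
    """Return the priority a job should be saved with, or None if unchanged."""
    if identifier not in high_value_jobs:
        return SETA_LOW_VALUE_PRIORITY if current != SETA_LOW_VALUE_PRIORITY else None
    return priority if current != priority else None

job = ('linux64', 'opt', 'mochitest-1')
assert new_priority(job, 1, set()) == 5     # demote: not a high value job
assert new_priority(job, 5, {job}) == 1     # promote: now high value
assert new_priority(job, 1, {job}) is None  # already correct, no save needed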
def buildpack(self, url): """Add a buildpack by URL.""" cmd = ["heroku", "buildpacks:add", url, "--app", self.name] self._run(cmd)
def function[buildpack, parameter[self, url]]: constant[Add a buildpack by URL.] variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18ede6b00>, <ast.Constant object at 0x7da18ede6860>, <ast.Name object at 0x7da18ede7520>, <ast.Constant object at 0x7da18ede57e0>, <ast.Attribute object at 0x7da18ede7010>]] call[name[self]._run, parameter[name[cmd]]]
keyword[def] identifier[buildpack] ( identifier[self] , identifier[url] ): literal[string] identifier[cmd] =[ literal[string] , literal[string] , identifier[url] , literal[string] , identifier[self] . identifier[name] ] identifier[self] . identifier[_run] ( identifier[cmd] )
def buildpack(self, url): """Add a buildpack by URL.""" cmd = ['heroku', 'buildpacks:add', url, '--app', self.name] self._run(cmd)
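# What the `_run` call above amounts to, sketched with subprocess directly.
# Assumes the Heroku CLI is installed and on PATH; the app name and buildpack
# URL below are placeholders.
import subprocess

def add_buildpack(app_name, url):
    cmd = ["heroku", "buildpacks:add", url, "--app", app_name]
    subprocess.run(cmd, check=True)  # raises CalledProcessError on a non-zero exit

# add_buildpack("my-app", "https://github.com/heroku/heroku-buildpack-python")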
def list_nodes_min(location=None, call=None): ''' Return a list of the VMs that are on the provider. Only a list of VM names, and their state, is returned. This is the minimum amount of information needed to check for existing VMs. ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_min function must be called with -f or --function.' ) ret = {} params = {'Action': 'DescribeInstances'} instances = aws.query(params, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4') if 'error' in instances: raise SaltCloudSystemExit( 'An error occurred while listing nodes: {0}'.format( instances['error']['Errors']['Error']['Message'] ) ) for instance in instances: if isinstance(instance['instancesSet']['item'], list): items = instance['instancesSet']['item'] else: items = [instance['instancesSet']['item']] for item in items: state = item['instanceState']['name'] name = _extract_name_tag(item) id = item['instanceId'] ret[name] = {'state': state, 'id': id} return ret
def function[list_nodes_min, parameter[location, call]]: constant[ Return a list of the VMs that are on the provider. Only a list of VM names, and their state, is returned. This is the minimum amount of information needed to check for existing VMs. ] if compare[name[call] equal[==] constant[action]] begin[:] <ast.Raise object at 0x7da18dc04e50> variable[ret] assign[=] dictionary[[], []] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20cabc700>], [<ast.Constant object at 0x7da1b1c722c0>]] variable[instances] assign[=] call[name[aws].query, parameter[name[params]]] if compare[constant[error] in name[instances]] begin[:] <ast.Raise object at 0x7da1b1c731f0> for taget[name[instance]] in starred[name[instances]] begin[:] if call[name[isinstance], parameter[call[call[name[instance]][constant[instancesSet]]][constant[item]], name[list]]] begin[:] variable[items] assign[=] call[call[name[instance]][constant[instancesSet]]][constant[item]] for taget[name[item]] in starred[name[items]] begin[:] variable[state] assign[=] call[call[name[item]][constant[instanceState]]][constant[name]] variable[name] assign[=] call[name[_extract_name_tag], parameter[name[item]]] variable[id] assign[=] call[name[item]][constant[instanceId]] call[name[ret]][name[name]] assign[=] dictionary[[<ast.Constant object at 0x7da1b21e2080>, <ast.Constant object at 0x7da1b21e06a0>], [<ast.Name object at 0x7da1b21e2800>, <ast.Name object at 0x7da1b21e1540>]] return[name[ret]]
keyword[def] identifier[list_nodes_min] ( identifier[location] = keyword[None] , identifier[call] = keyword[None] ): literal[string] keyword[if] identifier[call] == literal[string] : keyword[raise] identifier[SaltCloudSystemExit] ( literal[string] ) identifier[ret] ={} identifier[params] ={ literal[string] : literal[string] } identifier[instances] = identifier[aws] . identifier[query] ( identifier[params] , identifier[location] = identifier[get_location] (), identifier[provider] = identifier[get_provider] (), identifier[opts] = identifier[__opts__] , identifier[sigver] = literal[string] ) keyword[if] literal[string] keyword[in] identifier[instances] : keyword[raise] identifier[SaltCloudSystemExit] ( literal[string] . identifier[format] ( identifier[instances] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ] ) ) keyword[for] identifier[instance] keyword[in] identifier[instances] : keyword[if] identifier[isinstance] ( identifier[instance] [ literal[string] ][ literal[string] ], identifier[list] ): identifier[items] = identifier[instance] [ literal[string] ][ literal[string] ] keyword[else] : identifier[items] =[ identifier[instance] [ literal[string] ][ literal[string] ]] keyword[for] identifier[item] keyword[in] identifier[items] : identifier[state] = identifier[item] [ literal[string] ][ literal[string] ] identifier[name] = identifier[_extract_name_tag] ( identifier[item] ) identifier[id] = identifier[item] [ literal[string] ] identifier[ret] [ identifier[name] ]={ literal[string] : identifier[state] , literal[string] : identifier[id] } keyword[return] identifier[ret]
def list_nodes_min(location=None, call=None): """ Return a list of the VMs that are on the provider. Only a list of VM names, and their state, is returned. This is the minimum amount of information needed to check for existing VMs. """ if call == 'action': raise SaltCloudSystemExit('The list_nodes_min function must be called with -f or --function.') # depends on [control=['if'], data=[]] ret = {} params = {'Action': 'DescribeInstances'} instances = aws.query(params, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4') if 'error' in instances: raise SaltCloudSystemExit('An error occurred while listing nodes: {0}'.format(instances['error']['Errors']['Error']['Message'])) # depends on [control=['if'], data=['instances']] for instance in instances: if isinstance(instance['instancesSet']['item'], list): items = instance['instancesSet']['item'] # depends on [control=['if'], data=[]] else: items = [instance['instancesSet']['item']] for item in items: state = item['instanceState']['name'] name = _extract_name_tag(item) id = item['instanceId'] ret[name] = {'state': state, 'id': id} # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['instance']] return ret
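# The EC2 response normalization above, exercised against a canned payload so
# it runs offline. AWS returns a bare dict when an instancesSet has a single
# item and a list otherwise; `_extract_name_tag` is approximated here by a
# plain 'name' key, which is an assumption for the demo.
def min_nodes(instances):
    ret = {}
    for instance in instances:
        items = instance['instancesSet']['item']
        if not isinstance(items, list):
            items = [items]  # normalize the single-item case
        for item in items:
            ret[item['name']] = {'state': item['instanceState']['name'],
                                 'id': item['instanceId']}
    return ret

canned = [{'instancesSet': {'item': {'name': 'web-1',
                                     'instanceState': {'name': 'running'},
                                     'instanceId': 'i-0abc123'}}}]
assert min_nodes(canned) == {'web-1': {'state': 'running', 'id': 'i-0abc123'}}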
def search_uris( self, uri, threat_types, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ This method is used to check whether a URI is on a given threatList. Example: >>> from google.cloud import webrisk_v1beta1 >>> from google.cloud.webrisk_v1beta1 import enums >>> >>> client = webrisk_v1beta1.WebRiskServiceV1Beta1Client() >>> >>> # TODO: Initialize `uri`: >>> uri = '' >>> >>> # TODO: Initialize `threat_types`: >>> threat_types = [] >>> >>> response = client.search_uris(uri, threat_types) Args: uri (str): The URI to be checked for matches. threat_types (list[~google.cloud.webrisk_v1beta1.types.ThreatType]): Required. The ThreatLists to search in. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.webrisk_v1beta1.types.SearchUrisResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "search_uris" not in self._inner_api_calls: self._inner_api_calls[ "search_uris" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.search_uris, default_retry=self._method_configs["SearchUris"].retry, default_timeout=self._method_configs["SearchUris"].timeout, client_info=self._client_info, ) request = webrisk_pb2.SearchUrisRequest(uri=uri, threat_types=threat_types) return self._inner_api_calls["search_uris"]( request, retry=retry, timeout=timeout, metadata=metadata )
def function[search_uris, parameter[self, uri, threat_types, retry, timeout, metadata]]: constant[ This method is used to check whether a URI is on a given threatList. Example: >>> from google.cloud import webrisk_v1beta1 >>> from google.cloud.webrisk_v1beta1 import enums >>> >>> client = webrisk_v1beta1.WebRiskServiceV1Beta1Client() >>> >>> # TODO: Initialize `uri`: >>> uri = '' >>> >>> # TODO: Initialize `threat_types`: >>> threat_types = [] >>> >>> response = client.search_uris(uri, threat_types) Args: uri (str): The URI to be checked for matches. threat_types (list[~google.cloud.webrisk_v1beta1.types.ThreatType]): Required. The ThreatLists to search in. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.webrisk_v1beta1.types.SearchUrisResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. ] if compare[constant[search_uris] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:] call[name[self]._inner_api_calls][constant[search_uris]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.search_uris]] variable[request] assign[=] call[name[webrisk_pb2].SearchUrisRequest, parameter[]] return[call[call[name[self]._inner_api_calls][constant[search_uris]], parameter[name[request]]]]
keyword[def] identifier[search_uris] ( identifier[self] , identifier[uri] , identifier[threat_types] , identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] , identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] , identifier[metadata] = keyword[None] , ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] : identifier[self] . identifier[_inner_api_calls] [ literal[string] ]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] ( identifier[self] . identifier[transport] . identifier[search_uris] , identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] , identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] , identifier[client_info] = identifier[self] . identifier[_client_info] , ) identifier[request] = identifier[webrisk_pb2] . identifier[SearchUrisRequest] ( identifier[uri] = identifier[uri] , identifier[threat_types] = identifier[threat_types] ) keyword[return] identifier[self] . identifier[_inner_api_calls] [ literal[string] ]( identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata] )
def search_uris(self, uri, threat_types, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ This method is used to check whether a URI is on a given threatList. Example: >>> from google.cloud import webrisk_v1beta1 >>> from google.cloud.webrisk_v1beta1 import enums >>> >>> client = webrisk_v1beta1.WebRiskServiceV1Beta1Client() >>> >>> # TODO: Initialize `uri`: >>> uri = '' >>> >>> # TODO: Initialize `threat_types`: >>> threat_types = [] >>> >>> response = client.search_uris(uri, threat_types) Args: uri (str): The URI to be checked for matches. threat_types (list[~google.cloud.webrisk_v1beta1.types.ThreatType]): Required. The ThreatLists to search in. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.webrisk_v1beta1.types.SearchUrisResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'search_uris' not in self._inner_api_calls: self._inner_api_calls['search_uris'] = google.api_core.gapic_v1.method.wrap_method(self.transport.search_uris, default_retry=self._method_configs['SearchUris'].retry, default_timeout=self._method_configs['SearchUris'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]] request = webrisk_pb2.SearchUrisRequest(uri=uri, threat_types=threat_types) return self._inner_api_calls['search_uris'](request, retry=retry, timeout=timeout, metadata=metadata)
def CrearPlantillaPDF(self, papel="A4", orientacion="portrait"):
    "Start the creation of the PDF file"
    # create the renderer with the PDF metadata properties
    t = Template(
        format=papel, orientation=orientacion,
        title="F 1116 B/C %s" % (self.NroOrden),
        author="CUIT %s" % self.Cuit,
        subject="COE %s" % self.params_out.get('coe'),
        keywords="AFIP Liquidacion Electronica Primaria de Granos",
        creator='wslpg.py %s (http://www.PyAfipWs.com.ar)' % __version__,)
    self.template = t
    return True
def function[CrearPlantillaPDF, parameter[self, papel, orientacion]]:
    constant[Start the creation of the PDF file]
    variable[t] assign[=] call[name[Template], parameter[]]
    name[self].template assign[=] name[t]
    return[constant[True]]
keyword[def] identifier[CrearPlantillaPDF] ( identifier[self] , identifier[papel] = literal[string] , identifier[orientacion] = literal[string] ): literal[string] identifier[t] = identifier[Template] ( identifier[format] = identifier[papel] , identifier[orientation] = identifier[orientacion] , identifier[title] = literal[string] %( identifier[self] . identifier[NroOrden] ), identifier[author] = literal[string] % identifier[self] . identifier[Cuit] , identifier[subject] = literal[string] % identifier[self] . identifier[params_out] . identifier[get] ( literal[string] ), identifier[keywords] = literal[string] , identifier[creator] = literal[string] % identifier[__version__] ,) identifier[self] . identifier[template] = identifier[t] keyword[return] keyword[True]
def CrearPlantillaPDF(self, papel='A4', orientacion='portrait'):
    """Start the creation of the PDF file"""
    # create the renderer with the PDF metadata properties
    t = Template(format=papel, orientation=orientacion, title='F 1116 B/C %s' % self.NroOrden, author='CUIT %s' % self.Cuit, subject='COE %s' % self.params_out.get('coe'), keywords='AFIP Liquidacion Electronica Primaria de Granos', creator='wslpg.py %s (http://www.PyAfipWs.com.ar)' % __version__)
    self.template = t
    return True
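# Hedged usage sketch of the Template construction above, with the instance
# attributes replaced by placeholder literals. The assumption is that Template
# comes from PyFPDF, which exposes it at the package top level (fpdf2 moved it
# to fpdf.template); the metadata values below are illustrative only.
from fpdf import Template  # assumed import path

t = Template(format="A4", orientation="portrait",
             title="F 1116 B/C 1",            # placeholder NroOrden
             author="CUIT 20111111112",       # placeholder Cuit
             subject="COE 330100000357",      # placeholder coe
             keywords="AFIP Liquidacion Electronica Primaria de Granos",
             creator="wslpg.py (http://www.PyAfipWs.com.ar)")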
def addPositionToGraph( self, reference_id, position, position_types=None, strand=None): """ Add the positional information to the graph, following the faldo model. We assume that if the strand is None, we give it a generic "Position" only. Triples: my_position a (any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position) faldo:position Integer(numeric position) faldo:reference reference_id :param graph: :param reference_id: :param position: :param position_types: :param strand: :return: Identifier of the position created """ pos_id = self._makePositionId(reference_id, position, position_types) if position is not None: self.graph.addTriple( pos_id, self.globaltt['position'], position, object_is_literal=True, literal_type="xsd:integer") self.graph.addTriple(pos_id, self.globaltt['reference'], reference_id) if position_types is not None: for pos_type in position_types: self.model.addType(pos_id, pos_type) strnd = None if strand is not None: strnd = strand if not re.match(r'faldo', strand): # not already mapped to faldo, so expect we need to map it strnd = self._getStrandType(strand) # else: # strnd = self.globaltt['both_strand'] if strnd is None and (position_types is None or position_types == []): strnd = self.globaltt['Position'] if strnd is not None: self.model.addType(pos_id, strnd) return pos_id
def function[addPositionToGraph, parameter[self, reference_id, position, position_types, strand]]: constant[ Add the positional information to the graph, following the faldo model. We assume that if the strand is None, we give it a generic "Position" only. Triples: my_position a (any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position) faldo:position Integer(numeric position) faldo:reference reference_id :param graph: :param reference_id: :param position: :param position_types: :param strand: :return: Identifier of the position created ] variable[pos_id] assign[=] call[name[self]._makePositionId, parameter[name[reference_id], name[position], name[position_types]]] if compare[name[position] is_not constant[None]] begin[:] call[name[self].graph.addTriple, parameter[name[pos_id], call[name[self].globaltt][constant[position]], name[position]]] call[name[self].graph.addTriple, parameter[name[pos_id], call[name[self].globaltt][constant[reference]], name[reference_id]]] if compare[name[position_types] is_not constant[None]] begin[:] for taget[name[pos_type]] in starred[name[position_types]] begin[:] call[name[self].model.addType, parameter[name[pos_id], name[pos_type]]] variable[strnd] assign[=] constant[None] if compare[name[strand] is_not constant[None]] begin[:] variable[strnd] assign[=] name[strand] if <ast.UnaryOp object at 0x7da20c7953c0> begin[:] variable[strnd] assign[=] call[name[self]._getStrandType, parameter[name[strand]]] if <ast.BoolOp object at 0x7da20c795ab0> begin[:] variable[strnd] assign[=] call[name[self].globaltt][constant[Position]] if compare[name[strnd] is_not constant[None]] begin[:] call[name[self].model.addType, parameter[name[pos_id], name[strnd]]] return[name[pos_id]]
keyword[def] identifier[addPositionToGraph] ( identifier[self] , identifier[reference_id] , identifier[position] , identifier[position_types] = keyword[None] , identifier[strand] = keyword[None] ): literal[string] identifier[pos_id] = identifier[self] . identifier[_makePositionId] ( identifier[reference_id] , identifier[position] , identifier[position_types] ) keyword[if] identifier[position] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[graph] . identifier[addTriple] ( identifier[pos_id] , identifier[self] . identifier[globaltt] [ literal[string] ], identifier[position] , identifier[object_is_literal] = keyword[True] , identifier[literal_type] = literal[string] ) identifier[self] . identifier[graph] . identifier[addTriple] ( identifier[pos_id] , identifier[self] . identifier[globaltt] [ literal[string] ], identifier[reference_id] ) keyword[if] identifier[position_types] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[pos_type] keyword[in] identifier[position_types] : identifier[self] . identifier[model] . identifier[addType] ( identifier[pos_id] , identifier[pos_type] ) identifier[strnd] = keyword[None] keyword[if] identifier[strand] keyword[is] keyword[not] keyword[None] : identifier[strnd] = identifier[strand] keyword[if] keyword[not] identifier[re] . identifier[match] ( literal[string] , identifier[strand] ): identifier[strnd] = identifier[self] . identifier[_getStrandType] ( identifier[strand] ) keyword[if] identifier[strnd] keyword[is] keyword[None] keyword[and] ( identifier[position_types] keyword[is] keyword[None] keyword[or] identifier[position_types] ==[]): identifier[strnd] = identifier[self] . identifier[globaltt] [ literal[string] ] keyword[if] identifier[strnd] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[model] . identifier[addType] ( identifier[pos_id] , identifier[strnd] ) keyword[return] identifier[pos_id]
def addPositionToGraph(self, reference_id, position, position_types=None, strand=None): """ Add the positional information to the graph, following the faldo model. We assume that if the strand is None, we give it a generic "Position" only. Triples: my_position a (any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position) faldo:position Integer(numeric position) faldo:reference reference_id :param graph: :param reference_id: :param position: :param position_types: :param strand: :return: Identifier of the position created """ pos_id = self._makePositionId(reference_id, position, position_types) if position is not None: self.graph.addTriple(pos_id, self.globaltt['position'], position, object_is_literal=True, literal_type='xsd:integer') # depends on [control=['if'], data=['position']] self.graph.addTriple(pos_id, self.globaltt['reference'], reference_id) if position_types is not None: for pos_type in position_types: self.model.addType(pos_id, pos_type) # depends on [control=['for'], data=['pos_type']] # depends on [control=['if'], data=['position_types']] strnd = None if strand is not None: strnd = strand if not re.match('faldo', strand): # not already mapped to faldo, so expect we need to map it strnd = self._getStrandType(strand) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['strand']] # else: # strnd = self.globaltt['both_strand'] if strnd is None and (position_types is None or position_types == []): strnd = self.globaltt['Position'] # depends on [control=['if'], data=[]] if strnd is not None: self.model.addType(pos_id, strnd) # depends on [control=['if'], data=['strnd']] return pos_id
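# Standalone recap of the strand-typing fallback above: keep an explicit faldo
# term as-is, map any other strand string through a lookup (here `to_faldo`, a
# toy stand-in for _getStrandType), and give a bare position with no types the
# generic Position term. The faldo strings are illustrative.
import re

def strand_type(strand, position_types,
                to_faldo=lambda s: 'faldo:%sStrandPosition' % s.capitalize()):
    if strand is not None:
        return strand if re.match(r'faldo', strand) else to_faldo(strand)
    if position_types is None or position_types == []:
        return 'faldo:Position'
    return None

assert strand_type('faldo:PlusStrandPosition', None) == 'faldo:PlusStrandPosition'
assert strand_type('plus', None) == 'faldo:PlusStrandPosition'
assert strand_type(None, None) == 'faldo:Position'
assert strand_type(None, ['exact']) is None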
def create_object(self, text): ''' Allow creation of transaction parties using a full name string. ''' if self.create_field == 'name': if text.startswith('Location_'): this_id = text[len('Location_'):] this_loc = Location.objects.get(id=this_id) return self.get_queryset().get_or_create( name=this_loc.name,location=this_loc )[0] elif text.startswith('StaffMember_'): this_id = text[len('StaffMember_'):] this_member = StaffMember.objects.get(id=this_id) return self.get_queryset().get_or_create( name=this_member.fullName,staffMember=this_member, defaults={'user': getattr(this_member,'userAccount',None)} )[0] elif text.startswith('User_'): this_id = text[len('User_'):] this_user = User.objects.get(id=this_id) return self.get_queryset().get_or_create( name=this_user.get_full_name(),user=this_user, defaults={'staffMember': getattr(this_user,'staffmember',None)} )[0] else: return self.get_queryset().get_or_create( name=text,staffMember=None,user=None,location=None )[0] else: return super(TransactionPartyAutoComplete,self).create_object(text)
def function[create_object, parameter[self, text]]: constant[ Allow creation of transaction parties using a full name string. ] if compare[name[self].create_field equal[==] constant[name]] begin[:] if call[name[text].startswith, parameter[constant[Location_]]] begin[:] variable[this_id] assign[=] call[name[text]][<ast.Slice object at 0x7da1b1394ee0>] variable[this_loc] assign[=] call[name[Location].objects.get, parameter[]] return[call[call[call[name[self].get_queryset, parameter[]].get_or_create, parameter[]]][constant[0]]]
keyword[def] identifier[create_object] ( identifier[self] , identifier[text] ): literal[string] keyword[if] identifier[self] . identifier[create_field] == literal[string] : keyword[if] identifier[text] . identifier[startswith] ( literal[string] ): identifier[this_id] = identifier[text] [ identifier[len] ( literal[string] ):] identifier[this_loc] = identifier[Location] . identifier[objects] . identifier[get] ( identifier[id] = identifier[this_id] ) keyword[return] identifier[self] . identifier[get_queryset] (). identifier[get_or_create] ( identifier[name] = identifier[this_loc] . identifier[name] , identifier[location] = identifier[this_loc] )[ literal[int] ] keyword[elif] identifier[text] . identifier[startswith] ( literal[string] ): identifier[this_id] = identifier[text] [ identifier[len] ( literal[string] ):] identifier[this_member] = identifier[StaffMember] . identifier[objects] . identifier[get] ( identifier[id] = identifier[this_id] ) keyword[return] identifier[self] . identifier[get_queryset] (). identifier[get_or_create] ( identifier[name] = identifier[this_member] . identifier[fullName] , identifier[staffMember] = identifier[this_member] , identifier[defaults] ={ literal[string] : identifier[getattr] ( identifier[this_member] , literal[string] , keyword[None] )} )[ literal[int] ] keyword[elif] identifier[text] . identifier[startswith] ( literal[string] ): identifier[this_id] = identifier[text] [ identifier[len] ( literal[string] ):] identifier[this_user] = identifier[User] . identifier[objects] . identifier[get] ( identifier[id] = identifier[this_id] ) keyword[return] identifier[self] . identifier[get_queryset] (). identifier[get_or_create] ( identifier[name] = identifier[this_user] . identifier[get_full_name] (), identifier[user] = identifier[this_user] , identifier[defaults] ={ literal[string] : identifier[getattr] ( identifier[this_user] , literal[string] , keyword[None] )} )[ literal[int] ] keyword[else] : keyword[return] identifier[self] . identifier[get_queryset] (). identifier[get_or_create] ( identifier[name] = identifier[text] , identifier[staffMember] = keyword[None] , identifier[user] = keyword[None] , identifier[location] = keyword[None] )[ literal[int] ] keyword[else] : keyword[return] identifier[super] ( identifier[TransactionPartyAutoComplete] , identifier[self] ). identifier[create_object] ( identifier[text] )
def create_object(self, text): """ Allow creation of transaction parties using a full name string. """ if self.create_field == 'name': if text.startswith('Location_'): this_id = text[len('Location_'):] this_loc = Location.objects.get(id=this_id) return self.get_queryset().get_or_create(name=this_loc.name, location=this_loc)[0] # depends on [control=['if'], data=[]] elif text.startswith('StaffMember_'): this_id = text[len('StaffMember_'):] this_member = StaffMember.objects.get(id=this_id) return self.get_queryset().get_or_create(name=this_member.fullName, staffMember=this_member, defaults={'user': getattr(this_member, 'userAccount', None)})[0] # depends on [control=['if'], data=[]] elif text.startswith('User_'): this_id = text[len('User_'):] this_user = User.objects.get(id=this_id) return self.get_queryset().get_or_create(name=this_user.get_full_name(), user=this_user, defaults={'staffMember': getattr(this_user, 'staffmember', None)})[0] # depends on [control=['if'], data=[]] else: return self.get_queryset().get_or_create(name=text, staffMember=None, user=None, location=None)[0] # depends on [control=['if'], data=[]] else: return super(TransactionPartyAutoComplete, self).create_object(text)
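# The autocomplete widget above encodes the party type in a text prefix. A
# minimal parser for that convention, with the Django model lookups stripped
# out so it runs standalone:
def parse_party_text(text):
    for prefix in ('Location_', 'StaffMember_', 'User_'):
        if text.startswith(prefix):
            return prefix.rstrip('_'), text[len(prefix):]
    return 'Name', text  # plain full-name string, no linked record

assert parse_party_text('User_42') == ('User', '42')
assert parse_party_text('Location_7') == ('Location', '7')
assert parse_party_text('Jane Doe') == ('Name', 'Jane Doe')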
def get_array(self, rowBased=True):
    """Return a two-dimensional list with the values of the :py:obj:`self`.

    :param boolean rowBased: Indicates whether the returned list should be
                             row or column based. Has to be True if list[i]
                             should be the i'th row, False if list[i] should
                             be the i'th column.
    :return: Returns a list representing the matrix rows containing lists
             representing the columns for each row.
    :rtype: list
    """
    if rowBased:
        array = []
        for row in xrange(self._rows):
            newRow = []
            for col in xrange(self._columns):
                newRow.append(self.get_value(col, row))
            array.append(newRow)
        return array
    return copy.deepcopy(self.matrix)
def function[get_array, parameter[self, rowBased]]:
    constant[Return a two-dimensional list with the values of the :py:obj:`self`. :param boolean rowBased: Indicates whether the returned list should be row or column based. Has to be True if list[i] should be the i'th row, False if list[i] should be the i'th column. :return: Returns a list representing the matrix rows containing lists representing the columns for each row. :rtype: list ]
    if name[rowBased] begin[:]
        variable[array] assign[=] list[[]]
        for taget[name[row]] in starred[call[name[xrange], parameter[name[self]._rows]]] begin[:]
            variable[newRow] assign[=] list[[]]
            for taget[name[col]] in starred[call[name[xrange], parameter[name[self]._columns]]] begin[:]
                call[name[newRow].append, parameter[call[name[self].get_value, parameter[name[col], name[row]]]]]
            call[name[array].append, parameter[name[newRow]]]
        return[name[array]]
    return[call[name[copy].deepcopy, parameter[name[self].matrix]]]
keyword[def] identifier[get_array] ( identifier[self] , identifier[rowBased] = keyword[True] ): literal[string] keyword[if] identifier[rowBased] : identifier[array] =[] keyword[for] identifier[row] keyword[in] identifier[xrange] ( identifier[self] . identifier[_rows] ): identifier[newRow] =[] keyword[for] identifier[col] keyword[in] identifier[xrange] ( identifier[self] . identifier[_columns] ): identifier[newRow] . identifier[append] ( identifier[self] . identifier[get_value] ( identifier[col] , identifier[row] )) identifier[array] . identifier[append] ( identifier[newRow] ) keyword[return] identifier[array] keyword[return] identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[matrix] )
def get_array(self, rowBased=True):
    """Return a two-dimensional list with the values of the :py:obj:`self`.

    :param boolean rowBased: Indicates whether the returned list should be
                             row or column based. Has to be True if list[i]
                             should be the i'th row, False if list[i] should
                             be the i'th column.
    :return: Returns a list representing the matrix rows containing lists
             representing the columns for each row.
    :rtype: list
    """
    if rowBased:
        array = []
        for row in xrange(self._rows):
            newRow = []
            for col in xrange(self._columns):
                newRow.append(self.get_value(col, row)) # depends on [control=['for'], data=['col']]
            array.append(newRow) # depends on [control=['for'], data=['row']]
        return array # depends on [control=['if'], data=[]]
    return copy.deepcopy(self.matrix)
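# What rowBased=True does, shown on a bare nested list. The fallback branch
# above returns self.matrix directly for the column-based case, which suggests
# column-major storage (matrix[col][row]); the loop therefore transposes on
# the way out. The literal matrix below is illustrative.
columns = [[1, 4], [2, 5], [3, 6]]  # 3 columns x 2 rows, column-major
rows = [[col[r] for col in columns] for r in range(2)]
assert rows == [[1, 2, 3], [4, 5, 6]]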
def strip_fit(self, **kwargs):
    """Strip water and fit to the remaining system.

    First runs :meth:`strip_water` and then :meth:`fit`; see there
    for arguments.

    - *strip_input* is used for :meth:`strip_water` (but is only
      useful in special cases, e.g. when there is no Protein group
      defined; then set *strip_input* = ``['Other']``).

    - *input* is passed on to :meth:`fit` and can contain the
      ``[center_group, fit_group, output_group]``

    - *fitgroup* is only passed to :meth:`fit` and just contains
      the group to fit to ("backbone" by default)

      .. warning:: *fitgroup* can only be a Gromacs default group and not
                   a custom group (because the indices change after
                   stripping)

    - By default *fit* = "rot+trans" (and *fit* is passed to :meth:`fit`,
      together with the *xy* = ``False`` keyword)

    .. Note:: The call signature of :meth:`strip_water` is somewhat
              different from this one.
    """
    kwargs.setdefault('fit', 'rot+trans')
    kw_fit = {}
    for k in ('xy', 'fit', 'fitgroup', 'input'):
        if k in kwargs:
            kw_fit[k] = kwargs.pop(k)

    kwargs['input'] = kwargs.pop('strip_input', ['Protein'])
    kwargs['force'] = kw_fit['force'] = kwargs.pop('force', self.force)

    paths = self.strip_water(**kwargs)   # updates self.nowater
    transformer_nowater = self.nowater[paths['xtc']]  # make sure to get the one we just produced
    return transformer_nowater.fit(**kw_fit)
def function[strip_fit, parameter[self]]: constant[Strip water and fit to the remaining system. First runs :meth:`strip_water` and then :meth:`fit`; see there for arguments. - *strip_input* is used for :meth:`strip_water` (but is only useful in special cases, e.g. when there is no Protein group defined. Then set *strip_input* = ``['Other']``. - *input* is passed on to :meth:`fit` and can contain the ``[center_group, fit_group, output_group]`` - *fitgroup* is only passed to :meth:`fit` and just contains the group to fit to ("backbone" by default) .. warning:: *fitgroup* can only be a Gromacs default group and not a custom group (because the indices change after stripping) - By default *fit* = "rot+trans" (and *fit* is passed to :meth:`fit`, together with the *xy* = ``False`` keyword) .. Note:: The call signature of :meth:`strip_water` is somewhat different from this one. ] call[name[kwargs].setdefault, parameter[constant[fit], constant[rot+trans]]] variable[kw_fit] assign[=] dictionary[[], []] for taget[name[k]] in starred[tuple[[<ast.Constant object at 0x7da207f98220>, <ast.Constant object at 0x7da207f98580>, <ast.Constant object at 0x7da207f9b3d0>, <ast.Constant object at 0x7da207f984c0>]]] begin[:] if compare[name[k] in name[kwargs]] begin[:] call[name[kw_fit]][name[k]] assign[=] call[name[kwargs].pop, parameter[name[k]]] call[name[kwargs]][constant[input]] assign[=] call[name[kwargs].pop, parameter[constant[strip_input], list[[<ast.Constant object at 0x7da2045673d0>]]]] call[name[kwargs]][constant[force]] assign[=] call[name[kwargs].pop, parameter[constant[force], name[self].force]] variable[paths] assign[=] call[name[self].strip_water, parameter[]] variable[transformer_nowater] assign[=] call[name[self].nowater][call[name[paths]][constant[xtc]]] return[call[name[transformer_nowater].fit, parameter[]]]
keyword[def] identifier[strip_fit] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[kwargs] . identifier[setdefault] ( literal[string] , literal[string] ) identifier[kw_fit] ={} keyword[for] identifier[k] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ): keyword[if] identifier[k] keyword[in] identifier[kwargs] : identifier[kw_fit] [ identifier[k] ]= identifier[kwargs] . identifier[pop] ( identifier[k] ) identifier[kwargs] [ literal[string] ]= identifier[kwargs] . identifier[pop] ( literal[string] ,[ literal[string] ]) identifier[kwargs] [ literal[string] ]= identifier[kw_fit] [ literal[string] ]= identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[force] ) identifier[paths] = identifier[self] . identifier[strip_water] (** identifier[kwargs] ) identifier[transformer_nowater] = identifier[self] . identifier[nowater] [ identifier[paths] [ literal[string] ]] keyword[return] identifier[transformer_nowater] . identifier[fit] (** identifier[kw_fit] )
def strip_fit(self, **kwargs):
    """Strip water and fit to the remaining system.

    First runs :meth:`strip_water` and then :meth:`fit`; see there
    for arguments.

    - *strip_input* is used for :meth:`strip_water` (but is only
      useful in special cases, e.g. when there is no Protein group
      defined; then set *strip_input* = ``['Other']``).

    - *input* is passed on to :meth:`fit` and can contain the
      ``[center_group, fit_group, output_group]``

    - *fitgroup* is only passed to :meth:`fit` and just contains
      the group to fit to ("backbone" by default)

      .. warning:: *fitgroup* can only be a Gromacs default group and not
                   a custom group (because the indices change after
                   stripping)

    - By default *fit* = "rot+trans" (and *fit* is passed to :meth:`fit`,
      together with the *xy* = ``False`` keyword)

    .. Note:: The call signature of :meth:`strip_water` is somewhat
              different from this one.
    """
    kwargs.setdefault('fit', 'rot+trans')
    kw_fit = {}
    for k in ('xy', 'fit', 'fitgroup', 'input'):
        if k in kwargs:
            kw_fit[k] = kwargs.pop(k) # depends on [control=['if'], data=['k', 'kwargs']] # depends on [control=['for'], data=['k']]
    kwargs['input'] = kwargs.pop('strip_input', ['Protein'])
    kwargs['force'] = kw_fit['force'] = kwargs.pop('force', self.force)
    paths = self.strip_water(**kwargs) # updates self.nowater
    transformer_nowater = self.nowater[paths['xtc']] # make sure to get the one we just produced
    return transformer_nowater.fit(**kw_fit)
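# Hedged usage sketch. This method reads like GromacsWrapper's
# gromacs.cbook.Transformer (an assumption); the file names below are
# placeholders and a real trajectory is needed for the call to do anything.
from gromacs.cbook import Transformer  # assumed import path

trj = Transformer(s='md.tpr', f='md.xtc', n='md.ndx')
trj.strip_fit(strip_input=['Protein'], fitgroup='backbone', fit='rot+trans')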
def group_exists(self, group_name): """Checks a group via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to be checked :returns: True if group exists :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_CLOUD, 'groups?search=' + group_name ) if res.status_code == 200: tree = ET.fromstring(res.content) for code_el in tree.findall('data/groups/element'): if code_el is not None and code_el.text == group_name: return True return False raise HTTPResponseError(res)
def function[group_exists, parameter[self, group_name]]: constant[Checks a group via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to be checked :returns: True if group exists :raises: HTTPResponseError in case an HTTP error status was returned ] variable[res] assign[=] call[name[self]._make_ocs_request, parameter[constant[GET], name[self].OCS_SERVICE_CLOUD, binary_operation[constant[groups?search=] + name[group_name]]]] if compare[name[res].status_code equal[==] constant[200]] begin[:] variable[tree] assign[=] call[name[ET].fromstring, parameter[name[res].content]] for taget[name[code_el]] in starred[call[name[tree].findall, parameter[constant[data/groups/element]]]] begin[:] if <ast.BoolOp object at 0x7da20c6c5270> begin[:] return[constant[True]] return[constant[False]] <ast.Raise object at 0x7da20c6c6860>
keyword[def] identifier[group_exists] ( identifier[self] , identifier[group_name] ): literal[string] identifier[res] = identifier[self] . identifier[_make_ocs_request] ( literal[string] , identifier[self] . identifier[OCS_SERVICE_CLOUD] , literal[string] + identifier[group_name] ) keyword[if] identifier[res] . identifier[status_code] == literal[int] : identifier[tree] = identifier[ET] . identifier[fromstring] ( identifier[res] . identifier[content] ) keyword[for] identifier[code_el] keyword[in] identifier[tree] . identifier[findall] ( literal[string] ): keyword[if] identifier[code_el] keyword[is] keyword[not] keyword[None] keyword[and] identifier[code_el] . identifier[text] == identifier[group_name] : keyword[return] keyword[True] keyword[return] keyword[False] keyword[raise] identifier[HTTPResponseError] ( identifier[res] )
def group_exists(self, group_name): """Checks a group via provisioning API. If you get back an error 999, then the provisioning API is not enabled. :param group_name: name of group to be checked :returns: True if group exists :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request('GET', self.OCS_SERVICE_CLOUD, 'groups?search=' + group_name) if res.status_code == 200: tree = ET.fromstring(res.content) for code_el in tree.findall('data/groups/element'): if code_el is not None and code_el.text == group_name: return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['code_el']] return False # depends on [control=['if'], data=[]] raise HTTPResponseError(res)
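# The OCS response parsing above, exercised against a canned XML body so it
# runs without an ownCloud server. The element layout mirrors the findall()
# path used in the snippet; the group names are illustrative.
import xml.etree.ElementTree as ET

body = (b'<ocs><meta><statuscode>100</statuscode></meta>'
        b'<data><groups><element>admin</element>'
        b'<element>staff</element></groups></data></ocs>')
tree = ET.fromstring(body)
names = [el.text for el in tree.findall('data/groups/element')]
assert 'staff' in names and 'guests' not in names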
def memoize(func=None, maxlen=None): """Cache a function's return value each time it is called. This function serves as a function decorator to provide a caching of evaluated fitness values. If called later with the same arguments, the cached value is returned instead of being re-evaluated. This decorator assumes that candidates are individually pickleable, and their pickled values are used for hashing into a dictionary. It should be used when evaluating an *expensive* fitness function to avoid costly re-evaluation of those fitnesses. The typical usage is as follows:: @memoize def expensive_fitness_function(candidates, args): # Implementation of expensive fitness calculation pass It is also possible to provide the named argument *maxlen*, which specifies the size of the memoization cache to use. (If *maxlen* is ``None``, then an unbounded cache is used.) Once the size of the cache has reached *maxlen*, the oldest element is replaced by the newest element in order to keep the size constant. This usage is as follows:: @memoize(maxlen=100) def expensive_fitness_function(candidates, args): # Implementation of expensive fitness calculation pass .. warning:: The ``maxlen`` parameter must be passed as a named keyword argument, or an ``AttributeError`` will be raised (e.g., saying ``@memoize(100)`` will cause an error). """ if func is not None: cache = BoundedOrderedDict(maxlen=maxlen) @functools.wraps(func) def memo_target(candidates, args): fitness = [] for candidate in candidates: lookup_value = pickle.dumps(candidate, 1) if lookup_value not in cache: cache[lookup_value] = func([candidate], args)[0] fitness.append(cache[lookup_value]) return fitness return memo_target else: def memoize_factory(func): return memoize(func, maxlen=maxlen) return memoize_factory
def function[memoize, parameter[func, maxlen]]: constant[Cache a function's return value each time it is called. This function serves as a function decorator to provide a caching of evaluated fitness values. If called later with the same arguments, the cached value is returned instead of being re-evaluated. This decorator assumes that candidates are individually pickleable, and their pickled values are used for hashing into a dictionary. It should be used when evaluating an *expensive* fitness function to avoid costly re-evaluation of those fitnesses. The typical usage is as follows:: @memoize def expensive_fitness_function(candidates, args): # Implementation of expensive fitness calculation pass It is also possible to provide the named argument *maxlen*, which specifies the size of the memoization cache to use. (If *maxlen* is ``None``, then an unbounded cache is used.) Once the size of the cache has reached *maxlen*, the oldest element is replaced by the newest element in order to keep the size constant. This usage is as follows:: @memoize(maxlen=100) def expensive_fitness_function(candidates, args): # Implementation of expensive fitness calculation pass .. warning:: The ``maxlen`` parameter must be passed as a named keyword argument, or an ``AttributeError`` will be raised (e.g., saying ``@memoize(100)`` will cause an error). ] if compare[name[func] is_not constant[None]] begin[:] variable[cache] assign[=] call[name[BoundedOrderedDict], parameter[]] def function[memo_target, parameter[candidates, args]]: variable[fitness] assign[=] list[[]] for taget[name[candidate]] in starred[name[candidates]] begin[:] variable[lookup_value] assign[=] call[name[pickle].dumps, parameter[name[candidate], constant[1]]] if compare[name[lookup_value] <ast.NotIn object at 0x7da2590d7190> name[cache]] begin[:] call[name[cache]][name[lookup_value]] assign[=] call[call[name[func], parameter[list[[<ast.Name object at 0x7da1b1358e50>]], name[args]]]][constant[0]] call[name[fitness].append, parameter[call[name[cache]][name[lookup_value]]]] return[name[fitness]] return[name[memo_target]]
keyword[def] identifier[memoize] ( identifier[func] = keyword[None] , identifier[maxlen] = keyword[None] ): literal[string] keyword[if] identifier[func] keyword[is] keyword[not] keyword[None] : identifier[cache] = identifier[BoundedOrderedDict] ( identifier[maxlen] = identifier[maxlen] ) @ identifier[functools] . identifier[wraps] ( identifier[func] ) keyword[def] identifier[memo_target] ( identifier[candidates] , identifier[args] ): identifier[fitness] =[] keyword[for] identifier[candidate] keyword[in] identifier[candidates] : identifier[lookup_value] = identifier[pickle] . identifier[dumps] ( identifier[candidate] , literal[int] ) keyword[if] identifier[lookup_value] keyword[not] keyword[in] identifier[cache] : identifier[cache] [ identifier[lookup_value] ]= identifier[func] ([ identifier[candidate] ], identifier[args] )[ literal[int] ] identifier[fitness] . identifier[append] ( identifier[cache] [ identifier[lookup_value] ]) keyword[return] identifier[fitness] keyword[return] identifier[memo_target] keyword[else] : keyword[def] identifier[memoize_factory] ( identifier[func] ): keyword[return] identifier[memoize] ( identifier[func] , identifier[maxlen] = identifier[maxlen] ) keyword[return] identifier[memoize_factory]
def memoize(func=None, maxlen=None): """Cache a function's return value each time it is called. This function serves as a function decorator to provide a caching of evaluated fitness values. If called later with the same arguments, the cached value is returned instead of being re-evaluated. This decorator assumes that candidates are individually pickleable, and their pickled values are used for hashing into a dictionary. It should be used when evaluating an *expensive* fitness function to avoid costly re-evaluation of those fitnesses. The typical usage is as follows:: @memoize def expensive_fitness_function(candidates, args): # Implementation of expensive fitness calculation pass It is also possible to provide the named argument *maxlen*, which specifies the size of the memoization cache to use. (If *maxlen* is ``None``, then an unbounded cache is used.) Once the size of the cache has reached *maxlen*, the oldest element is replaced by the newest element in order to keep the size constant. This usage is as follows:: @memoize(maxlen=100) def expensive_fitness_function(candidates, args): # Implementation of expensive fitness calculation pass .. warning:: The ``maxlen`` parameter must be passed as a named keyword argument, or an ``AttributeError`` will be raised (e.g., saying ``@memoize(100)`` will cause an error). """ if func is not None: cache = BoundedOrderedDict(maxlen=maxlen) @functools.wraps(func) def memo_target(candidates, args): fitness = [] for candidate in candidates: lookup_value = pickle.dumps(candidate, 1) if lookup_value not in cache: cache[lookup_value] = func([candidate], args)[0] # depends on [control=['if'], data=['lookup_value', 'cache']] fitness.append(cache[lookup_value]) # depends on [control=['for'], data=['candidate']] return fitness return memo_target # depends on [control=['if'], data=['func']] else: def memoize_factory(func): return memoize(func, maxlen=maxlen) return memoize_factory
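# BoundedOrderedDict is used above but not shown. A minimal sketch consistent
# with that usage (dict access plus a maxlen bound that evicts the oldest
# entry once full); the real class in inspyred may differ in detail.
from collections import OrderedDict

class BoundedOrderedDict(OrderedDict):
    def __init__(self, maxlen=None):
        self.maxlen = maxlen
        super(BoundedOrderedDict, self).__init__()

    def __setitem__(self, key, value):
        super(BoundedOrderedDict, self).__setitem__(key, value)
        if self.maxlen is not None and len(self) > self.maxlen:
            self.popitem(last=False)  # drop the oldest entry

d = BoundedOrderedDict(maxlen=2)
d['a'] = 1
d['b'] = 2
d['c'] = 3
assert list(d) == ['b', 'c']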
def _follow_link(self, link_path_components, link): """Follow a link w.r.t. a path resolved so far. The component is either a real file, which is a no-op, or a symlink. In the case of a symlink, we have to modify the path as built up so far /a/b => ../c should yield /a/../c (which will normalize to /a/c) /a/b => x should yield /a/x /a/b => /x/y/z should yield /x/y/z The modified path may land us in a new spot which is itself a link, so we may repeat the process. Args: link_path_components: The resolved path built up to the link so far. link: The link object itself. Returns: (string) The updated path resolved after following the link. Raises: IOError: if there are too many levels of symbolic link """ link_path = link.contents sep = self._path_separator(link_path) # For links to absolute paths, we want to throw out everything # in the path built so far and replace with the link. For relative # links, we have to append the link to what we have so far, if not self._starts_with_root_path(link_path): # Relative path. Append remainder of path to what we have # processed so far, excluding the name of the link itself. # /a/b => ../c should yield /a/../c # (which will normalize to /c) # /a/b => d should yield a/d components = link_path_components[:-1] components.append(link_path) link_path = sep.join(components) # Don't call self.NormalizePath(), as we don't want to prepend # self.cwd. return self.normpath(link_path)
def function[_follow_link, parameter[self, link_path_components, link]]: constant[Follow a link w.r.t. a path resolved so far. The component is either a real file, which is a no-op, or a symlink. In the case of a symlink, we have to modify the path as built up so far /a/b => ../c should yield /a/../c (which will normalize to /a/c) /a/b => x should yield /a/x /a/b => /x/y/z should yield /x/y/z The modified path may land us in a new spot which is itself a link, so we may repeat the process. Args: link_path_components: The resolved path built up to the link so far. link: The link object itself. Returns: (string) The updated path resolved after following the link. Raises: IOError: if there are too many levels of symbolic link ] variable[link_path] assign[=] name[link].contents variable[sep] assign[=] call[name[self]._path_separator, parameter[name[link_path]]] if <ast.UnaryOp object at 0x7da18f00ebc0> begin[:] variable[components] assign[=] call[name[link_path_components]][<ast.Slice object at 0x7da18f00e5f0>] call[name[components].append, parameter[name[link_path]]] variable[link_path] assign[=] call[name[sep].join, parameter[name[components]]] return[call[name[self].normpath, parameter[name[link_path]]]]
keyword[def] identifier[_follow_link] ( identifier[self] , identifier[link_path_components] , identifier[link] ): literal[string] identifier[link_path] = identifier[link] . identifier[contents] identifier[sep] = identifier[self] . identifier[_path_separator] ( identifier[link_path] ) keyword[if] keyword[not] identifier[self] . identifier[_starts_with_root_path] ( identifier[link_path] ): identifier[components] = identifier[link_path_components] [:- literal[int] ] identifier[components] . identifier[append] ( identifier[link_path] ) identifier[link_path] = identifier[sep] . identifier[join] ( identifier[components] ) keyword[return] identifier[self] . identifier[normpath] ( identifier[link_path] )
def _follow_link(self, link_path_components, link): """Follow a link w.r.t. a path resolved so far. The component is either a real file, which is a no-op, or a symlink. In the case of a symlink, we have to modify the path as built up so far /a/b => ../c should yield /a/../c (which will normalize to /a/c) /a/b => x should yield /a/x /a/b => /x/y/z should yield /x/y/z The modified path may land us in a new spot which is itself a link, so we may repeat the process. Args: link_path_components: The resolved path built up to the link so far. link: The link object itself. Returns: (string) The updated path resolved after following the link. Raises: IOError: if there are too many levels of symbolic link """ link_path = link.contents sep = self._path_separator(link_path) # For links to absolute paths, we want to throw out everything # in the path built so far and replace with the link. For relative # links, we have to append the link to what we have so far, if not self._starts_with_root_path(link_path): # Relative path. Append remainder of path to what we have # processed so far, excluding the name of the link itself. # /a/b => ../c should yield /a/../c # (which will normalize to /c) # /a/b => d should yield a/d components = link_path_components[:-1] components.append(link_path) link_path = sep.join(components) # depends on [control=['if'], data=[]] # Don't call self.NormalizePath(), as we don't want to prepend # self.cwd. return self.normpath(link_path)
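# The relative/absolute split above, restated with plain posixpath so it runs
# standalone (in the snippet, the fake filesystem supplies the separator and
# normpath). The components list is the path resolved up to and including the
# link itself, split on '/'.
import posixpath

def follow(link_path_components, target):
    if not target.startswith('/'):  # relative link: replace the link name
        target = '/'.join(link_path_components[:-1] + [target])
    return posixpath.normpath(target)

assert follow(['', 'a', 'b'], '../c') == '/c'       # /a/b => ../c
assert follow(['', 'a', 'b'], 'd') == '/a/d'        # /a/b => d
assert follow(['', 'a', 'b'], '/x/y/z') == '/x/y/z' # /a/b => /x/y/z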
def _deliver_errored_events(errstream, recs):
    """Deliver errors to error stream."""
    rlogger.info("Going to handle %s failed events", len(recs))
    rlogger.info(
        "First failed event: %s",
        json.dumps(recs[0], indent=4))
    kinesis_stream = errstream.get("kinesis_stream")
    randomkey = str(uuid.uuid4())
    if kinesis_stream:
        send_to_kinesis_stream(
            recs, kinesis_stream,
            partition_key=errstream.get("partition_key", randomkey))
        rlogger.info("Sent errors to Kinesis stream '%s'", kinesis_stream)

    delivery_stream = errstream.get("firehose_delivery_stream")
    if delivery_stream:
        send_to_delivery_stream(recs, delivery_stream)
        rlogger.info("Sent error payload to Firehose delivery stream '%s'",
                     delivery_stream)
def function[_deliver_errored_events, parameter[errstream, recs]]:
    constant[Deliver errors to error stream.]
    call[name[rlogger].info, parameter[constant[Going to handle %s failed events], call[name[len], parameter[name[recs]]]]]
    call[name[rlogger].info, parameter[constant[First failed event: %s], call[name[json].dumps, parameter[call[name[recs]][constant[0]]]]]]
    variable[kinesis_stream] assign[=] call[name[errstream].get, parameter[constant[kinesis_stream]]]
    variable[randomkey] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]]
    if name[kinesis_stream] begin[:]
        call[name[send_to_kinesis_stream], parameter[name[recs], name[kinesis_stream]]]
        call[name[rlogger].info, parameter[constant[Sent errors to Kinesis stream '%s'], name[kinesis_stream]]]
    variable[delivery_stream] assign[=] call[name[errstream].get, parameter[constant[firehose_delivery_stream]]]
    if name[delivery_stream] begin[:]
        call[name[send_to_delivery_stream], parameter[name[recs], name[delivery_stream]]]
        call[name[rlogger].info, parameter[constant[Sent error payload to Firehose delivery stream '%s'], name[delivery_stream]]]
keyword[def] identifier[_deliver_errored_events] ( identifier[errstream] , identifier[recs] ):
 literal[string]
 identifier[rlogger] . identifier[info] ( literal[string] , identifier[len] ( identifier[recs] ))
 identifier[rlogger] . identifier[info] (
 literal[string] ,
 identifier[json] . identifier[dumps] ( identifier[recs] [ literal[int] ], identifier[indent] = literal[int] ))
 identifier[kinesis_stream] = identifier[errstream] . identifier[get] ( literal[string] )
 identifier[randomkey] = identifier[str] ( identifier[uuid] . identifier[uuid4] ())
 keyword[if] identifier[kinesis_stream] :
 identifier[send_to_kinesis_stream] (
 identifier[recs] , identifier[kinesis_stream] ,
 identifier[partition_key] = identifier[errstream] . identifier[get] ( literal[string] , identifier[randomkey] ))
 identifier[rlogger] . identifier[info] ( literal[string] , identifier[kinesis_stream] )
 identifier[delivery_stream] = identifier[errstream] . identifier[get] ( literal[string] )
 keyword[if] identifier[delivery_stream] :
 identifier[send_to_delivery_stream] ( identifier[recs] , identifier[delivery_stream] )
 identifier[rlogger] . identifier[info] ( literal[string] ,
 identifier[delivery_stream] )
def _deliver_errored_events(errstream, recs): """Deliver errors to error stream.""" rlogger.info('Going to handle %s failed events', len(recs)) rlogger.info('First failed event: %s', json.dumps(recs[0], indent=4)) kinesis_stream = errstream.get('kinesis_stream') randomkey = str(uuid.uuid4()) if kinesis_stream: send_to_kinesis_stream(recs, kinesis_stream, partition_key=errstream.get('partition_key', randomkey)) rlogger.info("Sent errors to Kinesis stream '%s'", kinesis_stream) # depends on [control=['if'], data=[]] delivery_stream = errstream.get('firehose_delivery_stream') if delivery_stream: send_to_delivery_stream(recs, delivery_stream) rlogger.info("Sent error payload to Firehose delivery stream '%s'", delivery_stream) # depends on [control=['if'], data=[]]
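# Shape of the errstream mapping the function expects, with illustrative
# values; both stream keys are optional, and either delivery path is simply
# skipped when its key is absent.
errstream = {
    "kinesis_stream": "my-error-stream",
    "partition_key": "event_id",                # falls back to a random UUID
    "firehose_delivery_stream": "my-firehose",
}
# _deliver_errored_events(errstream, failed_records)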
def _expand_placeholder_value(value):
    """
    Return the SQL string representation of the specified placeholder's
    value.

    @param value: the value of a placeholder such as a simple element, a
        list, or a tuple of one string.

    @note: by convention, a tuple of one string indicates that this string
        MUST not be quoted as it represents, for instance, a call to a
        stored procedure, and not textual content to insert into a table.

    @return: a SQL string representation.
    """
    if isinstance(value, (list, set)) or (isinstance(value, tuple) and len(value) != 1):
        sql_value = ','.join(
            [ RdbmsConnection._to_sql_value(
                  element if not isinstance(element, tuple) else element[0],
                  noquote=isinstance(element, tuple))
              for element in value ])

    elif isinstance(value, tuple):
        assert len(value) == 1
        value = value[0]
        assert value is None or isinstance(value, basestring), 'basestring expected instead of %s' % type(value)
        sql_value = RdbmsConnection._to_sql_value(value, True)

    else:
        sql_value = RdbmsConnection._to_sql_value(value)

    return sql_value
def function[_expand_placeholder_value, parameter[value]]: constant[ Return the SQL string representation of the specified placeholder's value. @param value: the value of a placeholder such as a simple element, a list, or a tuple of one string. @note: by convention, a tuple of one string indicates that this string MUST not be quoted as it represents, for instance, a called to a stored procedure, and not a textual content to modify into a table. @return: a SQL string representation. ] if <ast.BoolOp object at 0x7da1b2581a20> begin[:] variable[sql_value] assign[=] call[constant[,].join, parameter[<ast.ListComp object at 0x7da20e9b3940>]] return[name[sql_value]]
keyword[def] identifier[_expand_placeholder_value] ( identifier[value] ): literal[string] keyword[if] identifier[isinstance] ( identifier[value] ,( identifier[list] , identifier[set] )) keyword[or] ( identifier[isinstance] ( identifier[value] , identifier[tuple] ) keyword[and] identifier[len] ( identifier[value] )!= literal[int] ): identifier[sql_value] = literal[string] . identifier[join] ([ identifier[RdbmsConnection] . identifier[_to_sql_value] ( identifier[element] keyword[if] keyword[not] identifier[isinstance] ( identifier[element] , identifier[tuple] ) keyword[else] identifier[element] [ literal[int] ], identifier[noquote] = identifier[isinstance] ( identifier[element] , identifier[tuple] )) keyword[for] identifier[element] keyword[in] identifier[value] ]) keyword[elif] identifier[isinstance] ( identifier[value] , identifier[tuple] ): keyword[assert] identifier[len] ( identifier[value] )== literal[int] identifier[value] = identifier[value] [ literal[int] ] keyword[assert] identifier[value] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[value] , identifier[basestring] ), literal[string] % identifier[type] ( identifier[value] ) identifier[sql_value] = identifier[RdbmsConnection] . identifier[_to_sql_value] ( identifier[value] , keyword[True] ) keyword[else] : identifier[sql_value] = identifier[RdbmsConnection] . identifier[_to_sql_value] ( identifier[value] ) keyword[return] identifier[sql_value]
def _expand_placeholder_value(value): """ Return the SQL string representation of the specified placeholder's value. @param value: the value of a placeholder such as a simple element, a list, or a tuple of one string. @note: by convention, a tuple of one string indicates that this string MUST not be quoted as it represents, for instance, a called to a stored procedure, and not a textual content to modify into a table. @return: a SQL string representation. """ if isinstance(value, (list, set)) or (isinstance(value, tuple) and len(value) != 1): sql_value = ','.join([RdbmsConnection._to_sql_value(element if not isinstance(element, tuple) else element[0], noquote=isinstance(element, tuple)) for element in value]) # depends on [control=['if'], data=[]] elif isinstance(value, tuple): assert len(value) == 1 value = value[0] assert value is None or isinstance(value, basestring), 'basestring expected instead of %s' % type(value) sql_value = RdbmsConnection._to_sql_value(value, True) # depends on [control=['if'], data=[]] else: sql_value = RdbmsConnection._to_sql_value(value) return sql_value
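A small self-contained illustration of the tuple-of-one "no quote" convention described in the docstring, using a stand-in for RdbmsConnection._to_sql_value (the real method is not shown in this record):

def _to_sql_value(value, noquote=False):
    # Stand-in: quote and escape text unless told not to.
    if value is None:
        return 'NULL'
    return value if noquote else "'%s'" % str(value).replace("'", "''")

print(_to_sql_value('hello'))        # 'hello'  (quoted literal)
print(_to_sql_value('now()', True))  # now()    (raw SQL, e.g. a function call)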
def _cached_pages(self, target_page=-1):
    """
    Get a page or all pages from page generator, caching results.

    This is necessary because PDFMiner searches recursively for pages,
    so we won't know how many there are until we parse the whole document,
    which we don't want to do until we need to.
    """
    try:
        # pdfminer < 20131022
        self._pages_iter = self._pages_iter or self.doc.get_pages()
    except AttributeError:
        # pdfminer >= 20131022
        self._pages_iter = self._pages_iter or \
            PDFPage.create_pages(self.doc)

    if target_page >= 0:
        while len(self._pages) <= target_page:
            next_page = next(self._pages_iter)
            if not next_page:
                return None
            next_page.page_number = 0
            self._pages += [next_page]
        try:
            return self._pages[target_page]
        except IndexError:
            return None

    self._pages += list(self._pages_iter)
    return self._pages
def function[_cached_pages, parameter[self, target_page]]: constant[ Get a page or all pages from page generator, caching results. This is necessary because PDFMiner searches recursively for pages, so we won't know how many there are until we parse the whole document, which we don't want to do until we need to. ] <ast.Try object at 0x7da18bcc97e0> if compare[name[target_page] greater_or_equal[>=] constant[0]] begin[:] while compare[call[name[len], parameter[name[self]._pages]] less_or_equal[<=] name[target_page]] begin[:] variable[next_page] assign[=] call[name[next], parameter[name[self]._pages_iter]] if <ast.UnaryOp object at 0x7da18bcc8ee0> begin[:] return[constant[None]] name[next_page].page_number assign[=] constant[0] <ast.AugAssign object at 0x7da18bcc8940> <ast.Try object at 0x7da18bcc9b70> <ast.AugAssign object at 0x7da18bccb5b0> return[name[self]._pages]
keyword[def] identifier[_cached_pages] ( identifier[self] , identifier[target_page] =- literal[int] ): literal[string] keyword[try] : identifier[self] . identifier[_pages_iter] = identifier[self] . identifier[_pages_iter] keyword[or] identifier[self] . identifier[doc] . identifier[get_pages] () keyword[except] identifier[AttributeError] : identifier[self] . identifier[_pages_iter] = identifier[self] . identifier[_pages_iter] keyword[or] identifier[PDFPage] . identifier[create_pages] ( identifier[self] . identifier[doc] ) keyword[if] identifier[target_page] >= literal[int] : keyword[while] identifier[len] ( identifier[self] . identifier[_pages] )<= identifier[target_page] : identifier[next_page] = identifier[next] ( identifier[self] . identifier[_pages_iter] ) keyword[if] keyword[not] identifier[next_page] : keyword[return] keyword[None] identifier[next_page] . identifier[page_number] = literal[int] identifier[self] . identifier[_pages] +=[ identifier[next_page] ] keyword[try] : keyword[return] identifier[self] . identifier[_pages] [ identifier[target_page] ] keyword[except] identifier[IndexError] : keyword[return] keyword[None] identifier[self] . identifier[_pages] += identifier[list] ( identifier[self] . identifier[_pages_iter] ) keyword[return] identifier[self] . identifier[_pages]
def _cached_pages(self, target_page=-1): """ Get a page or all pages from page generator, caching results. This is necessary because PDFMiner searches recursively for pages, so we won't know how many there are until we parse the whole document, which we don't want to do until we need to. """ try: # pdfminer < 20131022 self._pages_iter = self._pages_iter or self.doc.get_pages() # depends on [control=['try'], data=[]] except AttributeError: # pdfminer >= 20131022 self._pages_iter = self._pages_iter or PDFPage.create_pages(self.doc) # depends on [control=['except'], data=[]] if target_page >= 0: while len(self._pages) <= target_page: next_page = next(self._pages_iter) if not next_page: return None # depends on [control=['if'], data=[]] next_page.page_number = 0 self._pages += [next_page] # depends on [control=['while'], data=[]] try: return self._pages[target_page] # depends on [control=['try'], data=[]] except IndexError: return None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['target_page']] self._pages += list(self._pages_iter) return self._pages
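The same lazy-caching pattern in isolation: consume a generator only as far as the requested index and keep everything seen so far. A generic sketch, not pdfminer-specific:

class CachedIterator:
    def __init__(self, iterable):
        self._it = iter(iterable)
        self._cache = []

    def get(self, index):
        # Pull from the underlying iterator only up to the requested index.
        while len(self._cache) <= index:
            try:
                self._cache.append(next(self._it))
            except StopIteration:
                return None
        return self._cache[index]

pages = CachedIterator(range(10))
print(pages.get(3))  # 3 (items 0..3 are consumed once; later calls hit the cache)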
def registerCategory(category):
    """
    Register a given category in the debug system.
    A level will be assigned to it based on previous calls to setDebug.
    """
    # parse what level it is set to based on _DEBUG
    # example: *:2,admin:4
    global _DEBUG
    global _levels
    global _categories

    level = 0
    chunks = _DEBUG.split(',')
    for chunk in chunks:
        if not chunk:
            continue
        if ':' in chunk:
            spec, value = chunk.split(':')
        else:
            spec = '*'
            value = chunk

        # our glob is unix filename style globbing, so cheat with fnmatch
        # fnmatch.fnmatch didn't work for this, so don't use it
        if category in fnmatch.filter((category, ), spec):
            # we have a match, so set level based on string or int
            if not value:
                continue
            try:
                level = int(value)
            except ValueError:
                # e.g. *; we default to most
                level = 5

    # store it
    _categories[category] = level
def function[registerCategory, parameter[category]]: constant[ Register a given category in the debug system. A level will be assigned to it based on previous calls to setDebug. ] <ast.Global object at 0x7da1b0a48e20> <ast.Global object at 0x7da1b0a4ad40> <ast.Global object at 0x7da1b0a49b70> variable[level] assign[=] constant[0] variable[chunks] assign[=] call[name[_DEBUG].split, parameter[constant[,]]] for taget[name[chunk]] in starred[name[chunks]] begin[:] if <ast.UnaryOp object at 0x7da1b0a48490> begin[:] continue if compare[constant[:] in name[chunk]] begin[:] <ast.Tuple object at 0x7da1b0a48850> assign[=] call[name[chunk].split, parameter[constant[:]]] if compare[name[category] in call[name[fnmatch].filter, parameter[tuple[[<ast.Name object at 0x7da1b0a4a4d0>]], name[spec]]]] begin[:] if <ast.UnaryOp object at 0x7da1b0a497b0> begin[:] continue <ast.Try object at 0x7da1b0a4af20> call[name[_categories]][name[category]] assign[=] name[level]
keyword[def] identifier[registerCategory] ( identifier[category] ): literal[string] keyword[global] identifier[_DEBUG] keyword[global] identifier[_levels] keyword[global] identifier[_categories] identifier[level] = literal[int] identifier[chunks] = identifier[_DEBUG] . identifier[split] ( literal[string] ) keyword[for] identifier[chunk] keyword[in] identifier[chunks] : keyword[if] keyword[not] identifier[chunk] : keyword[continue] keyword[if] literal[string] keyword[in] identifier[chunk] : identifier[spec] , identifier[value] = identifier[chunk] . identifier[split] ( literal[string] ) keyword[else] : identifier[spec] = literal[string] identifier[value] = identifier[chunk] keyword[if] identifier[category] keyword[in] identifier[fnmatch] . identifier[filter] (( identifier[category] ,), identifier[spec] ): keyword[if] keyword[not] identifier[value] : keyword[continue] keyword[try] : identifier[level] = identifier[int] ( identifier[value] ) keyword[except] identifier[ValueError] : identifier[level] = literal[int] identifier[_categories] [ identifier[category] ]= identifier[level]
def registerCategory(category): """ Register a given category in the debug system. A level will be assigned to it based on previous calls to setDebug. """ # parse what level it is set to based on _DEBUG # example: *:2,admin:4 global _DEBUG global _levels global _categories level = 0 chunks = _DEBUG.split(',') for chunk in chunks: if not chunk: continue # depends on [control=['if'], data=[]] if ':' in chunk: (spec, value) = chunk.split(':') # depends on [control=['if'], data=['chunk']] else: spec = '*' value = chunk # our glob is unix filename style globbing, so cheat with fnmatch # fnmatch.fnmatch didn't work for this, so don't use it if category in fnmatch.filter((category,), spec): # we have a match, so set level based on string or int if not value: continue # depends on [control=['if'], data=[]] try: level = int(value) # depends on [control=['try'], data=[]] except ValueError: # e.g. *; we default to most level = 5 # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chunk']] # store it _categories[category] = level
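How the _DEBUG spec parsing behaves for a given category, shown standalone and mirroring the logic above, under the assumption _DEBUG = '*:2,admin:4':

import fnmatch

_DEBUG = '*:2,admin:4'

def level_for(category):
    level = 0
    for chunk in _DEBUG.split(','):
        if ':' in chunk:
            spec, value = chunk.split(':')
        else:
            spec, value = '*', chunk
        if category in fnmatch.filter((category,), spec) and value:
            try:
                level = int(value)
            except ValueError:
                level = 5  # e.g. a bare '*' means "most verbose"
    return level

print(level_for('admin'))  # 4 -- the later, more specific chunk wins
print(level_for('net'))    # 2 -- matched only by the '*' glob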
def write(self, text: str):
    """
    Prints text to the screen. Supports colors by using the color constants.
    To use colors, add the color before the text you want to print.

    :param text: The text to print.
    """
    # Default color is NORMAL.
    last_color = (self._DARK_CODE, 0)

    # We use splitlines with keepends in order to keep the line breaks.
    # Then we split by using the console width.
    original_lines = text.splitlines(True)
    lines = self._split_lines(original_lines) if self._width_limit else original_lines

    # Print the new width-formatted lines.
    for line in lines:
        # Print indents only at line beginnings.
        if not self._in_line:
            self._writer.write(' ' * self.indents_sum)

        # Remove colors if needed.
        if not self._colors:
            for color_code in self._ANSI_REGEXP.findall(line):
                line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
        elif not self._ANSI_REGEXP.match(line):
            # Check if the line starts with a color. If not, we apply the color from the last line.
            line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line

        # Print the final line.
        self._writer.write(line)

        # Update the in_line status.
        self._in_line = not line.endswith(self.LINE_SEP)

        # Update the last color used.
        if self._colors:
            last_color = self._ANSI_REGEXP.findall(line)[-1]

    # Update last position (if there was no line break in the end).
    if len(lines) > 0:
        last_line = lines[-1]
        if not last_line.endswith(self.LINE_SEP):
            # Strip the colors to figure out the real number of characters in the line.
            if self._colors:
                for color_code in self._ANSI_REGEXP.findall(last_line):
                    last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
            self._last_position += len(last_line)
        else:
            self._last_position = 0
            self._is_first_line = False
    else:
        self._last_position = 0

    # Reset colors for the next print.
    if self._colors and not text.endswith(self.NORMAL):
        self._writer.write(self.NORMAL)
def function[write, parameter[self, text]]: constant[ Prints text to the screen. Supports colors by using the color constants. To use colors, add the color before the text you want to print. :param text: The text to print. ] variable[last_color] assign[=] tuple[[<ast.Attribute object at 0x7da1b2347d90>, <ast.Constant object at 0x7da1b2344550>]] variable[original_lines] assign[=] call[name[text].splitlines, parameter[constant[True]]] variable[lines] assign[=] <ast.IfExp object at 0x7da1b2344820> for taget[name[line]] in starred[name[lines]] begin[:] if <ast.UnaryOp object at 0x7da1b23441c0> begin[:] call[name[self]._writer.write, parameter[binary_operation[constant[ ] * name[self].indents_sum]]] if <ast.UnaryOp object at 0x7da20c991ae0> begin[:] for taget[name[color_code]] in starred[call[name[self]._ANSI_REGEXP.findall, parameter[name[line]]]] begin[:] variable[line] assign[=] call[name[line].replace, parameter[binary_operation[name[self]._ANSI_COLOR_CODE <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20c990e20>, <ast.Call object at 0x7da20c992d40>]]], constant[]]] call[name[self]._writer.write, parameter[name[line]]] name[self]._in_line assign[=] <ast.UnaryOp object at 0x7da1b0b1b310> if name[self]._colors begin[:] variable[last_color] assign[=] call[call[name[self]._ANSI_REGEXP.findall, parameter[name[line]]]][<ast.UnaryOp object at 0x7da1b0b1baf0>] if compare[call[name[len], parameter[name[lines]]] greater[>] constant[0]] begin[:] variable[last_line] assign[=] call[name[lines]][<ast.UnaryOp object at 0x7da1b0b1b460>] if <ast.UnaryOp object at 0x7da1b0b1ba00> begin[:] if name[self]._colors begin[:] for taget[name[color_code]] in starred[call[name[self]._ANSI_REGEXP.findall, parameter[name[last_line]]]] begin[:] variable[last_line] assign[=] call[name[last_line].replace, parameter[binary_operation[name[self]._ANSI_COLOR_CODE <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b0b1b9d0>, <ast.Call object at 0x7da1b0b1b790>]]], constant[]]] <ast.AugAssign object at 0x7da18f00c880> if <ast.BoolOp object at 0x7da18f00dd80> begin[:] call[name[self]._writer.write, parameter[name[self].NORMAL]]
keyword[def] identifier[write] ( identifier[self] , identifier[text] : identifier[str] ): literal[string] identifier[last_color] =( identifier[self] . identifier[_DARK_CODE] , literal[int] ) identifier[original_lines] = identifier[text] . identifier[splitlines] ( keyword[True] ) identifier[lines] = identifier[self] . identifier[_split_lines] ( identifier[original_lines] ) keyword[if] identifier[self] . identifier[_width_limit] keyword[else] identifier[original_lines] keyword[for] identifier[line] keyword[in] identifier[lines] : keyword[if] keyword[not] identifier[self] . identifier[_in_line] : identifier[self] . identifier[_writer] . identifier[write] ( literal[string] * identifier[self] . identifier[indents_sum] ) keyword[if] keyword[not] identifier[self] . identifier[_colors] : keyword[for] identifier[color_code] keyword[in] identifier[self] . identifier[_ANSI_REGEXP] . identifier[findall] ( identifier[line] ): identifier[line] = identifier[line] . identifier[replace] ( identifier[self] . identifier[_ANSI_COLOR_CODE] %( identifier[color_code] [ literal[int] ], identifier[int] ( identifier[color_code] [ literal[int] ])), literal[string] ) keyword[elif] keyword[not] identifier[self] . identifier[_ANSI_REGEXP] . identifier[match] ( identifier[line] ): identifier[line] = identifier[self] . identifier[_ANSI_COLOR_CODE] %( identifier[last_color] [ literal[int] ], identifier[int] ( identifier[last_color] [ literal[int] ]))+ identifier[line] identifier[self] . identifier[_writer] . identifier[write] ( identifier[line] ) identifier[self] . identifier[_in_line] = keyword[not] identifier[line] . identifier[endswith] ( identifier[self] . identifier[LINE_SEP] ) keyword[if] identifier[self] . identifier[_colors] : identifier[last_color] = identifier[self] . identifier[_ANSI_REGEXP] . identifier[findall] ( identifier[line] )[- literal[int] ] keyword[if] identifier[len] ( identifier[lines] )> literal[int] : identifier[last_line] = identifier[lines] [- literal[int] ] keyword[if] keyword[not] identifier[last_line] . identifier[endswith] ( identifier[self] . identifier[LINE_SEP] ): keyword[if] identifier[self] . identifier[_colors] : keyword[for] identifier[color_code] keyword[in] identifier[self] . identifier[_ANSI_REGEXP] . identifier[findall] ( identifier[last_line] ): identifier[last_line] = identifier[last_line] . identifier[replace] ( identifier[self] . identifier[_ANSI_COLOR_CODE] %( identifier[color_code] [ literal[int] ], identifier[int] ( identifier[color_code] [ literal[int] ])), literal[string] ) identifier[self] . identifier[_last_position] += identifier[len] ( identifier[last_line] ) keyword[else] : identifier[self] . identifier[_last_position] = literal[int] identifier[self] . identifier[_is_first_line] = keyword[False] keyword[else] : identifier[self] . identifier[_last_position] = literal[int] keyword[if] identifier[self] . identifier[_colors] keyword[and] keyword[not] identifier[text] . identifier[endswith] ( identifier[self] . identifier[NORMAL] ): identifier[self] . identifier[_writer] . identifier[write] ( identifier[self] . identifier[NORMAL] )
def write(self, text: str): """ Prints text to the screen. Supports colors by using the color constants. To use colors, add the color before the text you want to print. :param text: The text to print. """ # Default color is NORMAL. last_color = (self._DARK_CODE, 0) # We use splitlines with keepends in order to keep the line breaks. # Then we split by using the console width. original_lines = text.splitlines(True) lines = self._split_lines(original_lines) if self._width_limit else original_lines # Print the new width-formatted lines. for line in lines: # Print indents only at line beginnings. if not self._in_line: self._writer.write(' ' * self.indents_sum) # depends on [control=['if'], data=[]] # Remove colors if needed. if not self._colors: for color_code in self._ANSI_REGEXP.findall(line): line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '') # depends on [control=['for'], data=['color_code']] # depends on [control=['if'], data=[]] elif not self._ANSI_REGEXP.match(line): # Check if the line starts with a color. If not, we apply the color from the last line. line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line # depends on [control=['if'], data=[]] # Print the final line. self._writer.write(line) # Update the in_line status. self._in_line = not line.endswith(self.LINE_SEP) # Update the last color used. if self._colors: last_color = self._ANSI_REGEXP.findall(line)[-1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # Update last position (if there was no line break in the end). if len(lines) > 0: last_line = lines[-1] if not last_line.endswith(self.LINE_SEP): # Strip the colors to figure out the real number of characters in the line. if self._colors: for color_code in self._ANSI_REGEXP.findall(last_line): last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '') # depends on [control=['for'], data=['color_code']] # depends on [control=['if'], data=[]] self._last_position += len(last_line) # depends on [control=['if'], data=[]] else: self._last_position = 0 self._is_first_line = False # depends on [control=['if'], data=[]] else: self._last_position = 0 # Reset colors for the next print. if self._colors and (not text.endswith(self.NORMAL)): self._writer.write(self.NORMAL) # depends on [control=['if'], data=[]]
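The class attributes _ANSI_REGEXP and _ANSI_COLOR_CODE are defined elsewhere; judging by the two-group substitution above, they describe SGR sequences such as \x1b[1;31m. A minimal standalone version of the "strip colors" step under that assumption:

import re

ANSI_COLOR_CODE = '\x1b[%s;%sm'                  # assumed format string
ANSI_REGEXP = re.compile(r'\x1b\[(\d+);(\d+)m')  # assumed pattern

def strip_colors(line):
    for code in ANSI_REGEXP.findall(line):
        line = line.replace(ANSI_COLOR_CODE % (code[0], int(code[1])), '')
    return line

print(strip_colors('\x1b[1;31mred\x1b[0;0m plain'))  # red plain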
def action(args):
    """
    Show information about reference packages.
    """
    log.info('loading reference package')

    pkg = refpkg.Refpkg(args.refpkg, create=False)

    with open(pkg.file_abspath('seq_info'), 'rU') as seq_info:
        seqinfo = list(csv.DictReader(seq_info))
        snames = [row['seqname'] for row in seqinfo]

    if args.seq_names:
        print('\n'.join(snames))
    elif args.tally:
        tally_taxa(pkg)
    elif args.lengths:
        print_lengths(pkg)
    else:
        print('number of sequences:', len(snames))
        print('package components\n', '\n'.join(sorted(pkg.file_keys())))
def function[action, parameter[args]]: constant[ Show information about reference packages. ] call[name[log].info, parameter[constant[loading reference package]]] variable[pkg] assign[=] call[name[refpkg].Refpkg, parameter[name[args].refpkg]] with call[name[open], parameter[call[name[pkg].file_abspath, parameter[constant[seq_info]]], constant[rU]]] begin[:] variable[seqinfo] assign[=] call[name[list], parameter[call[name[csv].DictReader, parameter[name[seq_info]]]]] variable[snames] assign[=] <ast.ListComp object at 0x7da1b1a1e1a0> if name[args].seq_names begin[:] call[name[print], parameter[call[constant[ ].join, parameter[name[snames]]]]]
keyword[def] identifier[action] ( identifier[args] ): literal[string] identifier[log] . identifier[info] ( literal[string] ) identifier[pkg] = identifier[refpkg] . identifier[Refpkg] ( identifier[args] . identifier[refpkg] , identifier[create] = keyword[False] ) keyword[with] identifier[open] ( identifier[pkg] . identifier[file_abspath] ( literal[string] ), literal[string] ) keyword[as] identifier[seq_info] : identifier[seqinfo] = identifier[list] ( identifier[csv] . identifier[DictReader] ( identifier[seq_info] )) identifier[snames] =[ identifier[row] [ literal[string] ] keyword[for] identifier[row] keyword[in] identifier[seqinfo] ] keyword[if] identifier[args] . identifier[seq_names] : identifier[print] ( literal[string] . identifier[join] ( identifier[snames] )) keyword[elif] identifier[args] . identifier[tally] : identifier[tally_taxa] ( identifier[pkg] ) keyword[elif] identifier[args] . identifier[lengths] : identifier[print_lengths] ( identifier[pkg] ) keyword[else] : identifier[print] ( literal[string] , identifier[len] ( identifier[snames] )) identifier[print] ( literal[string] , literal[string] . identifier[join] ( identifier[sorted] ( identifier[pkg] . identifier[file_keys] ())))
def action(args): """ Show information about reference packages. """ log.info('loading reference package') pkg = refpkg.Refpkg(args.refpkg, create=False) with open(pkg.file_abspath('seq_info'), 'rU') as seq_info: seqinfo = list(csv.DictReader(seq_info)) snames = [row['seqname'] for row in seqinfo] # depends on [control=['with'], data=['seq_info']] if args.seq_names: print('\n'.join(snames)) # depends on [control=['if'], data=[]] elif args.tally: tally_taxa(pkg) # depends on [control=['if'], data=[]] elif args.lengths: print_lengths(pkg) # depends on [control=['if'], data=[]] else: print('number of sequences:', len(snames)) print('package components\n', '\n'.join(sorted(pkg.file_keys())))
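The seq_info file read above is a CSV with (at least) a seqname column; a standalone sketch of the same extraction step, with an in-memory file and illustrative column names:

import csv
import io

seq_info = io.StringIO('seqname,tax_id\nseqA,123\nseqB,456\n')
snames = [row['seqname'] for row in csv.DictReader(seq_info)]
print('number of sequences:', len(snames))  # number of sequences: 2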
def handle_markdown(value):
    md = markdown(
        value,
        extensions=[
            'markdown.extensions.fenced_code',
            'codehilite',
        ]
    )
    """
    For some unknown reason markdown wraps the value in <p> tags.
    Currently there doesn't seem to be an extension to turn this off.
    """
    open_tag = '<p>'
    close_tag = '</p>'
    if md.startswith(open_tag) and md.endswith(close_tag):
        md = md[len(open_tag):-len(close_tag)]
    return mark_safe(md)
def function[handle_markdown, parameter[value]]: variable[md] assign[=] call[name[markdown], parameter[name[value]]] constant[ For some unknown reason markdown wraps the value in <p> tags. Currently there doesn't seem to be an extension to turn this off. ] variable[open_tag] assign[=] constant[<p>] variable[close_tag] assign[=] constant[</p>] if <ast.BoolOp object at 0x7da18ede4a60> begin[:] variable[md] assign[=] call[name[md]][<ast.Slice object at 0x7da18ede5de0>] return[call[name[mark_safe], parameter[name[md]]]]
keyword[def] identifier[handle_markdown] ( identifier[value] ): identifier[md] = identifier[markdown] ( identifier[value] , identifier[extensions] =[ literal[string] , literal[string] , ] ) literal[string] identifier[open_tag] = literal[string] identifier[close_tag] = literal[string] keyword[if] identifier[md] . identifier[startswith] ( identifier[open_tag] ) keyword[and] identifier[md] . identifier[endswith] ( identifier[close_tag] ): identifier[md] = identifier[md] [ identifier[len] ( identifier[open_tag] ):- identifier[len] ( identifier[close_tag] )] keyword[return] identifier[mark_safe] ( identifier[md] )
def handle_markdown(value): md = markdown(value, extensions=['markdown.extensions.fenced_code', 'codehilite']) " For some unknown reason markdown wraps the value in <p> tags.\n Currently there doesn't seem to be an extension to turn this off.\n " open_tag = '<p>' close_tag = '</p>' if md.startswith(open_tag) and md.endswith(close_tag): md = md[len(open_tag):-len(close_tag)] # depends on [control=['if'], data=[]] return mark_safe(md)
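What the <p>-stripping accomplishes, shown standalone: markdown() wraps a bare paragraph in <p> tags, and the slice removes exactly that outer pair.

md = '<p>Hello <em>world</em></p>'  # typical markdown() output for one paragraph
open_tag, close_tag = '<p>', '</p>'
if md.startswith(open_tag) and md.endswith(close_tag):
    md = md[len(open_tag):-len(close_tag)]
print(md)  # Hello <em>world</em>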
def gaussian_gradient_magnitude(image, sigma = 5, voxelspacing = None, mask = slice(None)):
    r"""
    Computes the gradient magnitude (edge-detection) of the supplied image using
    gaussian derivatives and returns the intensity values.

    Optionally a binary mask can be supplied to select the voxels for which the
    feature should be extracted.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    sigma : number or sequence of numbers
        Standard deviation for Gaussian kernel. The standard deviations of the
        Gaussian filter are given for each axis as a sequence, or as a single
        number, in which case it is equal for all axes. Note that the voxel
        spacing of the image is taken into account; the given values are
        treated as mm.
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    gaussian_gradient_magnitude : ndarray
        The gaussian gradient magnitude of the supplied image.
    """
    return _extract_feature(_extract_gaussian_gradient_magnitude, image, mask, sigma = sigma, voxelspacing = voxelspacing)
def function[gaussian_gradient_magnitude, parameter[image, sigma, voxelspacing, mask]]: constant[ Computes the gradient magnitude (edge-detection) of the supplied image using gaussian derivates and returns the intensity values. Optionally a binary mask can be supplied to select the voxels for which the feature should be extracted. Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). sigma : number or sequence of numbers Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note that the voxel spacing of the image is taken into account, the given values are treated as mm. voxelspacing : sequence of floats The side-length of each voxel. mask : array_like A binary mask for the image. Returns ------- gaussian_gradient_magnitude : ndarray The gaussian gradient magnitude of the supplied image. ] return[call[name[_extract_feature], parameter[name[_extract_gaussian_gradient_magnitude], name[image], name[mask]]]]
keyword[def] identifier[gaussian_gradient_magnitude] ( identifier[image] , identifier[sigma] = literal[int] , identifier[voxelspacing] = keyword[None] , identifier[mask] = identifier[slice] ( keyword[None] )): literal[string] keyword[return] identifier[_extract_feature] ( identifier[_extract_gaussian_gradient_magnitude] , identifier[image] , identifier[mask] , identifier[sigma] = identifier[sigma] , identifier[voxelspacing] = identifier[voxelspacing] )
def gaussian_gradient_magnitude(image, sigma=5, voxelspacing=None, mask=slice(None)): """ Computes the gradient magnitude (edge-detection) of the supplied image using gaussian derivates and returns the intensity values. Optionally a binary mask can be supplied to select the voxels for which the feature should be extracted. Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). sigma : number or sequence of numbers Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note that the voxel spacing of the image is taken into account, the given values are treated as mm. voxelspacing : sequence of floats The side-length of each voxel. mask : array_like A binary mask for the image. Returns ------- gaussian_gradient_magnitude : ndarray The gaussian gradient magnitude of the supplied image. """ return _extract_feature(_extract_gaussian_gradient_magnitude, image, mask, sigma=sigma, voxelspacing=voxelspacing)
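The underlying filter is presumably scipy.ndimage.gaussian_gradient_magnitude, and the docstring's note that sigma is given in mm suggests dividing by the voxel spacing per axis. A sketch of that conversion — an assumption about _extract_gaussian_gradient_magnitude, which is not shown in this record:

import numpy as np
from scipy.ndimage import gaussian_gradient_magnitude as ggm

image = np.random.rand(32, 32, 16)
sigma_mm = 5.0
voxelspacing = (1.0, 1.0, 2.0)  # mm per voxel along each axis

# Convert a physical sigma (mm) into per-axis sigmas in voxel units.
sigma_voxels = [sigma_mm / s for s in voxelspacing]
edges = ggm(image, sigma=sigma_voxels)
print(edges.shape)  # (32, 32, 16)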
def stop_artifact_creation(self, id_or_uri, task_uri):
    """
    Stops creation of the selected Artifact Bundle.

    Args:
        id_or_uri: ID or URI of the Artifact Bundle.
        task_uri: Task URI associated with the Artifact Bundle.

    Returns:
        string:
    """
    data = {
        "taskUri": task_uri
    }

    uri = self.URI + '/' + extract_id_from_uri(id_or_uri) + self.STOP_CREATION_PATH

    return self._client.update(data, uri=uri)
def function[stop_artifact_creation, parameter[self, id_or_uri, task_uri]]: constant[ Stops creation of the selected Artifact Bundle. Args: id_or_uri: ID or URI of the Artifact Bundle. task_uri: Task URI associated with the Artifact Bundle. Returns: string: ] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20c76e830>], [<ast.Name object at 0x7da20c76faf0>]] variable[uri] assign[=] binary_operation[binary_operation[binary_operation[name[self].URI + constant[/]] + call[name[extract_id_from_uri], parameter[name[id_or_uri]]]] + name[self].STOP_CREATION_PATH] return[call[name[self]._client.update, parameter[name[data]]]]
keyword[def] identifier[stop_artifact_creation] ( identifier[self] , identifier[id_or_uri] , identifier[task_uri] ): literal[string] identifier[data] ={ literal[string] : identifier[task_uri] } identifier[uri] = identifier[self] . identifier[URI] + literal[string] + identifier[extract_id_from_uri] ( identifier[id_or_uri] )+ identifier[self] . identifier[STOP_CREATION_PATH] keyword[return] identifier[self] . identifier[_client] . identifier[update] ( identifier[data] , identifier[uri] = identifier[uri] )
def stop_artifact_creation(self, id_or_uri, task_uri): """ Stops creation of the selected Artifact Bundle. Args: id_or_uri: ID or URI of the Artifact Bundle. task_uri: Task URI associated with the Artifact Bundle. Returns: string: """ data = {'taskUri': task_uri} uri = self.URI + '/' + extract_id_from_uri(id_or_uri) + self.STOP_CREATION_PATH return self._client.update(data, uri=uri)
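extract_id_from_uri is not shown in this record; a plausible one-liner for it, plus the resulting request URI. Both the helper body and the STOP_CREATION_PATH value are assumptions about the client internals:

def extract_id_from_uri(id_or_uri):
    # '/rest/artifact-bundles/abc-123' -> 'abc-123'; a bare id passes through.
    return str(id_or_uri).rstrip('/').split('/')[-1]

URI = '/rest/artifact-bundles'
STOP_CREATION_PATH = '/stopArtifactCreate'  # assumed constant
print(URI + '/' + extract_id_from_uri('/rest/artifact-bundles/abc-123') + STOP_CREATION_PATH)
# /rest/artifact-bundles/abc-123/stopArtifactCreate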
def set_variable(self, key, value, per_reference=False, access_key=None, data_type=None):
    """Sets a global variable

    :param key: the key of the global variable to be set
    :param value: the new value of the global variable
    :param per_reference: a flag to decide if the variable should be stored per reference or per value
    :param access_key: if the variable was explicitly locked with the rafcon.state lock_variable
    :raises exceptions.RuntimeError: if a wrong access key is passed
    """
    key = str(key)  # Ensure that we have the same string type for all keys (under Python2 and 3!)
    if self.variable_exist(key):
        if data_type is None:
            data_type = self.__global_variable_type_dictionary[key]
    else:
        if data_type is None:
            data_type = type(None)
    assert isinstance(data_type, type)
    self.check_value_and_type(value, data_type)

    with self.__global_lock:
        unlock = True
        if self.variable_exist(key):
            if self.is_locked(key) and self.__access_keys[key] != access_key:
                raise RuntimeError("Wrong access key for accessing global variable")
            elif self.is_locked(key):
                unlock = False
            else:
                access_key = self.lock_variable(key, block=True)
        else:
            self.__variable_locks[key] = Lock()
            access_key = self.lock_variable(key, block=True)

        # --- variable locked
        if per_reference:
            self.__global_variable_dictionary[key] = value
            self.__global_variable_type_dictionary[key] = data_type
            self.__variable_references[key] = True
        else:
            self.__global_variable_dictionary[key] = copy.deepcopy(value)
            self.__global_variable_type_dictionary[key] = data_type
            self.__variable_references[key] = False
        # --- release variable

        if unlock:
            self.unlock_variable(key, access_key)

    logger.debug("Global variable '{}' was set to value '{}' with type '{}'".format(key, value,
                                                                                    data_type.__name__))
def function[set_variable, parameter[self, key, value, per_reference, access_key, data_type]]: constant[Sets a global variable :param key: the key of the global variable to be set :param value: the new value of the global variable :param per_reference: a flag to decide if the variable should be stored per reference or per value :param access_key: if the variable was explicitly locked with the rafcon.state lock_variable :raises exceptions.RuntimeError: if a wrong access key is passed ] variable[key] assign[=] call[name[str], parameter[name[key]]] if call[name[self].variable_exist, parameter[name[key]]] begin[:] if compare[name[data_type] is constant[None]] begin[:] variable[data_type] assign[=] call[name[self].__global_variable_type_dictionary][name[key]] assert[call[name[isinstance], parameter[name[data_type], name[type]]]] call[name[self].check_value_and_type, parameter[name[value], name[data_type]]] with name[self].__global_lock begin[:] variable[unlock] assign[=] constant[True] if call[name[self].variable_exist, parameter[name[key]]] begin[:] if <ast.BoolOp object at 0x7da1b1a10e20> begin[:] <ast.Raise object at 0x7da1b1a12ce0> if name[per_reference] begin[:] call[name[self].__global_variable_dictionary][name[key]] assign[=] name[value] call[name[self].__global_variable_type_dictionary][name[key]] assign[=] name[data_type] call[name[self].__variable_references][name[key]] assign[=] constant[True] if name[unlock] begin[:] call[name[self].unlock_variable, parameter[name[key], name[access_key]]] call[name[logger].debug, parameter[call[constant[Global variable '{}' was set to value '{}' with type '{}'].format, parameter[name[key], name[value], name[data_type].__name__]]]]
keyword[def] identifier[set_variable] ( identifier[self] , identifier[key] , identifier[value] , identifier[per_reference] = keyword[False] , identifier[access_key] = keyword[None] , identifier[data_type] = keyword[None] ): literal[string] identifier[key] = identifier[str] ( identifier[key] ) keyword[if] identifier[self] . identifier[variable_exist] ( identifier[key] ): keyword[if] identifier[data_type] keyword[is] keyword[None] : identifier[data_type] = identifier[self] . identifier[__global_variable_type_dictionary] [ identifier[key] ] keyword[else] : keyword[if] identifier[data_type] keyword[is] keyword[None] : identifier[data_type] = identifier[type] ( keyword[None] ) keyword[assert] identifier[isinstance] ( identifier[data_type] , identifier[type] ) identifier[self] . identifier[check_value_and_type] ( identifier[value] , identifier[data_type] ) keyword[with] identifier[self] . identifier[__global_lock] : identifier[unlock] = keyword[True] keyword[if] identifier[self] . identifier[variable_exist] ( identifier[key] ): keyword[if] identifier[self] . identifier[is_locked] ( identifier[key] ) keyword[and] identifier[self] . identifier[__access_keys] [ identifier[key] ]!= identifier[access_key] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[elif] identifier[self] . identifier[is_locked] ( identifier[key] ): identifier[unlock] = keyword[False] keyword[else] : identifier[access_key] = identifier[self] . identifier[lock_variable] ( identifier[key] , identifier[block] = keyword[True] ) keyword[else] : identifier[self] . identifier[__variable_locks] [ identifier[key] ]= identifier[Lock] () identifier[access_key] = identifier[self] . identifier[lock_variable] ( identifier[key] , identifier[block] = keyword[True] ) keyword[if] identifier[per_reference] : identifier[self] . identifier[__global_variable_dictionary] [ identifier[key] ]= identifier[value] identifier[self] . identifier[__global_variable_type_dictionary] [ identifier[key] ]= identifier[data_type] identifier[self] . identifier[__variable_references] [ identifier[key] ]= keyword[True] keyword[else] : identifier[self] . identifier[__global_variable_dictionary] [ identifier[key] ]= identifier[copy] . identifier[deepcopy] ( identifier[value] ) identifier[self] . identifier[__global_variable_type_dictionary] [ identifier[key] ]= identifier[data_type] identifier[self] . identifier[__variable_references] [ identifier[key] ]= keyword[False] keyword[if] identifier[unlock] : identifier[self] . identifier[unlock_variable] ( identifier[key] , identifier[access_key] ) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[key] , identifier[value] , identifier[data_type] . identifier[__name__] ))
def set_variable(self, key, value, per_reference=False, access_key=None, data_type=None): """Sets a global variable :param key: the key of the global variable to be set :param value: the new value of the global variable :param per_reference: a flag to decide if the variable should be stored per reference or per value :param access_key: if the variable was explicitly locked with the rafcon.state lock_variable :raises exceptions.RuntimeError: if a wrong access key is passed """ key = str(key) # Ensure that we have the same string type for all keys (under Python2 and 3!) if self.variable_exist(key): if data_type is None: data_type = self.__global_variable_type_dictionary[key] # depends on [control=['if'], data=['data_type']] # depends on [control=['if'], data=[]] elif data_type is None: data_type = type(None) # depends on [control=['if'], data=['data_type']] assert isinstance(data_type, type) self.check_value_and_type(value, data_type) with self.__global_lock: unlock = True if self.variable_exist(key): if self.is_locked(key) and self.__access_keys[key] != access_key: raise RuntimeError('Wrong access key for accessing global variable') # depends on [control=['if'], data=[]] elif self.is_locked(key): unlock = False # depends on [control=['if'], data=[]] else: access_key = self.lock_variable(key, block=True) # depends on [control=['if'], data=[]] else: self.__variable_locks[key] = Lock() access_key = self.lock_variable(key, block=True) # --- variable locked if per_reference: self.__global_variable_dictionary[key] = value self.__global_variable_type_dictionary[key] = data_type self.__variable_references[key] = True # depends on [control=['if'], data=[]] else: self.__global_variable_dictionary[key] = copy.deepcopy(value) self.__global_variable_type_dictionary[key] = data_type self.__variable_references[key] = False # --- release variable if unlock: self.unlock_variable(key, access_key) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] logger.debug("Global variable '{}' was set to value '{}' with type '{}'".format(key, value, data_type.__name__))
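Why the per_reference flag matters: storing by value deep-copies at set time, so later mutation of the caller's object does not leak into the store. A self-contained illustration of the difference:

import copy

store = {}
payload = {'speed': 1}

store['by_ref'] = payload                  # per_reference=True behavior
store['by_val'] = copy.deepcopy(payload)   # per_reference=False behavior

payload['speed'] = 99
print(store['by_ref']['speed'])  # 99 -- shares the caller's object
print(store['by_val']['speed'])  # 1  -- snapshot taken when the variable was set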
def get_element_tail(parent_to_parse, element_path=None, default_value=u''):
    """
    :return: text following the parsed parent element if it exists,
        otherwise the default value.

    :see: get_element(parent_to_parse, element_path)
    """
    parent_element = get_element(parent_to_parse, element_path)

    if parent_element is None:
        return default_value

    if parent_element.tail:
        return parent_element.tail.strip() or default_value

    return default_value
def function[get_element_tail, parameter[parent_to_parse, element_path, default_value]]: constant[ :return: text following the parsed parent element if it exists, otherwise the default value. :see: get_element(parent_to_parse, element_path) ] variable[parent_element] assign[=] call[name[get_element], parameter[name[parent_to_parse], name[element_path]]] if compare[name[parent_element] is constant[None]] begin[:] return[name[default_value]] if name[parent_element].tail begin[:] return[<ast.BoolOp object at 0x7da1b27745b0>] return[name[default_value]]
keyword[def] identifier[get_element_tail] ( identifier[parent_to_parse] , identifier[element_path] = keyword[None] , identifier[default_value] = literal[string] ): literal[string] identifier[parent_element] = identifier[get_element] ( identifier[parent_to_parse] , identifier[element_path] ) keyword[if] identifier[parent_element] keyword[is] keyword[None] : keyword[return] identifier[default_value] keyword[if] identifier[parent_element] . identifier[tail] : keyword[return] identifier[parent_element] . identifier[tail] . identifier[strip] () keyword[or] identifier[default_value] keyword[return] identifier[default_value]
def get_element_tail(parent_to_parse, element_path=None, default_value=u''): """ :return: text following the parsed parent element if it exists, otherwise the default value. :see: get_element(parent_to_parse, element_path) """ parent_element = get_element(parent_to_parse, element_path) if parent_element is None: return default_value # depends on [control=['if'], data=[]] if parent_element.tail: return parent_element.tail.strip() or default_value # depends on [control=['if'], data=[]] return default_value
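In ElementTree, .tail is the text between an element's closing tag and the next sibling, which is what this helper returns stripped. For example:

import xml.etree.ElementTree as ElementTree

root = ElementTree.fromstring('<a><b>inner</b>  trailing text </a>')
b = root.find('b')
print(repr(b.text))                 # 'inner'
print(repr(b.tail.strip() or u''))  # 'trailing text'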
def add_function(self, function_id=None, function=None, inputs=None,
                 outputs=None, input_domain=None, weight=None,
                 inp_weight=None, out_weight=None, description=None,
                 filters=None, await_domain=None, await_result=None,
                 **kwargs):
    """
    Add a single function node to dispatcher.

    :param function_id:
        Function node id.
        If None will be assigned as <fun.__name__>.
    :type function_id: str, optional

    :param function:
        Data node estimation function.
    :type function: callable, optional

    :param inputs:
        Ordered arguments (i.e., data node ids) needed by the function.
    :type inputs: list, optional

    :param outputs:
        Ordered results (i.e., data node ids) returned by the function.
    :type outputs: list, optional

    :param input_domain:
        A function that checks if input values satisfy the function domain.
        This can be any function that takes the same inputs of the function
        and returns True if input values satisfy the domain, otherwise
        False. In this case the dispatch algorithm doesn't pass on the node.
    :type input_domain: callable, optional

    :param weight:
        Node weight. It is a weight coefficient that is used by the dispatch
        algorithm to estimate the minimum workflow.
    :type weight: float, int, optional

    :param inp_weight:
        Edge weights from data nodes to the function node.
        It is a dictionary (key=data node id) with the weight coefficients
        used by the dispatch algorithm to estimate the minimum workflow.
    :type inp_weight: dict[str, float | int], optional

    :param out_weight:
        Edge weights from the function node to data nodes.
        It is a dictionary (key=data node id) with the weight coefficients
        used by the dispatch algorithm to estimate the minimum workflow.
    :type out_weight: dict[str, float | int], optional

    :param description:
        Function node's description.
    :type description: str, optional

    :param filters:
        A list of functions that are invoked after the invocation of the
        main function.
    :type filters: list[function], optional

    :param await_domain:
        If True the Dispatcher waits all input results before executing the
        `input_domain` function. If a number is defined this is used as
        `timeout` for `Future.result` method [default: True]. Note this is
        used when asynchronous or parallel execution is enable.
    :type await_domain: bool|int|float, optional

    :param await_result:
        If True the Dispatcher waits output results before assigning them
        to the workflow. If a number is defined this is used as `timeout`
        for `Future.result` method [default: False]. Note this is used when
        asynchronous or parallel execution is enable.
    :type await_result: bool|int|float, optional

    :param kwargs:
        Set additional node attributes using key=value.
    :type kwargs: keyword arguments, optional

    :return:
        Function node id.
    :rtype: str

    .. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_dispatcher`,
       :func:`add_from_lists`

    **--------------------------------------------------------------------**

    **Example**:

    .. testsetup::
        >>> dsp = Dispatcher(name='Dispatcher')

    Add a function node::

        >>> def my_function(a, b):
        ...     c = a + b
        ...     d = a - b
        ...     return c, d
        ...
        >>> dsp.add_function(function=my_function, inputs=['a', 'b'],
        ...                  outputs=['c', 'd'])
        'my_function'

    Add a function node with domain::

        >>> from math import log
        >>> def my_log(a, b):
        ...     return log(b - a)
        ...
        >>> def my_domain(a, b):
        ...     return a < b
        ...
        >>> dsp.add_function(function=my_log, inputs=['a', 'b'],
        ...                  outputs=['e'], input_domain=my_domain)
        'my_log'
    """
    from .utils.blue import _init
    function = _init(function)

    if inputs is None:  # Set a dummy input.
        if START not in self.nodes:
            self.add_data(START)

        inputs = [START]  # Update inputs.

    if outputs is None:  # Set a dummy output.
        if SINK not in self.nodes:
            self.add_data(SINK)

        outputs = [SINK]  # Update outputs.

    # Get parent function.
    func = parent_func(function)

    # Base function node attributes.
    attr_dict = {
        'type': 'function',
        'inputs': inputs,
        'outputs': outputs,
        'function': function,
        'wait_inputs': True,
        'index': (self.counter(),)
    }

    if input_domain:  # Add domain as node attribute.
        attr_dict['input_domain'] = input_domain

    if await_domain is not None:  # Add await_domain as node attribute.
        attr_dict['await_domain'] = await_domain

    if await_result is not None:  # Add await_result as node attribute.
        attr_dict['await_result'] = await_result

    if description is not None:  # Add description as node attribute.
        attr_dict['description'] = description

    if filters:  # Add filters as node attribute.
        attr_dict['filters'] = filters

    # Set function name.
    if function_id is None:
        try:  # Set function name.
            function_name = func.__name__
        except AttributeError as ex:
            raise ValueError('Invalid function id due to:\n{}'.format(ex))
    else:
        function_name = function_id

    from .utils.alg import get_unused_node_id
    # Get an unused node id.
    fun_id = get_unused_node_id(self.dmap, initial_guess=function_name)

    if weight is not None:  # Add weight as node attribute.
        attr_dict['weight'] = weight

    attr_dict.update(kwargs)  # Set additional attributes.

    # Add node to the dispatcher map.
    self.dmap.add_node(fun_id, **attr_dict)

    from .utils.alg import add_func_edges
    # Add input edges.
    n_data = add_func_edges(self, fun_id, inputs, inp_weight, True)

    # Add output edges.
    add_func_edges(self, fun_id, outputs, out_weight, False, n_data)

    return fun_id
def function[add_function, parameter[self, function_id, function, inputs, outputs, input_domain, weight, inp_weight, out_weight, description, filters, await_domain, await_result]]: constant[ Add a single function node to dispatcher. :param function_id: Function node id. If None will be assigned as <fun.__name__>. :type function_id: str, optional :param function: Data node estimation function. :type function: callable, optional :param inputs: Ordered arguments (i.e., data node ids) needed by the function. :type inputs: list, optional :param outputs: Ordered results (i.e., data node ids) returned by the function. :type outputs: list, optional :param input_domain: A function that checks if input values satisfy the function domain. This can be any function that takes the same inputs of the function and returns True if input values satisfy the domain, otherwise False. In this case the dispatch algorithm doesn't pass on the node. :type input_domain: callable, optional :param weight: Node weight. It is a weight coefficient that is used by the dispatch algorithm to estimate the minimum workflow. :type weight: float, int, optional :param inp_weight: Edge weights from data nodes to the function node. It is a dictionary (key=data node id) with the weight coefficients used by the dispatch algorithm to estimate the minimum workflow. :type inp_weight: dict[str, float | int], optional :param out_weight: Edge weights from the function node to data nodes. It is a dictionary (key=data node id) with the weight coefficients used by the dispatch algorithm to estimate the minimum workflow. :type out_weight: dict[str, float | int], optional :param description: Function node's description. :type description: str, optional :param filters: A list of functions that are invoked after the invocation of the main function. :type filters: list[function], optional :param await_domain: If True the Dispatcher waits all input results before executing the `input_domain` function. If a number is defined this is used as `timeout` for `Future.result` method [default: True]. Note this is used when asynchronous or parallel execution is enable. :type await_domain: bool|int|float, optional :param await_result: If True the Dispatcher waits output results before assigning them to the workflow. If a number is defined this is used as `timeout` for `Future.result` method [default: False]. Note this is used when asynchronous or parallel execution is enable. :type await_result: bool|int|float, optional :param kwargs: Set additional node attributes using key=value. :type kwargs: keyword arguments, optional :return: Function node id. :rtype: str .. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_dispatcher`, :func:`add_from_lists` **--------------------------------------------------------------------** **Example**: .. testsetup:: >>> dsp = Dispatcher(name='Dispatcher') Add a function node:: >>> def my_function(a, b): ... c = a + b ... d = a - b ... return c, d ... >>> dsp.add_function(function=my_function, inputs=['a', 'b'], ... outputs=['c', 'd']) 'my_function' Add a function node with domain:: >>> from math import log >>> def my_log(a, b): ... return log(b - a) ... >>> def my_domain(a, b): ... return a < b ... >>> dsp.add_function(function=my_log, inputs=['a', 'b'], ... 
outputs=['e'], input_domain=my_domain) 'my_log' ] from relative_module[utils.blue] import module[_init] variable[function] assign[=] call[name[_init], parameter[name[function]]] if compare[name[inputs] is constant[None]] begin[:] if compare[name[START] <ast.NotIn object at 0x7da2590d7190> name[self].nodes] begin[:] call[name[self].add_data, parameter[name[START]]] variable[inputs] assign[=] list[[<ast.Name object at 0x7da1b25d1900>]] if compare[name[outputs] is constant[None]] begin[:] if compare[name[SINK] <ast.NotIn object at 0x7da2590d7190> name[self].nodes] begin[:] call[name[self].add_data, parameter[name[SINK]]] variable[outputs] assign[=] list[[<ast.Name object at 0x7da1b25d1a50>]] variable[func] assign[=] call[name[parent_func], parameter[name[function]]] variable[attr_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b25d1360>, <ast.Constant object at 0x7da1b25d0370>, <ast.Constant object at 0x7da1b25d1870>, <ast.Constant object at 0x7da1b25d3eb0>, <ast.Constant object at 0x7da1b25d2770>, <ast.Constant object at 0x7da1b25d2170>], [<ast.Constant object at 0x7da1b25d2890>, <ast.Name object at 0x7da1b25d3070>, <ast.Name object at 0x7da1b25d22f0>, <ast.Name object at 0x7da1b25d1d50>, <ast.Constant object at 0x7da1b25d1120>, <ast.Tuple object at 0x7da1b25d2230>]] if name[input_domain] begin[:] call[name[attr_dict]][constant[input_domain]] assign[=] name[input_domain] if compare[name[await_domain] is_not constant[None]] begin[:] call[name[attr_dict]][constant[await_domain]] assign[=] name[await_domain] if compare[name[await_result] is_not constant[None]] begin[:] call[name[attr_dict]][constant[await_result]] assign[=] name[await_result] if compare[name[description] is_not constant[None]] begin[:] call[name[attr_dict]][constant[description]] assign[=] name[description] if name[filters] begin[:] call[name[attr_dict]][constant[filters]] assign[=] name[filters] if compare[name[function_id] is constant[None]] begin[:] <ast.Try object at 0x7da207f01480> from relative_module[utils.alg] import module[get_unused_node_id] variable[fun_id] assign[=] call[name[get_unused_node_id], parameter[name[self].dmap]] if compare[name[weight] is_not constant[None]] begin[:] call[name[attr_dict]][constant[weight]] assign[=] name[weight] call[name[attr_dict].update, parameter[name[kwargs]]] call[name[self].dmap.add_node, parameter[name[fun_id]]] from relative_module[utils.alg] import module[add_func_edges] variable[n_data] assign[=] call[name[add_func_edges], parameter[name[self], name[fun_id], name[inputs], name[inp_weight], constant[True]]] call[name[add_func_edges], parameter[name[self], name[fun_id], name[outputs], name[out_weight], constant[False], name[n_data]]] return[name[fun_id]]
keyword[def] identifier[add_function] ( identifier[self] , identifier[function_id] = keyword[None] , identifier[function] = keyword[None] , identifier[inputs] = keyword[None] , identifier[outputs] = keyword[None] , identifier[input_domain] = keyword[None] , identifier[weight] = keyword[None] , identifier[inp_weight] = keyword[None] , identifier[out_weight] = keyword[None] , identifier[description] = keyword[None] , identifier[filters] = keyword[None] , identifier[await_domain] = keyword[None] , identifier[await_result] = keyword[None] , ** identifier[kwargs] ): literal[string] keyword[from] . identifier[utils] . identifier[blue] keyword[import] identifier[_init] identifier[function] = identifier[_init] ( identifier[function] ) keyword[if] identifier[inputs] keyword[is] keyword[None] : keyword[if] identifier[START] keyword[not] keyword[in] identifier[self] . identifier[nodes] : identifier[self] . identifier[add_data] ( identifier[START] ) identifier[inputs] =[ identifier[START] ] keyword[if] identifier[outputs] keyword[is] keyword[None] : keyword[if] identifier[SINK] keyword[not] keyword[in] identifier[self] . identifier[nodes] : identifier[self] . identifier[add_data] ( identifier[SINK] ) identifier[outputs] =[ identifier[SINK] ] identifier[func] = identifier[parent_func] ( identifier[function] ) identifier[attr_dict] ={ literal[string] : literal[string] , literal[string] : identifier[inputs] , literal[string] : identifier[outputs] , literal[string] : identifier[function] , literal[string] : keyword[True] , literal[string] :( identifier[self] . identifier[counter] (),) } keyword[if] identifier[input_domain] : identifier[attr_dict] [ literal[string] ]= identifier[input_domain] keyword[if] identifier[await_domain] keyword[is] keyword[not] keyword[None] : identifier[attr_dict] [ literal[string] ]= identifier[await_domain] keyword[if] identifier[await_result] keyword[is] keyword[not] keyword[None] : identifier[attr_dict] [ literal[string] ]= identifier[await_result] keyword[if] identifier[description] keyword[is] keyword[not] keyword[None] : identifier[attr_dict] [ literal[string] ]= identifier[description] keyword[if] identifier[filters] : identifier[attr_dict] [ literal[string] ]= identifier[filters] keyword[if] identifier[function_id] keyword[is] keyword[None] : keyword[try] : identifier[function_name] = identifier[func] . identifier[__name__] keyword[except] identifier[AttributeError] keyword[as] identifier[ex] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[ex] )) keyword[else] : identifier[function_name] = identifier[function_id] keyword[from] . identifier[utils] . identifier[alg] keyword[import] identifier[get_unused_node_id] identifier[fun_id] = identifier[get_unused_node_id] ( identifier[self] . identifier[dmap] , identifier[initial_guess] = identifier[function_name] ) keyword[if] identifier[weight] keyword[is] keyword[not] keyword[None] : identifier[attr_dict] [ literal[string] ]= identifier[weight] identifier[attr_dict] . identifier[update] ( identifier[kwargs] ) identifier[self] . identifier[dmap] . identifier[add_node] ( identifier[fun_id] ,** identifier[attr_dict] ) keyword[from] . identifier[utils] . 
identifier[alg] keyword[import] identifier[add_func_edges] identifier[n_data] = identifier[add_func_edges] ( identifier[self] , identifier[fun_id] , identifier[inputs] , identifier[inp_weight] , keyword[True] ) identifier[add_func_edges] ( identifier[self] , identifier[fun_id] , identifier[outputs] , identifier[out_weight] , keyword[False] , identifier[n_data] ) keyword[return] identifier[fun_id]
def add_function(self, function_id=None, function=None, inputs=None, outputs=None, input_domain=None, weight=None, inp_weight=None, out_weight=None, description=None, filters=None, await_domain=None, await_result=None, **kwargs):
    """
    Add a single function node to the dispatcher.

    :param function_id:
        Function node id. If None, it will be assigned as <fun.__name__>.
    :type function_id: str, optional

    :param function:
        Data node estimation function.
    :type function: callable, optional

    :param inputs:
        Ordered arguments (i.e., data node ids) needed by the function.
    :type inputs: list, optional

    :param outputs:
        Ordered results (i.e., data node ids) returned by the function.
    :type outputs: list, optional

    :param input_domain:
        A function that checks if input values satisfy the function domain.
        This can be any function that takes the same inputs as the function
        and returns True if input values satisfy the domain, otherwise
        False; in that case the dispatch algorithm does not pass through
        the node.
    :type input_domain: callable, optional

    :param weight:
        Node weight. It is a weight coefficient that is used by the
        dispatch algorithm to estimate the minimum workflow.
    :type weight: float, int, optional

    :param inp_weight:
        Edge weights from data nodes to the function node. It is a
        dictionary (key=data node id) with the weight coefficients used by
        the dispatch algorithm to estimate the minimum workflow.
    :type inp_weight: dict[str, float | int], optional

    :param out_weight:
        Edge weights from the function node to data nodes. It is a
        dictionary (key=data node id) with the weight coefficients used by
        the dispatch algorithm to estimate the minimum workflow.
    :type out_weight: dict[str, float | int], optional

    :param description:
        Function node's description.
    :type description: str, optional

    :param filters:
        A list of functions that are invoked after the invocation of the
        main function.
    :type filters: list[function], optional

    :param await_domain:
        If True the Dispatcher waits for all input results before executing
        the `input_domain` function. If a number is defined this is used as
        `timeout` for the `Future.result` method [default: True]. Note this
        is used when asynchronous or parallel execution is enabled.
    :type await_domain: bool|int|float, optional

    :param await_result:
        If True the Dispatcher waits for output results before assigning
        them to the workflow. If a number is defined this is used as
        `timeout` for the `Future.result` method [default: False]. Note
        this is used when asynchronous or parallel execution is enabled.
    :type await_result: bool|int|float, optional

    :param kwargs:
        Set additional node attributes using key=value.
    :type kwargs: keyword arguments, optional

    :return:
        Function node id.
    :rtype: str

    .. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_dispatcher`,
       :func:`add_from_lists`

    **--------------------------------------------------------------------**

    **Example**:

    .. testsetup::
        >>> dsp = Dispatcher(name='Dispatcher')

    Add a function node::

        >>> def my_function(a, b):
        ...     c = a + b
        ...     d = a - b
        ...     return c, d
        ...
        >>> dsp.add_function(function=my_function, inputs=['a', 'b'],
        ...                  outputs=['c', 'd'])
        'my_function'

    Add a function node with domain::

        >>> from math import log
        >>> def my_log(a, b):
        ...     return log(b - a)
        ...
        >>> def my_domain(a, b):
        ...     return a < b
        ...
        >>> dsp.add_function(function=my_log, inputs=['a', 'b'],
        ...                  outputs=['e'], input_domain=my_domain)
        'my_log'
    """
    from .utils.blue import _init
    function = _init(function)
    if inputs is None:  # Set a dummy input.
if START not in self.nodes: self.add_data(START) # depends on [control=['if'], data=['START']] inputs = [START] # Update inputs. # depends on [control=['if'], data=['inputs']] if outputs is None: # Set a dummy output. if SINK not in self.nodes: self.add_data(SINK) # depends on [control=['if'], data=['SINK']] outputs = [SINK] # Update outputs. # depends on [control=['if'], data=['outputs']] # Get parent function. func = parent_func(function) # Base function node attributes. attr_dict = {'type': 'function', 'inputs': inputs, 'outputs': outputs, 'function': function, 'wait_inputs': True, 'index': (self.counter(),)} if input_domain: # Add domain as node attribute. attr_dict['input_domain'] = input_domain # depends on [control=['if'], data=[]] if await_domain is not None: # Add await_domain as node attribute. attr_dict['await_domain'] = await_domain # depends on [control=['if'], data=['await_domain']] if await_result is not None: # Add await_result as node attribute. attr_dict['await_result'] = await_result # depends on [control=['if'], data=['await_result']] if description is not None: # Add description as node attribute. attr_dict['description'] = description # depends on [control=['if'], data=['description']] if filters: # Add filters as node attribute. attr_dict['filters'] = filters # depends on [control=['if'], data=[]] # Set function name. if function_id is None: try: # Set function name. function_name = func.__name__ # depends on [control=['try'], data=[]] except AttributeError as ex: raise ValueError('Invalid function id due to:\n{}'.format(ex)) # depends on [control=['except'], data=['ex']] # depends on [control=['if'], data=[]] else: function_name = function_id from .utils.alg import get_unused_node_id # Get an unused node id. fun_id = get_unused_node_id(self.dmap, initial_guess=function_name) if weight is not None: # Add weight as node attribute. attr_dict['weight'] = weight # depends on [control=['if'], data=['weight']] attr_dict.update(kwargs) # Set additional attributes. # Add node to the dispatcher map. self.dmap.add_node(fun_id, **attr_dict) from .utils.alg import add_func_edges # Add input edges. n_data = add_func_edges(self, fun_id, inputs, inp_weight, True) # Add output edges. add_func_edges(self, fun_id, outputs, out_weight, False, n_data) return fun_id
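A hedged usage sketch for the node-creation flow above, assuming this Dispatcher is importable as in schedula (the package this API matches); the dispatcher name, the `add` function, and the node ids are illustrative only, and the filter behaviour (post-processing the function's result) follows the docstring, not a verified run.

import schedula as sh

dsp = sh.Dispatcher(name='demo')  # hypothetical dispatcher

def add(a, b):
    return a + b

# weight and filters are optional; the filter post-processes the output.
fun_id = dsp.add_function(function=add, inputs=['a', 'b'], outputs=['c'],
                          filters=[lambda x: 2 * x], weight=1)
sol = dsp.dispatch(inputs={'a': 1, 'b': 2})
print(fun_id, sol['c'])  # expected: add 6  -- (1 + 2) doubled by the filter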
def _parse_remote_model(self, context):
    """
    Parse the remote resource model and add its full name.
    :type context: models.QualiDriverModels.ResourceRemoteCommandContext
    """
    if not context.remote_endpoints:
        raise Exception('no remote resources found in context: {0}'.format(
            jsonpickle.encode(context, unpicklable=False)))
    resource = context.remote_endpoints[0]

    dictionary = jsonpickle.decode(resource.app_context.deployed_app_json)
    holder = DeployDataHolder(dictionary)
    app_resource_detail = GenericDeployedAppResourceModel()
    app_resource_detail.vm_uuid = holder.vmdetails.uid
    app_resource_detail.cloud_provider = context.resource.fullname
    app_resource_detail.fullname = resource.fullname
    if hasattr(holder.vmdetails, 'vmCustomParams'):
        app_resource_detail.vm_custom_params = holder.vmdetails.vmCustomParams
    return app_resource_detail
def function[_parse_remote_model, parameter[self, context]]:
    constant[ Parse the remote resource model and add its full name. :type context: models.QualiDriverModels.ResourceRemoteCommandContext ]
    if <ast.UnaryOp object at 0x7da18dc99180> begin[:]
        <ast.Raise object at 0x7da18dc98f40>
    variable[resource] assign[=] call[name[context].remote_endpoints][constant[0]]
    variable[dictionary] assign[=] call[name[jsonpickle].decode, parameter[name[resource].app_context.deployed_app_json]]
    variable[holder] assign[=] call[name[DeployDataHolder], parameter[name[dictionary]]]
    variable[app_resource_detail] assign[=] call[name[GenericDeployedAppResourceModel], parameter[]]
    name[app_resource_detail].vm_uuid assign[=] name[holder].vmdetails.uid
    name[app_resource_detail].cloud_provider assign[=] name[context].resource.fullname
    name[app_resource_detail].fullname assign[=] name[resource].fullname
    if call[name[hasattr], parameter[name[holder].vmdetails, constant[vmCustomParams]]] begin[:]
        name[app_resource_detail].vm_custom_params assign[=] name[holder].vmdetails.vmCustomParams
    return[name[app_resource_detail]]
keyword[def] identifier[_parse_remote_model] ( identifier[self] , identifier[context] ): literal[string] keyword[if] keyword[not] identifier[context] . identifier[remote_endpoints] : keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[jsonpickle] . identifier[encode] ( identifier[context] , identifier[unpicklable] = keyword[False] ))) identifier[resource] = identifier[context] . identifier[remote_endpoints] [ literal[int] ] identifier[dictionary] = identifier[jsonpickle] . identifier[decode] ( identifier[resource] . identifier[app_context] . identifier[deployed_app_json] ) identifier[holder] = identifier[DeployDataHolder] ( identifier[dictionary] ) identifier[app_resource_detail] = identifier[GenericDeployedAppResourceModel] () identifier[app_resource_detail] . identifier[vm_uuid] = identifier[holder] . identifier[vmdetails] . identifier[uid] identifier[app_resource_detail] . identifier[cloud_provider] = identifier[context] . identifier[resource] . identifier[fullname] identifier[app_resource_detail] . identifier[fullname] = identifier[resource] . identifier[fullname] keyword[if] identifier[hasattr] ( identifier[holder] . identifier[vmdetails] , literal[string] ): identifier[app_resource_detail] . identifier[vm_custom_params] = identifier[holder] . identifier[vmdetails] . identifier[vmCustomParams] keyword[return] identifier[app_resource_detail]
def _parse_remote_model(self, context): """ Parse the remote resource model and add its full name. :type context: models.QualiDriverModels.ResourceRemoteCommandContext """ if not context.remote_endpoints: raise Exception('no remote resources found in context: {0}'.format(jsonpickle.encode(context, unpicklable=False))) # depends on [control=['if'], data=[]] resource = context.remote_endpoints[0] dictionary = jsonpickle.decode(resource.app_context.deployed_app_json) holder = DeployDataHolder(dictionary) app_resource_detail = GenericDeployedAppResourceModel() app_resource_detail.vm_uuid = holder.vmdetails.uid app_resource_detail.cloud_provider = context.resource.fullname app_resource_detail.fullname = resource.fullname if hasattr(holder.vmdetails, 'vmCustomParams'): app_resource_detail.vm_custom_params = holder.vmdetails.vmCustomParams # depends on [control=['if'], data=[]] return app_resource_detail
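A minimal sketch of the decode step above, assuming only jsonpickle. The CloudShell context and the DeployDataHolder wrapper are replaced by a plain dict (so attribute access becomes key access), and the payload string is hypothetical.

import jsonpickle

# Hypothetical payload standing in for resource.app_context.deployed_app_json.
deployed_app_json = '{"vmdetails": {"uid": "42-ab-7f", "vmCustomParams": []}}'

# Plain JSON (no py/object tags) decodes to an ordinary dict.
dictionary = jsonpickle.decode(deployed_app_json)
print(dictionary['vmdetails']['uid'])  # 42-ab-7f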
def _srcRect_x(self, attr_name): """ Value of `p:blipFill/a:srcRect/@{attr_name}` or 0.0 if not present. """ srcRect = self.blipFill.srcRect if srcRect is None: return 0.0 return getattr(srcRect, attr_name)
def function[_srcRect_x, parameter[self, attr_name]]: constant[ Value of `p:blipFill/a:srcRect/@{attr_name}` or 0.0 if not present. ] variable[srcRect] assign[=] name[self].blipFill.srcRect if compare[name[srcRect] is constant[None]] begin[:] return[constant[0.0]] return[call[name[getattr], parameter[name[srcRect], name[attr_name]]]]
keyword[def] identifier[_srcRect_x] ( identifier[self] , identifier[attr_name] ): literal[string] identifier[srcRect] = identifier[self] . identifier[blipFill] . identifier[srcRect] keyword[if] identifier[srcRect] keyword[is] keyword[None] : keyword[return] literal[int] keyword[return] identifier[getattr] ( identifier[srcRect] , identifier[attr_name] )
def _srcRect_x(self, attr_name): """ Value of `p:blipFill/a:srcRect/@{attr_name}` or 0.0 if not present. """ srcRect = self.blipFill.srcRect if srcRect is None: return 0.0 # depends on [control=['if'], data=[]] return getattr(srcRect, attr_name)
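A stand-alone sketch of the same "default to 0.0 when the element is absent" pattern; the stub class below replaces the python-pptx srcRect element and is an assumption, not the library's real type.

class _StubSrcRect:
    l = 0.25  # hypothetical: crop 25% from the left edge

def crop_fraction(src_rect, attr_name):
    # Mirror of _srcRect_x: a missing element means "no crop".
    if src_rect is None:
        return 0.0
    return getattr(src_rect, attr_name)

print(crop_fraction(_StubSrcRect(), 'l'))  # 0.25
print(crop_fraction(None, 'l'))            # 0.0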
def OnPreferences(self, event): """Preferences event handler that launches preferences dialog""" preferences = self.interfaces.get_preferences_from_user() if preferences: for key in preferences: if type(config[key]) in (type(u""), type("")): config[key] = preferences[key] else: config[key] = ast.literal_eval(preferences[key]) self.main_window.grid.grid_renderer.cell_cache.clear() self.main_window.grid.ForceRefresh()
def function[OnPreferences, parameter[self, event]]: constant[Preferences event handler that launches preferences dialog] variable[preferences] assign[=] call[name[self].interfaces.get_preferences_from_user, parameter[]] if name[preferences] begin[:] for taget[name[key]] in starred[name[preferences]] begin[:] if compare[call[name[type], parameter[call[name[config]][name[key]]]] in tuple[[<ast.Call object at 0x7da1b16bf1f0>, <ast.Call object at 0x7da1b16bd630>]]] begin[:] call[name[config]][name[key]] assign[=] call[name[preferences]][name[key]] call[name[self].main_window.grid.grid_renderer.cell_cache.clear, parameter[]] call[name[self].main_window.grid.ForceRefresh, parameter[]]
keyword[def] identifier[OnPreferences] ( identifier[self] , identifier[event] ): literal[string] identifier[preferences] = identifier[self] . identifier[interfaces] . identifier[get_preferences_from_user] () keyword[if] identifier[preferences] : keyword[for] identifier[key] keyword[in] identifier[preferences] : keyword[if] identifier[type] ( identifier[config] [ identifier[key] ]) keyword[in] ( identifier[type] ( literal[string] ), identifier[type] ( literal[string] )): identifier[config] [ identifier[key] ]= identifier[preferences] [ identifier[key] ] keyword[else] : identifier[config] [ identifier[key] ]= identifier[ast] . identifier[literal_eval] ( identifier[preferences] [ identifier[key] ]) identifier[self] . identifier[main_window] . identifier[grid] . identifier[grid_renderer] . identifier[cell_cache] . identifier[clear] () identifier[self] . identifier[main_window] . identifier[grid] . identifier[ForceRefresh] ()
def OnPreferences(self, event): """Preferences event handler that launches preferences dialog""" preferences = self.interfaces.get_preferences_from_user() if preferences: for key in preferences: if type(config[key]) in (type(u''), type('')): config[key] = preferences[key] # depends on [control=['if'], data=[]] else: config[key] = ast.literal_eval(preferences[key]) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]] self.main_window.grid.grid_renderer.cell_cache.clear() self.main_window.grid.ForceRefresh()
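A self-contained sketch of the type-preserving update used above: string-valued settings are kept verbatim, everything else is parsed back from its string form with ast.literal_eval. The sample config keys are made up.

import ast

config = {'grid_rows': 100, 'font': 'Monospace'}      # hypothetical settings
preferences = {'grid_rows': '250', 'font': 'DejaVu'}  # dialog returns strings

for key in preferences:
    if isinstance(config[key], str):
        config[key] = preferences[key]            # keep strings as-is
    else:
        config[key] = ast.literal_eval(preferences[key])  # restore int/list/...

print(config)  # {'grid_rows': 250, 'font': 'DejaVu'}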
def cmd_extract_email(infile, verbose, jsonout): """Extract email addresses from a file or stdin. Example: \b $ cat /var/log/auth.log | habu.extract.email [email protected] [email protected] [email protected] """ if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') data = infile.read() result = [] result = extract_email(data) if jsonout: print(json.dumps(result, indent=4)) else: print('\n'.join(result))
def function[cmd_extract_email, parameter[infile, verbose, jsonout]]: constant[Extract email addresses from a file or stdin. Example:  $ cat /var/log/auth.log | habu.extract.email [email protected] [email protected] [email protected] ] if name[verbose] begin[:] call[name[logging].basicConfig, parameter[]] variable[data] assign[=] call[name[infile].read, parameter[]] variable[result] assign[=] list[[]] variable[result] assign[=] call[name[extract_email], parameter[name[data]]] if name[jsonout] begin[:] call[name[print], parameter[call[name[json].dumps, parameter[name[result]]]]]
keyword[def] identifier[cmd_extract_email] ( identifier[infile] , identifier[verbose] , identifier[jsonout] ): literal[string] keyword[if] identifier[verbose] : identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[logging] . identifier[INFO] , identifier[format] = literal[string] ) identifier[data] = identifier[infile] . identifier[read] () identifier[result] =[] identifier[result] = identifier[extract_email] ( identifier[data] ) keyword[if] identifier[jsonout] : identifier[print] ( identifier[json] . identifier[dumps] ( identifier[result] , identifier[indent] = literal[int] )) keyword[else] : identifier[print] ( literal[string] . identifier[join] ( identifier[result] ))
def cmd_extract_email(infile, verbose, jsonout): """Extract email addresses from a file or stdin. Example: \x08 $ cat /var/log/auth.log | habu.extract.email [email protected] [email protected] [email protected] """ if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') # depends on [control=['if'], data=[]] data = infile.read() result = [] result = extract_email(data) if jsonout: print(json.dumps(result, indent=4)) # depends on [control=['if'], data=[]] else: print('\n'.join(result))
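extract_email above comes from habu's own helpers; this regex-based stand-in (an assumption, not the library's implementation) reproduces the text-in, JSON-out flow without the click plumbing.

import json
import re

def extract_email(data):
    # Simplistic pattern; real email validation is considerably messier.
    return sorted(set(re.findall(r'[\w.+-]+@[\w-]+\.[\w.-]+', data)))

text = 'login from [email protected], forwarded to [email protected]'
print(json.dumps(extract_email(text), indent=4))
# ["[email protected]", "[email protected]"]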
def light_bahdanau_attention(key, context, hidden_size, projected_align=False):
    """ It is an implementation of the Bahdanau et al. attention mechanism.
        Based on the paper:
        https://arxiv.org/abs/1409.0473 "Neural Machine Translation by Jointly Learning to Align and Translate"

    Args:
        key: A tensorflow tensor with dimensionality [None, None, key_size]
        context: A tensorflow tensor with dimensionality [None, None, max_num_tokens, token_size]
        hidden_size: Number of units in hidden representation
        projected_align: Using dense layer for hidden representation of context.
            If true, between input and attention mechanism insert a dense layer
            with dimensionality [hidden_size]. If false, a dense layer is not used.

    Returns:
        output: Tensor at the output with dimensionality [None, None, hidden_size]
    """
    batch_size = tf.shape(context)[0]
    max_num_tokens, token_size = context.get_shape().as_list()[-2:]
    r_context = tf.reshape(context, shape=[-1, max_num_tokens, token_size])

    # projected_key: [None, None, hidden_size]
    projected_key = tf.layers.dense(key, hidden_size, kernel_initializer=xav())
    r_projected_key = \
        tf.tile(tf.reshape(projected_key, shape=[-1, 1, hidden_size]),
                [1, max_num_tokens, 1])

    # projected_context: [None, max_num_tokens, hidden_size]
    projected_context = \
        tf.layers.dense(r_context, hidden_size, kernel_initializer=xav())

    concat_h_state = tf.concat([projected_context, r_projected_key], -1)
    projected_state = \
        tf.layers.dense(concat_h_state, hidden_size, use_bias=False,
                        kernel_initializer=xav())
    score = \
        tf.layers.dense(tf.tanh(projected_state), units=1, use_bias=False,
                        kernel_initializer=xav())

    attn = tf.nn.softmax(score, dim=1)
    if projected_align:
        log.info("Using projected attention alignment")
        t_context = tf.transpose(projected_context, [0, 2, 1])
        output = tf.reshape(tf.matmul(t_context, attn),
                            shape=[batch_size, -1, hidden_size])
    else:
        log.info("Not using projected attention alignment")
        t_context = tf.transpose(r_context, [0, 2, 1])
        output = tf.reshape(tf.matmul(t_context, attn),
                            shape=[batch_size, -1, token_size])
    return output
def function[light_bahdanau_attention, parameter[key, context, hidden_size, projected_align]]:
    constant[ It is an implementation of the Bahdanau et al. attention mechanism. Based on the paper: https://arxiv.org/abs/1409.0473 "Neural Machine Translation by Jointly Learning to Align and Translate" Args: key: A tensorflow tensor with dimensionality [None, None, key_size] context: A tensorflow tensor with dimensionality [None, None, max_num_tokens, token_size] hidden_size: Number of units in hidden representation projected_align: Using dense layer for hidden representation of context. If true, between input and attention mechanism insert a dense layer with dimensionality [hidden_size]. If false, a dense layer is not used. Returns: output: Tensor at the output with dimensionality [None, None, hidden_size] ]
    variable[batch_size] assign[=] call[call[name[tf].shape, parameter[name[context]]]][constant[0]]
    <ast.Tuple object at 0x7da1b0371ea0> assign[=] call[call[call[name[context].get_shape, parameter[]].as_list, parameter[]]][<ast.Slice object at 0x7da1b0373b50>]
    variable[r_context] assign[=] call[name[tf].reshape, parameter[name[context]]]
    variable[projected_key] assign[=] call[name[tf].layers.dense, parameter[name[key], name[hidden_size]]]
    variable[r_projected_key] assign[=] call[name[tf].tile, parameter[call[name[tf].reshape, parameter[name[projected_key]]], list[[<ast.Constant object at 0x7da1b03721d0>, <ast.Name object at 0x7da1b0372d10>, <ast.Constant object at 0x7da1b0370ca0>]]]]
    variable[projected_context] assign[=] call[name[tf].layers.dense, parameter[name[r_context], name[hidden_size]]]
    variable[concat_h_state] assign[=] call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da1b0372cb0>, <ast.Name object at 0x7da1b0370bb0>]], <ast.UnaryOp object at 0x7da1b0370940>]]
    variable[projected_state] assign[=] call[name[tf].layers.dense, parameter[name[concat_h_state], name[hidden_size]]]
    variable[score] assign[=] call[name[tf].layers.dense, parameter[call[name[tf].tanh, parameter[name[projected_state]]]]]
    variable[attn] assign[=] call[name[tf].nn.softmax, parameter[name[score]]]
    if name[projected_align] begin[:]
        call[name[log].info, parameter[constant[Using projected attention alignment]]]
        variable[t_context] assign[=] call[name[tf].transpose, parameter[name[projected_context], list[[<ast.Constant object at 0x7da1b0372110>, <ast.Constant object at 0x7da1b0372560>, <ast.Constant object at 0x7da1b0370df0>]]]]
        variable[output] assign[=] call[name[tf].reshape, parameter[call[name[tf].matmul, parameter[name[t_context], name[attn]]]]]
    return[name[output]]
keyword[def] identifier[light_bahdanau_attention] ( identifier[key] , identifier[context] , identifier[hidden_size] , identifier[projected_align] = keyword[False] ): literal[string] identifier[batch_size] = identifier[tf] . identifier[shape] ( identifier[context] )[ literal[int] ] identifier[max_num_tokens] , identifier[token_size] = identifier[context] . identifier[get_shape] (). identifier[as_list] ()[- literal[int] :] identifier[r_context] = identifier[tf] . identifier[reshape] ( identifier[context] , identifier[shape] =[- literal[int] , identifier[max_num_tokens] , identifier[token_size] ]) identifier[projected_key] = identifier[tf] . identifier[layers] . identifier[dense] ( identifier[key] , identifier[hidden_size] , identifier[kernel_initializer] = identifier[xav] ()) identifier[r_projected_key] = identifier[tf] . identifier[tile] ( identifier[tf] . identifier[reshape] ( identifier[projected_key] , identifier[shape] =[- literal[int] , literal[int] , identifier[hidden_size] ]), [ literal[int] , identifier[max_num_tokens] , literal[int] ]) identifier[projected_context] = identifier[tf] . identifier[layers] . identifier[dense] ( identifier[r_context] , identifier[hidden_size] , identifier[kernel_initializer] = identifier[xav] ()) identifier[concat_h_state] = identifier[tf] . identifier[concat] ([ identifier[projected_context] , identifier[r_projected_key] ],- literal[int] ) identifier[projected_state] = identifier[tf] . identifier[layers] . identifier[dense] ( identifier[concat_h_state] , identifier[hidden_size] , identifier[use_bias] = keyword[False] , identifier[kernel_initializer] = identifier[xav] ()) identifier[score] = identifier[tf] . identifier[layers] . identifier[dense] ( identifier[tf] . identifier[tanh] ( identifier[projected_state] ), identifier[units] = literal[int] , identifier[use_bias] = keyword[False] , identifier[kernel_initializer] = identifier[xav] ()) identifier[attn] = identifier[tf] . identifier[nn] . identifier[softmax] ( identifier[score] , identifier[dim] = literal[int] ) keyword[if] identifier[projected_align] : identifier[log] . identifier[info] ( literal[string] ) identifier[t_context] = identifier[tf] . identifier[transpose] ( identifier[projected_context] ,[ literal[int] , literal[int] , literal[int] ]) identifier[output] = identifier[tf] . identifier[reshape] ( identifier[tf] . identifier[matmul] ( identifier[t_context] , identifier[attn] ), identifier[shape] =[ identifier[batch_size] ,- literal[int] , identifier[hidden_size] ]) keyword[else] : identifier[log] . identifier[info] ( literal[string] ) identifier[t_context] = identifier[tf] . identifier[transpose] ( identifier[r_context] ,[ literal[int] , literal[int] , literal[int] ]) identifier[output] = identifier[tf] . identifier[reshape] ( identifier[tf] . identifier[matmul] ( identifier[t_context] , identifier[attn] ), identifier[shape] =[ identifier[batch_size] ,- literal[int] , identifier[token_size] ]) keyword[return] identifier[output]
def light_bahdanau_attention(key, context, hidden_size, projected_align=False): """ It is an implementation of the Bahdanau et al. attention mechanism. Based on the paper: https://arxiv.org/abs/1409.0473 "Neural Machine Translation by Jointly Learning to Align and Translate" Args: key: A tensorflow tensor with dimensionality [None, None, key_size] context: A tensorflow tensor with dimensionality [None, None, max_num_tokens, token_size] hidden_size: Number of units in hidden representation projected_align: Using dense layer for hidden representation of context. If true, between input and attention mechanism insert a dense layer with dimensionality [hidden_size]. If false, a dense layer is not used. Returns: output: Tensor at the output with dimensionality [None, None, hidden_size] """ batch_size = tf.shape(context)[0] (max_num_tokens, token_size) = context.get_shape().as_list()[-2:] r_context = tf.reshape(context, shape=[-1, max_num_tokens, token_size]) # projected_key: [None, None, hidden_size] projected_key = tf.layers.dense(key, hidden_size, kernel_initializer=xav()) r_projected_key = tf.tile(tf.reshape(projected_key, shape=[-1, 1, hidden_size]), [1, max_num_tokens, 1]) # projected_context: [None, max_num_tokens, hidden_size] projected_context = tf.layers.dense(r_context, hidden_size, kernel_initializer=xav()) concat_h_state = tf.concat([projected_context, r_projected_key], -1) projected_state = tf.layers.dense(concat_h_state, hidden_size, use_bias=False, kernel_initializer=xav()) score = tf.layers.dense(tf.tanh(projected_state), units=1, use_bias=False, kernel_initializer=xav()) attn = tf.nn.softmax(score, dim=1) if projected_align: log.info('Using projected attention alignment') t_context = tf.transpose(projected_context, [0, 2, 1]) output = tf.reshape(tf.matmul(t_context, attn), shape=[batch_size, -1, hidden_size]) # depends on [control=['if'], data=[]] else: log.info('Not using projected attention alignment') t_context = tf.transpose(r_context, [0, 2, 1]) output = tf.reshape(tf.matmul(t_context, attn), shape=[batch_size, -1, token_size]) return output
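A NumPy sketch of the additive score computed above: concatenating the projected context and key and applying one dense layer is equivalent to W_c c_i + W_k k, after which v . tanh(...) gives the per-token logits. The shapes and random weights below are stand-ins, not the TF1 layer variables.

import numpy as np

rng = np.random.default_rng(0)
max_num_tokens, token_size, hidden_size = 5, 8, 16
context = rng.normal(size=(max_num_tokens, token_size))  # one batch element
key = rng.normal(size=(hidden_size,))                    # projected key

W_c = rng.normal(size=(token_size, hidden_size))
W_k = rng.normal(size=(hidden_size, hidden_size))
v = rng.normal(size=(hidden_size,))

scores = np.tanh(context @ W_c + key @ W_k) @ v          # [max_num_tokens]
attn = np.exp(scores - scores.max())
attn /= attn.sum()                                       # softmax over tokens
output = attn @ context                                  # weighted sum of tokens
print(output.shape)  # (8,) -- one attended vector per query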
def _round_to(dt, hour, minute, second):
    """
    Round the given datetime to the nearest time with the given hour,
    minute and second (ties round down to the earlier time).
    """
    new_dt = dt.replace(hour=hour, minute=minute, second=second)
    if new_dt == dt:
        return new_dt
    elif new_dt < dt:
        before = new_dt
        after = new_dt + timedelta(days=1)
    elif new_dt > dt:
        before = new_dt - timedelta(days=1)
        after = new_dt

    d1 = dt - before
    d2 = after - dt

    if d1 < d2:
        return before
    elif d1 > d2:
        return after
    else:
        return before
def function[_round_to, parameter[dt, hour, minute, second]]:
    constant[ Round the given datetime to the nearest time with the given hour, minute and second (ties round down to the earlier time). ]
    variable[new_dt] assign[=] call[name[dt].replace, parameter[]]
    if compare[name[new_dt] equal[==] name[dt]] begin[:]
        return[name[new_dt]]
    variable[d1] assign[=] binary_operation[name[dt] - name[before]]
    variable[d2] assign[=] binary_operation[name[after] - name[dt]]
    if compare[name[d1] less[<] name[d2]] begin[:]
        return[name[before]]
keyword[def] identifier[_round_to] ( identifier[dt] , identifier[hour] , identifier[minute] , identifier[second] ): literal[string] identifier[new_dt] = identifier[dt] . identifier[replace] ( identifier[hour] = identifier[hour] , identifier[minute] = identifier[minute] , identifier[second] = identifier[second] ) keyword[if] identifier[new_dt] == identifier[dt] : keyword[return] identifier[new_dt] keyword[elif] identifier[new_dt] < identifier[dt] : identifier[before] = identifier[new_dt] identifier[after] = identifier[new_dt] + identifier[timedelta] ( identifier[days] = literal[int] ) keyword[elif] identifier[new_dt] > identifier[dt] : identifier[before] = identifier[new_dt] - identifier[timedelta] ( identifier[days] = literal[int] ) identifier[after] = identifier[new_dt] identifier[d1] = identifier[dt] - identifier[before] identifier[d2] = identifier[after] - identifier[dt] keyword[if] identifier[d1] < identifier[d2] : keyword[return] identifier[before] keyword[elif] identifier[d1] > identifier[d2] : keyword[return] identifier[after] keyword[else] : keyword[return] identifier[before]
def _round_to(dt, hour, minute, second): """ Round the given datetime to the nearest time with the given hour, minute and second (ties round down to the earlier time). """ new_dt = dt.replace(hour=hour, minute=minute, second=second) if new_dt == dt: return new_dt # depends on [control=['if'], data=['new_dt']] elif new_dt < dt: before = new_dt after = new_dt + timedelta(days=1) # depends on [control=['if'], data=['new_dt']] elif new_dt > dt: before = new_dt - timedelta(days=1) after = new_dt # depends on [control=['if'], data=['new_dt']] d1 = dt - before d2 = after - dt if d1 < d2: return before # depends on [control=['if'], data=[]] elif d1 > d2: return after # depends on [control=['if'], data=[]] else: return before
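Quick check of the rounding rule, assuming _round_to above is in scope: the result is whichever anchor (same day or an adjacent one) is nearest, with ties going to the earlier anchor.

from datetime import datetime

print(_round_to(datetime(2020, 1, 1, 23, 50), 0, 0, 0))  # 2020-01-02 00:00:00
print(_round_to(datetime(2020, 1, 1, 0, 10), 0, 0, 0))   # 2020-01-01 00:00:00
print(_round_to(datetime(2020, 1, 1, 12, 0), 0, 0, 0))   # 2020-01-01 00:00:00 (tie)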
def _update_trsys(self, event): """Transform object(s) have changed for this Node; assign these to the visual's TransformSystem. """ doc = self.document_node scene = self.scene_node root = self.root_node self.transforms.visual_transform = self.node_transform(scene) self.transforms.scene_transform = scene.node_transform(doc) self.transforms.document_transform = doc.node_transform(root) Node._update_trsys(self, event)
def function[_update_trsys, parameter[self, event]]: constant[Transform object(s) have changed for this Node; assign these to the visual's TransformSystem. ] variable[doc] assign[=] name[self].document_node variable[scene] assign[=] name[self].scene_node variable[root] assign[=] name[self].root_node name[self].transforms.visual_transform assign[=] call[name[self].node_transform, parameter[name[scene]]] name[self].transforms.scene_transform assign[=] call[name[scene].node_transform, parameter[name[doc]]] name[self].transforms.document_transform assign[=] call[name[doc].node_transform, parameter[name[root]]] call[name[Node]._update_trsys, parameter[name[self], name[event]]]
keyword[def] identifier[_update_trsys] ( identifier[self] , identifier[event] ): literal[string] identifier[doc] = identifier[self] . identifier[document_node] identifier[scene] = identifier[self] . identifier[scene_node] identifier[root] = identifier[self] . identifier[root_node] identifier[self] . identifier[transforms] . identifier[visual_transform] = identifier[self] . identifier[node_transform] ( identifier[scene] ) identifier[self] . identifier[transforms] . identifier[scene_transform] = identifier[scene] . identifier[node_transform] ( identifier[doc] ) identifier[self] . identifier[transforms] . identifier[document_transform] = identifier[doc] . identifier[node_transform] ( identifier[root] ) identifier[Node] . identifier[_update_trsys] ( identifier[self] , identifier[event] )
def _update_trsys(self, event): """Transform object(s) have changed for this Node; assign these to the visual's TransformSystem. """ doc = self.document_node scene = self.scene_node root = self.root_node self.transforms.visual_transform = self.node_transform(scene) self.transforms.scene_transform = scene.node_transform(doc) self.transforms.document_transform = doc.node_transform(root) Node._update_trsys(self, event)
def measurement_time_typical(self): """Typical time in milliseconds required to complete a measurement in normal mode""" meas_time_ms = 1.0 if self.overscan_temperature != OVERSCAN_DISABLE: meas_time_ms += (2 * _BME280_OVERSCANS.get(self.overscan_temperature)) if self.overscan_pressure != OVERSCAN_DISABLE: meas_time_ms += (2 * _BME280_OVERSCANS.get(self.overscan_pressure) + 0.5) if self.overscan_humidity != OVERSCAN_DISABLE: meas_time_ms += (2 * _BME280_OVERSCANS.get(self.overscan_humidity) + 0.5) return meas_time_ms
def function[measurement_time_typical, parameter[self]]: constant[Typical time in milliseconds required to complete a measurement in normal mode] variable[meas_time_ms] assign[=] constant[1.0] if compare[name[self].overscan_temperature not_equal[!=] name[OVERSCAN_DISABLE]] begin[:] <ast.AugAssign object at 0x7da1b0d0dcc0> if compare[name[self].overscan_pressure not_equal[!=] name[OVERSCAN_DISABLE]] begin[:] <ast.AugAssign object at 0x7da1b0d0e4a0> if compare[name[self].overscan_humidity not_equal[!=] name[OVERSCAN_DISABLE]] begin[:] <ast.AugAssign object at 0x7da1b0d0d8a0> return[name[meas_time_ms]]
keyword[def] identifier[measurement_time_typical] ( identifier[self] ): literal[string] identifier[meas_time_ms] = literal[int] keyword[if] identifier[self] . identifier[overscan_temperature] != identifier[OVERSCAN_DISABLE] : identifier[meas_time_ms] +=( literal[int] * identifier[_BME280_OVERSCANS] . identifier[get] ( identifier[self] . identifier[overscan_temperature] )) keyword[if] identifier[self] . identifier[overscan_pressure] != identifier[OVERSCAN_DISABLE] : identifier[meas_time_ms] +=( literal[int] * identifier[_BME280_OVERSCANS] . identifier[get] ( identifier[self] . identifier[overscan_pressure] )+ literal[int] ) keyword[if] identifier[self] . identifier[overscan_humidity] != identifier[OVERSCAN_DISABLE] : identifier[meas_time_ms] +=( literal[int] * identifier[_BME280_OVERSCANS] . identifier[get] ( identifier[self] . identifier[overscan_humidity] )+ literal[int] ) keyword[return] identifier[meas_time_ms]
def measurement_time_typical(self): """Typical time in milliseconds required to complete a measurement in normal mode""" meas_time_ms = 1.0 if self.overscan_temperature != OVERSCAN_DISABLE: meas_time_ms += 2 * _BME280_OVERSCANS.get(self.overscan_temperature) # depends on [control=['if'], data=[]] if self.overscan_pressure != OVERSCAN_DISABLE: meas_time_ms += 2 * _BME280_OVERSCANS.get(self.overscan_pressure) + 0.5 # depends on [control=['if'], data=[]] if self.overscan_humidity != OVERSCAN_DISABLE: meas_time_ms += 2 * _BME280_OVERSCANS.get(self.overscan_humidity) + 0.5 # depends on [control=['if'], data=[]] return meas_time_ms
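Hedged recomputation of the timing formula above: the overscan lookup is an assumed stand-in for _BME280_OVERSCANS (register setting to sample count), while the 2 ms per sample and the 0.5 ms setup terms for pressure and humidity follow the code.

overscans = {1: 1, 2: 2, 3: 4, 4: 8, 5: 16}  # assumption: settings x1..x16

def typical_ms(os_t, os_p, os_h):
    t = 1.0                                  # fixed startup cost
    t += 2 * overscans[os_t]                 # temperature samples
    t += 2 * overscans[os_p] + 0.5           # pressure samples + setup
    t += 2 * overscans[os_h] + 0.5           # humidity samples + setup
    return t

print(typical_ms(2, 5, 1))  # 1 + 4 + 32.5 + 2.5 = 40.0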
def context(self, context): """Sets the context that Selenium commands are running in using a `with` statement. The state of the context on the server is saved before entering the block, and restored upon exiting it. :param context: Context, may be one of the class properties `CONTEXT_CHROME` or `CONTEXT_CONTENT`. Usage example:: with selenium.context(selenium.CONTEXT_CHROME): # chrome scope ... do stuff ... """ initial_context = self.execute('GET_CONTEXT').pop('value') self.set_context(context) try: yield finally: self.set_context(initial_context)
def function[context, parameter[self, context]]: constant[Sets the context that Selenium commands are running in using a `with` statement. The state of the context on the server is saved before entering the block, and restored upon exiting it. :param context: Context, may be one of the class properties `CONTEXT_CHROME` or `CONTEXT_CONTENT`. Usage example:: with selenium.context(selenium.CONTEXT_CHROME): # chrome scope ... do stuff ... ] variable[initial_context] assign[=] call[call[name[self].execute, parameter[constant[GET_CONTEXT]]].pop, parameter[constant[value]]] call[name[self].set_context, parameter[name[context]]] <ast.Try object at 0x7da1b1e9a020>
keyword[def] identifier[context] ( identifier[self] , identifier[context] ): literal[string] identifier[initial_context] = identifier[self] . identifier[execute] ( literal[string] ). identifier[pop] ( literal[string] ) identifier[self] . identifier[set_context] ( identifier[context] ) keyword[try] : keyword[yield] keyword[finally] : identifier[self] . identifier[set_context] ( identifier[initial_context] )
def context(self, context): """Sets the context that Selenium commands are running in using a `with` statement. The state of the context on the server is saved before entering the block, and restored upon exiting it. :param context: Context, may be one of the class properties `CONTEXT_CHROME` or `CONTEXT_CONTENT`. Usage example:: with selenium.context(selenium.CONTEXT_CHROME): # chrome scope ... do stuff ... """ initial_context = self.execute('GET_CONTEXT').pop('value') self.set_context(context) try: yield # depends on [control=['try'], data=[]] finally: self.set_context(initial_context)
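The save/restore shape of the method above, reduced to a generic contextmanager; the Driver stub and its _context attribute are illustrative, not Marionette's execute() API.

from contextlib import contextmanager

class Driver:
    _context = 'content'

    @contextmanager
    def context(self, new_context):
        initial = self._context      # save current state
        self._context = new_context
        try:
            yield
        finally:
            self._context = initial  # always restore, even on error

d = Driver()
with d.context('chrome'):
    print(d._context)  # chrome
print(d._context)      # content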
def _destroy(self, target_position_groups): """Destroy indicated position groups, handle any chain destructions, and return all destroyed groups.""" target_position_groups = list(target_position_groups) # work on a copy destroyed_tile_groups = list() blank = Tile.singleton('.') a = self._array while target_position_groups: # continue as long as more targets exist # delay actual clearing of destroyed tiles until all claiming # groups have been stored (e.g. overlapping matches, bombs) clear_after_storing = list() new_target_position_groups = list() for target_position_group in target_position_groups: destroyed_tile_group = list() for target_position in target_position_group: target_tile = a[target_position] # no handling for blanks that appear in destruction if target_tile.is_blank(): continue destroyed_tile_group.append(target_tile) clear_after_storing.append(target_position) # skull bombs require further destructions if target_tile.is_skullbomb(): new_positions = self.__skullbomb_radius(target_position) # convert individual positions to position groups new_position_groups = [(new_position,) for new_position in new_positions] new_target_position_groups.extend(new_position_groups) if destroyed_tile_group: destroyed_tile_groups.append(destroyed_tile_group) # Finally clear positions after all records have been made for position in clear_after_storing: a[position] = blank # Replace the completed target position groups with any new ones target_position_groups = new_target_position_groups return destroyed_tile_groups
def function[_destroy, parameter[self, target_position_groups]]: constant[Destroy indicated position groups, handle any chain destructions, and return all destroyed groups.] variable[target_position_groups] assign[=] call[name[list], parameter[name[target_position_groups]]] variable[destroyed_tile_groups] assign[=] call[name[list], parameter[]] variable[blank] assign[=] call[name[Tile].singleton, parameter[constant[.]]] variable[a] assign[=] name[self]._array while name[target_position_groups] begin[:] variable[clear_after_storing] assign[=] call[name[list], parameter[]] variable[new_target_position_groups] assign[=] call[name[list], parameter[]] for taget[name[target_position_group]] in starred[name[target_position_groups]] begin[:] variable[destroyed_tile_group] assign[=] call[name[list], parameter[]] for taget[name[target_position]] in starred[name[target_position_group]] begin[:] variable[target_tile] assign[=] call[name[a]][name[target_position]] if call[name[target_tile].is_blank, parameter[]] begin[:] continue call[name[destroyed_tile_group].append, parameter[name[target_tile]]] call[name[clear_after_storing].append, parameter[name[target_position]]] if call[name[target_tile].is_skullbomb, parameter[]] begin[:] variable[new_positions] assign[=] call[name[self].__skullbomb_radius, parameter[name[target_position]]] variable[new_position_groups] assign[=] <ast.ListComp object at 0x7da1b26a5090> call[name[new_target_position_groups].extend, parameter[name[new_position_groups]]] if name[destroyed_tile_group] begin[:] call[name[destroyed_tile_groups].append, parameter[name[destroyed_tile_group]]] for taget[name[position]] in starred[name[clear_after_storing]] begin[:] call[name[a]][name[position]] assign[=] name[blank] variable[target_position_groups] assign[=] name[new_target_position_groups] return[name[destroyed_tile_groups]]
keyword[def] identifier[_destroy] ( identifier[self] , identifier[target_position_groups] ): literal[string] identifier[target_position_groups] = identifier[list] ( identifier[target_position_groups] ) identifier[destroyed_tile_groups] = identifier[list] () identifier[blank] = identifier[Tile] . identifier[singleton] ( literal[string] ) identifier[a] = identifier[self] . identifier[_array] keyword[while] identifier[target_position_groups] : identifier[clear_after_storing] = identifier[list] () identifier[new_target_position_groups] = identifier[list] () keyword[for] identifier[target_position_group] keyword[in] identifier[target_position_groups] : identifier[destroyed_tile_group] = identifier[list] () keyword[for] identifier[target_position] keyword[in] identifier[target_position_group] : identifier[target_tile] = identifier[a] [ identifier[target_position] ] keyword[if] identifier[target_tile] . identifier[is_blank] (): keyword[continue] identifier[destroyed_tile_group] . identifier[append] ( identifier[target_tile] ) identifier[clear_after_storing] . identifier[append] ( identifier[target_position] ) keyword[if] identifier[target_tile] . identifier[is_skullbomb] (): identifier[new_positions] = identifier[self] . identifier[__skullbomb_radius] ( identifier[target_position] ) identifier[new_position_groups] =[( identifier[new_position] ,) keyword[for] identifier[new_position] keyword[in] identifier[new_positions] ] identifier[new_target_position_groups] . identifier[extend] ( identifier[new_position_groups] ) keyword[if] identifier[destroyed_tile_group] : identifier[destroyed_tile_groups] . identifier[append] ( identifier[destroyed_tile_group] ) keyword[for] identifier[position] keyword[in] identifier[clear_after_storing] : identifier[a] [ identifier[position] ]= identifier[blank] identifier[target_position_groups] = identifier[new_target_position_groups] keyword[return] identifier[destroyed_tile_groups]
def _destroy(self, target_position_groups): """Destroy indicated position groups, handle any chain destructions, and return all destroyed groups.""" target_position_groups = list(target_position_groups) # work on a copy destroyed_tile_groups = list() blank = Tile.singleton('.') a = self._array while target_position_groups: # continue as long as more targets exist # delay actual clearing of destroyed tiles until all claiming # groups have been stored (e.g. overlapping matches, bombs) clear_after_storing = list() new_target_position_groups = list() for target_position_group in target_position_groups: destroyed_tile_group = list() for target_position in target_position_group: target_tile = a[target_position] # no handling for blanks that appear in destruction if target_tile.is_blank(): continue # depends on [control=['if'], data=[]] destroyed_tile_group.append(target_tile) clear_after_storing.append(target_position) # skull bombs require further destructions if target_tile.is_skullbomb(): new_positions = self.__skullbomb_radius(target_position) # convert individual positions to position groups new_position_groups = [(new_position,) for new_position in new_positions] new_target_position_groups.extend(new_position_groups) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['target_position']] if destroyed_tile_group: destroyed_tile_groups.append(destroyed_tile_group) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['target_position_group']] # Finally clear positions after all records have been made for position in clear_after_storing: a[position] = blank # depends on [control=['for'], data=['position']] # Replace the completed target position groups with any new ones target_position_groups = new_target_position_groups # depends on [control=['while'], data=[]] return destroyed_tile_groups
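A toy run of the chain loop above: each processed group may queue new single-position groups (here, a bomb's neighbours) until no targets remain. Unlike the original, this sketch clears tiles immediately rather than after the whole pass, and the grid and neighbour map are made up.

from collections import deque

grid = {'a': 'gem', 'b': 'bomb', 'c': 'gem'}
bomb_neighbours = {'b': ['c']}            # assumption: bomb at 'b' also hits 'c'

destroyed = []
targets = deque([('a', 'b')])             # one initial position group
while targets:
    group = targets.popleft()
    hit = [p for p in group if grid[p] != '.']
    for p in hit:
        if grid[p] == 'bomb':             # chain: queue follow-up groups
            targets.extend((n,) for n in bomb_neighbours.get(p, []))
        grid[p] = '.'
    if hit:
        destroyed.append(hit)
print(destroyed)  # [['a', 'b'], ['c']]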
def from_labeled_point(rdd, categorical=False, nb_classes=None):
    """Convert a LabeledPoint RDD back to a pair of numpy arrays

    :param rdd: LabeledPoint RDD
    :param categorical: boolean, if labels should be one-hot encoded when returned
    :param nb_classes: optional int, indicating the number of class labels
    :return: pair of numpy arrays, features and labels
    """
    features = np.asarray(
        rdd.map(lambda lp: from_vector(lp.features)).collect())
    labels = np.asarray(rdd.map(lambda lp: lp.label).collect(), dtype='int32')
    if categorical:
        if not nb_classes:
            nb_classes = np.max(labels) + 1
        temp = np.zeros((len(labels), nb_classes))
        for i, label in enumerate(labels):
            temp[i, label] = 1.
        labels = temp
    return features, labels
def function[from_labeled_point, parameter[rdd, categorical, nb_classes]]:
    constant[Convert a LabeledPoint RDD back to a pair of numpy arrays :param rdd: LabeledPoint RDD :param categorical: boolean, if labels should be one-hot encoded when returned :param nb_classes: optional int, indicating the number of class labels :return: pair of numpy arrays, features and labels ]
    variable[features] assign[=] call[name[np].asarray, parameter[call[call[name[rdd].map, parameter[<ast.Lambda object at 0x7da1b1881840>]].collect, parameter[]]]]
    variable[labels] assign[=] call[name[np].asarray, parameter[call[call[name[rdd].map, parameter[<ast.Lambda object at 0x7da1b1880820>]].collect, parameter[]]]]
    if name[categorical] begin[:]
        if <ast.UnaryOp object at 0x7da1b1880f40> begin[:]
            variable[nb_classes] assign[=] binary_operation[call[name[np].max, parameter[name[labels]]] + constant[1]]
        variable[temp] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Call object at 0x7da1b184cd90>, <ast.Name object at 0x7da1b184c6a0>]]]]
        for taget[tuple[[<ast.Name object at 0x7da1b184e740>, <ast.Name object at 0x7da1b184e6e0>]]] in starred[call[name[enumerate], parameter[name[labels]]]] begin[:]
            call[name[temp]][tuple[[<ast.Name object at 0x7da1b184e6b0>, <ast.Name object at 0x7da1b184e020>]]] assign[=] constant[1.0]
        variable[labels] assign[=] name[temp]
    return[tuple[[<ast.Name object at 0x7da1b184c100>, <ast.Name object at 0x7da1b184df90>]]]
keyword[def] identifier[from_labeled_point] ( identifier[rdd] , identifier[categorical] = keyword[False] , identifier[nb_classes] = keyword[None] ): literal[string] identifier[features] = identifier[np] . identifier[asarray] ( identifier[rdd] . identifier[map] ( keyword[lambda] identifier[lp] : identifier[from_vector] ( identifier[lp] . identifier[features] )). identifier[collect] ()) identifier[labels] = identifier[np] . identifier[asarray] ( identifier[rdd] . identifier[map] ( keyword[lambda] identifier[lp] : identifier[lp] . identifier[label] ). identifier[collect] (), identifier[dtype] = literal[string] ) keyword[if] identifier[categorical] : keyword[if] keyword[not] identifier[nb_classes] : identifier[nb_classes] = identifier[np] . identifier[max] ( identifier[labels] )+ literal[int] identifier[temp] = identifier[np] . identifier[zeros] (( identifier[len] ( identifier[labels] ), identifier[nb_classes] )) keyword[for] identifier[i] , identifier[label] keyword[in] identifier[enumerate] ( identifier[labels] ): identifier[temp] [ identifier[i] , identifier[label] ]= literal[int] identifier[labels] = identifier[temp] keyword[return] identifier[features] , identifier[labels]
def from_labeled_point(rdd, categorical=False, nb_classes=None): """Convert a LabeledPoint RDD back to a pair of numpy arrays :param rdd: LabeledPoint RDD :param categorical: boolean, if labels should be one-hot encoded when returned :param nb_classes: optional int, indicating the number of class labels :return: pair of numpy arrays, features and labels """ features = np.asarray(rdd.map(lambda lp: from_vector(lp.features)).collect()) labels = np.asarray(rdd.map(lambda lp: lp.label).collect(), dtype='int32') if categorical: if not nb_classes: nb_classes = np.max(labels) + 1 # depends on [control=['if'], data=[]] temp = np.zeros((len(labels), nb_classes)) for (i, label) in enumerate(labels): temp[i, label] = 1.0 # depends on [control=['for'], data=[]] labels = temp # depends on [control=['if'], data=[]] return (features, labels)
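NumPy-only sketch of the one-hot step above, skipping the Spark collect(); the labels and inferred class count are toy values.

import numpy as np

labels = np.array([0, 2, 1], dtype='int32')
nb_classes = np.max(labels) + 1           # infer class count when not given
temp = np.zeros((len(labels), nb_classes))
for i, label in enumerate(labels):
    temp[i, label] = 1.
print(temp)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]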