Dataset columns and string-length ranges: code (75 to 104k chars), code_sememe (47 to 309k chars), token_type (215 to 214k chars), code_dependency (75 to 155k chars). Each row below pairs a Python function (code) with three derived views: an AST-style structural dump (code_sememe), a lexical-class token stream (token_type), and the same source annotated with control-dependency comments (code_dependency).
def set_ports(self, port0 = 0x00, port1 = 0x00): 'Writes specified value to the pins defined as output by method. Writing to input pins has no effect.' self.bus.write_byte_data(self.address, self.CONTROL_PORT0, port0) self.bus.write_byte_data(self.address, self.CONTROL_PORT0, port1) return
def function[set_ports, parameter[self, port0, port1]]: constant[Writes specified value to the pins defined as output by method. Writing to input pins has no effect.] call[name[self].bus.write_byte_data, parameter[name[self].address, name[self].CONTROL_PORT0, name[port0]]] call[name[self].bus.write_byte_data, parameter[name[self].address, name[self].CONTROL_PORT0, name[port1]]] return[None]
keyword[def] identifier[set_ports] ( identifier[self] , identifier[port0] = literal[int] , identifier[port1] = literal[int] ): literal[string] identifier[self] . identifier[bus] . identifier[write_byte_data] ( identifier[self] . identifier[address] , identifier[self] . identifier[CONTROL_PORT0] , identifier[port0] ) identifier[self] . identifier[bus] . identifier[write_byte_data] ( identifier[self] . identifier[address] , identifier[self] . identifier[CONTROL_PORT0] , identifier[port1] ) keyword[return]
def set_ports(self, port0=0, port1=0): """Writes specified value to the pins defined as output by method. Writing to input pins has no effect.""" self.bus.write_byte_data(self.address, self.CONTROL_PORT0, port0) self.bus.write_byte_data(self.address, self.CONTROL_PORT0, port1) return
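The token_type field above tags each lexical token with its class (keyword, identifier, literal). For orientation, a similar rendering can be sketched with Python's standard tokenize module; this is an illustrative approximation, not the dataset's actual tooling, and collapsing string/number values to literal[string]/literal[int] is an assumption read off the samples.

import io
import keyword
import token
import tokenize

def token_type_view(source):
    """Approximate the token_type column: tag NAME tokens as keyword or
    identifier, collapse literals, and keep operators verbatim."""
    parts = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == token.NAME:
            tag = 'keyword' if keyword.iskeyword(tok.string) else 'identifier'
            parts.append('%s[%s]' % (tag, tok.string))
        elif tok.type == token.STRING:
            parts.append('literal[string]')
        elif tok.type == token.NUMBER:
            parts.append('literal[int]')
        elif tok.type == token.OP:
            parts.append(tok.string)
    return ' '.join(parts)

print(token_type_view('def f(self, port0=0x00):\n    return\n'))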
def init_app(self, app): """Initialize a :class:`~flask.Flask` application for use with this extension. """ self._jobs = [] if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['restpoints'] = self app.restpoints_instance = self app.add_url_rule('/ping', 'ping', ping) app.add_url_rule('/time', 'time', time) app.add_url_rule('/status', 'status', status(self._jobs))
def function[init_app, parameter[self, app]]: constant[Initialize a :class:`~flask.Flask` application for use with this extension. ] name[self]._jobs assign[=] list[[]] if <ast.UnaryOp object at 0x7da1afe8b4f0> begin[:] name[app].extensions assign[=] dictionary[[], []] call[name[app].extensions][constant[restpoints]] assign[=] name[self] name[app].restpoints_instance assign[=] name[self] call[name[app].add_url_rule, parameter[constant[/ping], constant[ping], name[ping]]] call[name[app].add_url_rule, parameter[constant[/time], constant[time], name[time]]] call[name[app].add_url_rule, parameter[constant[/status], constant[status], call[name[status], parameter[name[self]._jobs]]]]
keyword[def] identifier[init_app] ( identifier[self] , identifier[app] ): literal[string] identifier[self] . identifier[_jobs] =[] keyword[if] keyword[not] identifier[hasattr] ( identifier[app] , literal[string] ): identifier[app] . identifier[extensions] ={} identifier[app] . identifier[extensions] [ literal[string] ]= identifier[self] identifier[app] . identifier[restpoints_instance] = identifier[self] identifier[app] . identifier[add_url_rule] ( literal[string] , literal[string] , identifier[ping] ) identifier[app] . identifier[add_url_rule] ( literal[string] , literal[string] , identifier[time] ) identifier[app] . identifier[add_url_rule] ( literal[string] , literal[string] , identifier[status] ( identifier[self] . identifier[_jobs] ))
def init_app(self, app): """Initialize a :class:`~flask.Flask` application for use with this extension. """ self._jobs = [] if not hasattr(app, 'extensions'): app.extensions = {} # depends on [control=['if'], data=[]] app.extensions['restpoints'] = self app.restpoints_instance = self app.add_url_rule('/ping', 'ping', ping) app.add_url_rule('/time', 'time', time) app.add_url_rule('/status', 'status', status(self._jobs))
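The code_sememe field reads like a serialized AST: function[...], name[...], constant[...], call[...]. The dataset's generator is not shown here; as a rough stand-in, the standard ast module produces a comparable structural dump.

import ast

SOURCE = '''
def init_app(self, app):
    self._jobs = []
    app.add_url_rule('/ping', 'ping', ping)
'''

# ast.dump renders the same nesting the sememe column encodes
# (the indent argument needs Python 3.9+).
print(ast.dump(ast.parse(SOURCE), indent=2))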
def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False): ''' load summaries of runs from a list of directories (including subdirectories) Arguments: enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False Returns: List of Result objects with the following fields: - dirname - path to the directory data was loaded from - metadata - run metadata (such as command-line arguments and anything else in metadata.json file - monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory) - progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file ''' import re if isinstance(root_dir_or_dirs, str): rootdirs = [osp.expanduser(root_dir_or_dirs)] else: rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs] allresults = [] for rootdir in rootdirs: assert osp.exists(rootdir), "%s doesn't exist"%rootdir for dirname, dirs, files in os.walk(rootdir): if '-proc' in dirname: files[:] = [] continue monitor_re = re.compile(r'(\d+\.)?(\d+\.)?monitor\.csv') if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or \ any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv # used to be uncommented, which means do not go deeper than current directory if any of the data files # are found # dirs[:] = [] result = {'dirname' : dirname} if "metadata.json" in files: with open(osp.join(dirname, "metadata.json"), "r") as fh: result['metadata'] = json.load(fh) progjson = osp.join(dirname, "progress.json") progcsv = osp.join(dirname, "progress.csv") if enable_progress: if osp.exists(progjson): result['progress'] = pandas.DataFrame(read_json(progjson)) elif osp.exists(progcsv): try: result['progress'] = read_csv(progcsv) except pandas.errors.EmptyDataError: print('skipping progress file in ', dirname, 'empty data') else: if verbose: print('skipping %s: no progress file'%dirname) if enable_monitor: try: result['monitor'] = pandas.DataFrame(monitor.load_results(dirname)) except monitor.LoadMonitorResultsError: print('skipping %s: no monitor files'%dirname) except Exception as e: print('exception loading monitor file in %s: %s'%(dirname, e)) if result.get('monitor') is not None or result.get('progress') is not None: allresults.append(Result(**result)) if verbose: print('successfully loaded %s'%dirname) if verbose: print('loaded %i results'%len(allresults)) return allresults
def function[load_results, parameter[root_dir_or_dirs, enable_progress, enable_monitor, verbose]]: constant[ load summaries of runs from a list of directories (including subdirectories) Arguments: enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False Returns: List of Result objects with the following fields: - dirname - path to the directory data was loaded from - metadata - run metadata (such as command-line arguments and anything else in metadata.json file - monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory) - progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file ] import module[re] if call[name[isinstance], parameter[name[root_dir_or_dirs], name[str]]] begin[:] variable[rootdirs] assign[=] list[[<ast.Call object at 0x7da2045668f0>]] variable[allresults] assign[=] list[[]] for taget[name[rootdir]] in starred[name[rootdirs]] begin[:] assert[call[name[osp].exists, parameter[name[rootdir]]]] for taget[tuple[[<ast.Name object at 0x7da204565e40>, <ast.Name object at 0x7da204566cb0>, <ast.Name object at 0x7da204567e80>]]] in starred[call[name[os].walk, parameter[name[rootdir]]]] begin[:] if compare[constant[-proc] in name[dirname]] begin[:] call[name[files]][<ast.Slice object at 0x7da2045671c0>] assign[=] list[[]] continue variable[monitor_re] assign[=] call[name[re].compile, parameter[constant[(\d+\.)?(\d+\.)?monitor\.csv]]] if <ast.BoolOp object at 0x7da2045677f0> begin[:] variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da2045665f0>], [<ast.Name object at 0x7da204564fa0>]] if compare[constant[metadata.json] in name[files]] begin[:] with call[name[open], parameter[call[name[osp].join, parameter[name[dirname], constant[metadata.json]]], constant[r]]] begin[:] call[name[result]][constant[metadata]] assign[=] call[name[json].load, parameter[name[fh]]] variable[progjson] assign[=] call[name[osp].join, parameter[name[dirname], constant[progress.json]]] variable[progcsv] assign[=] call[name[osp].join, parameter[name[dirname], constant[progress.csv]]] if name[enable_progress] begin[:] if call[name[osp].exists, parameter[name[progjson]]] begin[:] call[name[result]][constant[progress]] assign[=] call[name[pandas].DataFrame, parameter[call[name[read_json], parameter[name[progjson]]]]] if name[enable_monitor] begin[:] <ast.Try object at 0x7da204567670> if <ast.BoolOp object at 0x7da1b02136d0> begin[:] call[name[allresults].append, parameter[call[name[Result], parameter[]]]] if name[verbose] begin[:] call[name[print], parameter[binary_operation[constant[successfully loaded %s] <ast.Mod object at 0x7da2590d6920> name[dirname]]]] if name[verbose] begin[:] call[name[print], parameter[binary_operation[constant[loaded %i results] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[allresults]]]]]] return[name[allresults]]
keyword[def] identifier[load_results] ( identifier[root_dir_or_dirs] , identifier[enable_progress] = keyword[True] , identifier[enable_monitor] = keyword[True] , identifier[verbose] = keyword[False] ): literal[string] keyword[import] identifier[re] keyword[if] identifier[isinstance] ( identifier[root_dir_or_dirs] , identifier[str] ): identifier[rootdirs] =[ identifier[osp] . identifier[expanduser] ( identifier[root_dir_or_dirs] )] keyword[else] : identifier[rootdirs] =[ identifier[osp] . identifier[expanduser] ( identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[root_dir_or_dirs] ] identifier[allresults] =[] keyword[for] identifier[rootdir] keyword[in] identifier[rootdirs] : keyword[assert] identifier[osp] . identifier[exists] ( identifier[rootdir] ), literal[string] % identifier[rootdir] keyword[for] identifier[dirname] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[rootdir] ): keyword[if] literal[string] keyword[in] identifier[dirname] : identifier[files] [:]=[] keyword[continue] identifier[monitor_re] = identifier[re] . identifier[compile] ( literal[string] ) keyword[if] identifier[set] ([ literal[string] , literal[string] , literal[string] , literal[string] ]). identifier[intersection] ( identifier[files] ) keyword[or] identifier[any] ([ identifier[f] keyword[for] identifier[f] keyword[in] identifier[files] keyword[if] identifier[monitor_re] . identifier[match] ( identifier[f] )]): identifier[result] ={ literal[string] : identifier[dirname] } keyword[if] literal[string] keyword[in] identifier[files] : keyword[with] identifier[open] ( identifier[osp] . identifier[join] ( identifier[dirname] , literal[string] ), literal[string] ) keyword[as] identifier[fh] : identifier[result] [ literal[string] ]= identifier[json] . identifier[load] ( identifier[fh] ) identifier[progjson] = identifier[osp] . identifier[join] ( identifier[dirname] , literal[string] ) identifier[progcsv] = identifier[osp] . identifier[join] ( identifier[dirname] , literal[string] ) keyword[if] identifier[enable_progress] : keyword[if] identifier[osp] . identifier[exists] ( identifier[progjson] ): identifier[result] [ literal[string] ]= identifier[pandas] . identifier[DataFrame] ( identifier[read_json] ( identifier[progjson] )) keyword[elif] identifier[osp] . identifier[exists] ( identifier[progcsv] ): keyword[try] : identifier[result] [ literal[string] ]= identifier[read_csv] ( identifier[progcsv] ) keyword[except] identifier[pandas] . identifier[errors] . identifier[EmptyDataError] : identifier[print] ( literal[string] , identifier[dirname] , literal[string] ) keyword[else] : keyword[if] identifier[verbose] : identifier[print] ( literal[string] % identifier[dirname] ) keyword[if] identifier[enable_monitor] : keyword[try] : identifier[result] [ literal[string] ]= identifier[pandas] . identifier[DataFrame] ( identifier[monitor] . identifier[load_results] ( identifier[dirname] )) keyword[except] identifier[monitor] . identifier[LoadMonitorResultsError] : identifier[print] ( literal[string] % identifier[dirname] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print] ( literal[string] %( identifier[dirname] , identifier[e] )) keyword[if] identifier[result] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[or] identifier[result] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] : identifier[allresults] . identifier[append] ( identifier[Result] (** identifier[result] )) keyword[if] identifier[verbose] : identifier[print] ( literal[string] % identifier[dirname] ) keyword[if] identifier[verbose] : identifier[print] ( literal[string] % identifier[len] ( identifier[allresults] )) keyword[return] identifier[allresults]
def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False): """ load summaries of runs from a list of directories (including subdirectories) Arguments: enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False Returns: List of Result objects with the following fields: - dirname - path to the directory data was loaded from - metadata - run metadata (such as command-line arguments and anything else in metadata.json file - monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory) - progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file """ import re if isinstance(root_dir_or_dirs, str): rootdirs = [osp.expanduser(root_dir_or_dirs)] # depends on [control=['if'], data=[]] else: rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs] allresults = [] for rootdir in rootdirs: assert osp.exists(rootdir), "%s doesn't exist" % rootdir for (dirname, dirs, files) in os.walk(rootdir): if '-proc' in dirname: files[:] = [] continue # depends on [control=['if'], data=[]] monitor_re = re.compile('(\\d+\\.)?(\\d+\\.)?monitor\\.csv') if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv # used to be uncommented, which means do not go deeper than current directory if any of the data files # are found # dirs[:] = [] result = {'dirname': dirname} if 'metadata.json' in files: with open(osp.join(dirname, 'metadata.json'), 'r') as fh: result['metadata'] = json.load(fh) # depends on [control=['with'], data=['fh']] # depends on [control=['if'], data=[]] progjson = osp.join(dirname, 'progress.json') progcsv = osp.join(dirname, 'progress.csv') if enable_progress: if osp.exists(progjson): result['progress'] = pandas.DataFrame(read_json(progjson)) # depends on [control=['if'], data=[]] elif osp.exists(progcsv): try: result['progress'] = read_csv(progcsv) # depends on [control=['try'], data=[]] except pandas.errors.EmptyDataError: print('skipping progress file in ', dirname, 'empty data') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] elif verbose: print('skipping %s: no progress file' % dirname) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if enable_monitor: try: result['monitor'] = pandas.DataFrame(monitor.load_results(dirname)) # depends on [control=['try'], data=[]] except monitor.LoadMonitorResultsError: print('skipping %s: no monitor files' % dirname) # depends on [control=['except'], data=[]] except Exception as e: print('exception loading monitor file in %s: %s' % (dirname, e)) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] if result.get('monitor') is not None or result.get('progress') is not None: allresults.append(Result(**result)) if verbose: print('successfully loaded %s' % dirname) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['rootdir']] if verbose: print('loaded %i results' % len(allresults)) # depends on [control=['if'], data=[]] return allresults
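The code_dependency field appends comments of the form # depends on [control=['for'], data=['rootdir']] to statements nested inside control flow. Reading the annotation that way, the control part can be approximated by tracking enclosing control statements during an AST walk; the data part (the variables a branch depends on) is omitted here, and this reading of the annotations is an assumption, not the dataset's documented pipeline.

import ast

CONTROL_NODES = (ast.If, ast.For, ast.While, ast.Try, ast.With)

def control_contexts(source):
    """Return (statement type, enclosing control constructs) pairs,
    approximating the control=... part of the annotations."""
    out = []

    def visit(stmts, ctx):
        for stmt in stmts:
            out.append((type(stmt).__name__, list(ctx)))
            if isinstance(stmt, CONTROL_NODES):
                inner = ctx + [type(stmt).__name__.lower()]
            else:
                inner = ctx
            for field in ('body', 'orelse', 'finalbody'):
                visit(getattr(stmt, field, []), inner)

    visit(ast.parse(source).body, [])
    return out

print(control_contexts('for d in dirs:\n    if d:\n        n = 1\n'))
# [('For', []), ('If', ['for']), ('Assign', ['for', 'if'])]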
def interpolate_P(self, T, P, name): r'''Method to perform interpolation on a given tabular data set previously added via `set_tabular_data_P`. This method will create the interpolators the first time it is used on a property set, and store them for quick future use. Interpolation is cubic-spline based if 5 or more points are available, and linearly interpolated if not. Extrapolation is always performed linearly. This function uses the transforms `interpolation_T`, `interpolation_P`, `interpolation_property`, and `interpolation_property_inv` if set. If any of these are changed after the interpolators were first created, new interpolators are created with the new transforms. All interpolation is performed via the `interp2d` function. Parameters ---------- T : float Temperature at which to interpolate the property, [K] T : float Pressure at which to interpolate the property, [Pa] name : str The name assigned to the tabular data set Returns ------- prop : float Calculated property, [`units`] ''' key = (name, self.interpolation_T, self.interpolation_P, self.interpolation_property, self.interpolation_property_inv) # If the interpolator and extrapolator has already been created, load it if key in self.tabular_data_interpolators: extrapolator, spline = self.tabular_data_interpolators[key] else: Ts, Ps, properties = self.tabular_data[name] if self.interpolation_T: # Transform ths Ts with interpolation_T if set Ts2 = [self.interpolation_T(T2) for T2 in Ts] else: Ts2 = Ts if self.interpolation_P: # Transform ths Ts with interpolation_T if set Ps2 = [self.interpolation_P(P2) for P2 in Ps] else: Ps2 = Ps if self.interpolation_property: # Transform ths props with interpolation_property if set properties2 = [self.interpolation_property(p) for p in properties] else: properties2 = properties # Only allow linear extrapolation, but with whatever transforms are specified extrapolator = interp2d(Ts2, Ps2, properties2) # interpolation if fill value is missing # If more than 5 property points, create a spline interpolation if len(properties) >= 5: spline = interp2d(Ts2, Ps2, properties2, kind='cubic') else: spline = None self.tabular_data_interpolators[key] = (extrapolator, spline) # Load the stores values, tor checking which interpolation strategy to # use. Ts, Ps, properties = self.tabular_data[name] if T < Ts[0] or T > Ts[-1] or not spline or P < Ps[0] or P > Ps[-1]: tool = extrapolator else: tool = spline if self.interpolation_T: T = self.interpolation_T(T) if self.interpolation_P: P = self.interpolation_T(P) prop = tool(T, P) # either spline, or linear interpolation if self.interpolation_property: prop = self.interpolation_property_inv(prop) return float(prop)
def function[interpolate_P, parameter[self, T, P, name]]: constant[Method to perform interpolation on a given tabular data set previously added via `set_tabular_data_P`. This method will create the interpolators the first time it is used on a property set, and store them for quick future use. Interpolation is cubic-spline based if 5 or more points are available, and linearly interpolated if not. Extrapolation is always performed linearly. This function uses the transforms `interpolation_T`, `interpolation_P`, `interpolation_property`, and `interpolation_property_inv` if set. If any of these are changed after the interpolators were first created, new interpolators are created with the new transforms. All interpolation is performed via the `interp2d` function. Parameters ---------- T : float Temperature at which to interpolate the property, [K] T : float Pressure at which to interpolate the property, [Pa] name : str The name assigned to the tabular data set Returns ------- prop : float Calculated property, [`units`] ] variable[key] assign[=] tuple[[<ast.Name object at 0x7da204622e60>, <ast.Attribute object at 0x7da204620310>, <ast.Attribute object at 0x7da204623df0>, <ast.Attribute object at 0x7da204621510>, <ast.Attribute object at 0x7da204623f40>]] if compare[name[key] in name[self].tabular_data_interpolators] begin[:] <ast.Tuple object at 0x7da204623220> assign[=] call[name[self].tabular_data_interpolators][name[key]] <ast.Tuple object at 0x7da204621c30> assign[=] call[name[self].tabular_data][name[name]] if <ast.BoolOp object at 0x7da204623e20> begin[:] variable[tool] assign[=] name[extrapolator] if name[self].interpolation_T begin[:] variable[T] assign[=] call[name[self].interpolation_T, parameter[name[T]]] if name[self].interpolation_P begin[:] variable[P] assign[=] call[name[self].interpolation_T, parameter[name[P]]] variable[prop] assign[=] call[name[tool], parameter[name[T], name[P]]] if name[self].interpolation_property begin[:] variable[prop] assign[=] call[name[self].interpolation_property_inv, parameter[name[prop]]] return[call[name[float], parameter[name[prop]]]]
keyword[def] identifier[interpolate_P] ( identifier[self] , identifier[T] , identifier[P] , identifier[name] ): literal[string] identifier[key] =( identifier[name] , identifier[self] . identifier[interpolation_T] , identifier[self] . identifier[interpolation_P] , identifier[self] . identifier[interpolation_property] , identifier[self] . identifier[interpolation_property_inv] ) keyword[if] identifier[key] keyword[in] identifier[self] . identifier[tabular_data_interpolators] : identifier[extrapolator] , identifier[spline] = identifier[self] . identifier[tabular_data_interpolators] [ identifier[key] ] keyword[else] : identifier[Ts] , identifier[Ps] , identifier[properties] = identifier[self] . identifier[tabular_data] [ identifier[name] ] keyword[if] identifier[self] . identifier[interpolation_T] : identifier[Ts2] =[ identifier[self] . identifier[interpolation_T] ( identifier[T2] ) keyword[for] identifier[T2] keyword[in] identifier[Ts] ] keyword[else] : identifier[Ts2] = identifier[Ts] keyword[if] identifier[self] . identifier[interpolation_P] : identifier[Ps2] =[ identifier[self] . identifier[interpolation_P] ( identifier[P2] ) keyword[for] identifier[P2] keyword[in] identifier[Ps] ] keyword[else] : identifier[Ps2] = identifier[Ps] keyword[if] identifier[self] . identifier[interpolation_property] : identifier[properties2] =[ identifier[self] . identifier[interpolation_property] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[properties] ] keyword[else] : identifier[properties2] = identifier[properties] identifier[extrapolator] = identifier[interp2d] ( identifier[Ts2] , identifier[Ps2] , identifier[properties2] ) keyword[if] identifier[len] ( identifier[properties] )>= literal[int] : identifier[spline] = identifier[interp2d] ( identifier[Ts2] , identifier[Ps2] , identifier[properties2] , identifier[kind] = literal[string] ) keyword[else] : identifier[spline] = keyword[None] identifier[self] . identifier[tabular_data_interpolators] [ identifier[key] ]=( identifier[extrapolator] , identifier[spline] ) identifier[Ts] , identifier[Ps] , identifier[properties] = identifier[self] . identifier[tabular_data] [ identifier[name] ] keyword[if] identifier[T] < identifier[Ts] [ literal[int] ] keyword[or] identifier[T] > identifier[Ts] [- literal[int] ] keyword[or] keyword[not] identifier[spline] keyword[or] identifier[P] < identifier[Ps] [ literal[int] ] keyword[or] identifier[P] > identifier[Ps] [- literal[int] ]: identifier[tool] = identifier[extrapolator] keyword[else] : identifier[tool] = identifier[spline] keyword[if] identifier[self] . identifier[interpolation_T] : identifier[T] = identifier[self] . identifier[interpolation_T] ( identifier[T] ) keyword[if] identifier[self] . identifier[interpolation_P] : identifier[P] = identifier[self] . identifier[interpolation_T] ( identifier[P] ) identifier[prop] = identifier[tool] ( identifier[T] , identifier[P] ) keyword[if] identifier[self] . identifier[interpolation_property] : identifier[prop] = identifier[self] . identifier[interpolation_property_inv] ( identifier[prop] ) keyword[return] identifier[float] ( identifier[prop] )
def interpolate_P(self, T, P, name): """Method to perform interpolation on a given tabular data set previously added via `set_tabular_data_P`. This method will create the interpolators the first time it is used on a property set, and store them for quick future use. Interpolation is cubic-spline based if 5 or more points are available, and linearly interpolated if not. Extrapolation is always performed linearly. This function uses the transforms `interpolation_T`, `interpolation_P`, `interpolation_property`, and `interpolation_property_inv` if set. If any of these are changed after the interpolators were first created, new interpolators are created with the new transforms. All interpolation is performed via the `interp2d` function. Parameters ---------- T : float Temperature at which to interpolate the property, [K] T : float Pressure at which to interpolate the property, [Pa] name : str The name assigned to the tabular data set Returns ------- prop : float Calculated property, [`units`] """ key = (name, self.interpolation_T, self.interpolation_P, self.interpolation_property, self.interpolation_property_inv) # If the interpolator and extrapolator has already been created, load it if key in self.tabular_data_interpolators: (extrapolator, spline) = self.tabular_data_interpolators[key] # depends on [control=['if'], data=['key']] else: (Ts, Ps, properties) = self.tabular_data[name] if self.interpolation_T: # Transform ths Ts with interpolation_T if set Ts2 = [self.interpolation_T(T2) for T2 in Ts] # depends on [control=['if'], data=[]] else: Ts2 = Ts if self.interpolation_P: # Transform ths Ts with interpolation_T if set Ps2 = [self.interpolation_P(P2) for P2 in Ps] # depends on [control=['if'], data=[]] else: Ps2 = Ps if self.interpolation_property: # Transform ths props with interpolation_property if set properties2 = [self.interpolation_property(p) for p in properties] # depends on [control=['if'], data=[]] else: properties2 = properties # Only allow linear extrapolation, but with whatever transforms are specified extrapolator = interp2d(Ts2, Ps2, properties2) # interpolation if fill value is missing # If more than 5 property points, create a spline interpolation if len(properties) >= 5: spline = interp2d(Ts2, Ps2, properties2, kind='cubic') # depends on [control=['if'], data=[]] else: spline = None self.tabular_data_interpolators[key] = (extrapolator, spline) # Load the stores values, tor checking which interpolation strategy to # use. (Ts, Ps, properties) = self.tabular_data[name] if T < Ts[0] or T > Ts[-1] or (not spline) or (P < Ps[0]) or (P > Ps[-1]): tool = extrapolator # depends on [control=['if'], data=[]] else: tool = spline if self.interpolation_T: T = self.interpolation_T(T) # depends on [control=['if'], data=[]] if self.interpolation_P: P = self.interpolation_T(P) # depends on [control=['if'], data=[]] prop = tool(T, P) # either spline, or linear interpolation if self.interpolation_property: prop = self.interpolation_property_inv(prop) # depends on [control=['if'], data=[]] return float(prop)
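interpolate_P memoizes one (extrapolator, spline) pair per (name, transforms) key, so changing any transform changes the key and naturally forces a refit. The caching shape in isolation, with hypothetical builder names:

_interpolators = {}

def get_interpolators(name, transforms, build):
    """Key the cache on everything that affects the fit; a changed
    transform yields a new key and hence a fresh interpolator pair."""
    key = (name,) + tuple(transforms)
    if key not in _interpolators:
        _interpolators[key] = build()
    return _interpolators[key]

# Example: pair = get_interpolators('rho', (None, None), lambda: ('linear', 'cubic'))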
def executor(self, max_workers=1): """single global executor""" cls = self.__class__ if cls._executor is None: cls._executor = ThreadPoolExecutor(max_workers) return cls._executor
def function[executor, parameter[self, max_workers]]: constant[single global executor] variable[cls] assign[=] name[self].__class__ if compare[name[cls]._executor is constant[None]] begin[:] name[cls]._executor assign[=] call[name[ThreadPoolExecutor], parameter[name[max_workers]]] return[name[cls]._executor]
keyword[def] identifier[executor] ( identifier[self] , identifier[max_workers] = literal[int] ): literal[string] identifier[cls] = identifier[self] . identifier[__class__] keyword[if] identifier[cls] . identifier[_executor] keyword[is] keyword[None] : identifier[cls] . identifier[_executor] = identifier[ThreadPoolExecutor] ( identifier[max_workers] ) keyword[return] identifier[cls] . identifier[_executor]
def executor(self, max_workers=1): """single global executor""" cls = self.__class__ if cls._executor is None: cls._executor = ThreadPoolExecutor(max_workers) # depends on [control=['if'], data=[]] return cls._executor
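The executor row shows a lazily created, class-level ThreadPoolExecutor shared by every instance. A self-contained sketch of the same pattern (the Service class name is hypothetical):

from concurrent.futures import ThreadPoolExecutor

class Service:
    _executor = None  # shared by all instances of the class

    def executor(self, max_workers=1):
        """single global executor"""
        cls = self.__class__
        if cls._executor is None:
            cls._executor = ThreadPoolExecutor(max_workers)
        return cls._executor

a, b = Service(), Service()
# The pool is created on first use; max_workers is ignored on later calls.
assert a.executor(max_workers=4) is b.executor()

That max_workers only takes effect on the first call is a quirk worth documenting wherever this pattern is used.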
def geometry_range(crd_range, elev, crd_type): """ Range of coordinates. (e.g. 2 latitude coordinates, and 0 longitude coordinates) :param crd_range: Latitude or Longitude values :param elev: Elevation value :param crd_type: Coordinate type, lat or lon :return dict: """ d = OrderedDict() coordinates = [[] for i in range(len(crd_range))] # latitude if crd_type == "lat": for idx, i in enumerate(crd_range): coordinates[idx] = [crd_range[idx], "nan"] if elev: coordinates[idx].append(elev) # longitude elif crd_type == "lon": for idx, i in enumerate(crd_range): coordinates[idx] = ["nan", crd_range[idx]] if elev: coordinates[idx].append(elev) d["type"] = "Range" d["coordinates"] = coordinates return d
def function[geometry_range, parameter[crd_range, elev, crd_type]]: constant[ Range of coordinates. (e.g. 2 latitude coordinates, and 0 longitude coordinates) :param crd_range: Latitude or Longitude values :param elev: Elevation value :param crd_type: Coordinate type, lat or lon :return dict: ] variable[d] assign[=] call[name[OrderedDict], parameter[]] variable[coordinates] assign[=] <ast.ListComp object at 0x7da18f723ac0> if compare[name[crd_type] equal[==] constant[lat]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18f720790>, <ast.Name object at 0x7da18f7236d0>]]] in starred[call[name[enumerate], parameter[name[crd_range]]]] begin[:] call[name[coordinates]][name[idx]] assign[=] list[[<ast.Subscript object at 0x7da18fe914b0>, <ast.Constant object at 0x7da18fe903d0>]] if name[elev] begin[:] call[call[name[coordinates]][name[idx]].append, parameter[name[elev]]] call[name[d]][constant[type]] assign[=] constant[Range] call[name[d]][constant[coordinates]] assign[=] name[coordinates] return[name[d]]
keyword[def] identifier[geometry_range] ( identifier[crd_range] , identifier[elev] , identifier[crd_type] ): literal[string] identifier[d] = identifier[OrderedDict] () identifier[coordinates] =[[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[crd_range] ))] keyword[if] identifier[crd_type] == literal[string] : keyword[for] identifier[idx] , identifier[i] keyword[in] identifier[enumerate] ( identifier[crd_range] ): identifier[coordinates] [ identifier[idx] ]=[ identifier[crd_range] [ identifier[idx] ], literal[string] ] keyword[if] identifier[elev] : identifier[coordinates] [ identifier[idx] ]. identifier[append] ( identifier[elev] ) keyword[elif] identifier[crd_type] == literal[string] : keyword[for] identifier[idx] , identifier[i] keyword[in] identifier[enumerate] ( identifier[crd_range] ): identifier[coordinates] [ identifier[idx] ]=[ literal[string] , identifier[crd_range] [ identifier[idx] ]] keyword[if] identifier[elev] : identifier[coordinates] [ identifier[idx] ]. identifier[append] ( identifier[elev] ) identifier[d] [ literal[string] ]= literal[string] identifier[d] [ literal[string] ]= identifier[coordinates] keyword[return] identifier[d]
def geometry_range(crd_range, elev, crd_type): """ Range of coordinates. (e.g. 2 latitude coordinates, and 0 longitude coordinates) :param crd_range: Latitude or Longitude values :param elev: Elevation value :param crd_type: Coordinate type, lat or lon :return dict: """ d = OrderedDict() coordinates = [[] for i in range(len(crd_range))] # latitude if crd_type == 'lat': for (idx, i) in enumerate(crd_range): coordinates[idx] = [crd_range[idx], 'nan'] if elev: coordinates[idx].append(elev) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # longitude elif crd_type == 'lon': for (idx, i) in enumerate(crd_range): coordinates[idx] = ['nan', crd_range[idx]] if elev: coordinates[idx].append(elev) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] d['type'] = 'Range' d['coordinates'] = coordinates return d
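For reference, the structure geometry_range builds for two latitude values with an elevation, inferred by tracing the code above:

from collections import OrderedDict

# geometry_range([49.5, 50.1], 300, 'lat') returns:
expected = OrderedDict([
    ('type', 'Range'),
    ('coordinates', [[49.5, 'nan', 300],
                     [50.1, 'nan', 300]]),
])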
def recompute_tabs_titles(self): """Updates labels on all tabs. This is required when `self.abbreviate` changes """ use_vte_titles = self.settings.general.get_boolean("use-vte-titles") if not use_vte_titles: return # TODO NOTEBOOK this code only works if there is only one terminal in a # page, this need to be rewritten for terminal in self.get_notebook().iter_terminals(): page_num = self.get_notebook().page_num(terminal.get_parent()) self.get_notebook().rename_page(page_num, self.compute_tab_title(terminal), False)
def function[recompute_tabs_titles, parameter[self]]: constant[Updates labels on all tabs. This is required when `self.abbreviate` changes ] variable[use_vte_titles] assign[=] call[name[self].settings.general.get_boolean, parameter[constant[use-vte-titles]]] if <ast.UnaryOp object at 0x7da1b23474c0> begin[:] return[None] for taget[name[terminal]] in starred[call[call[name[self].get_notebook, parameter[]].iter_terminals, parameter[]]] begin[:] variable[page_num] assign[=] call[call[name[self].get_notebook, parameter[]].page_num, parameter[call[name[terminal].get_parent, parameter[]]]] call[call[name[self].get_notebook, parameter[]].rename_page, parameter[name[page_num], call[name[self].compute_tab_title, parameter[name[terminal]]], constant[False]]]
keyword[def] identifier[recompute_tabs_titles] ( identifier[self] ): literal[string] identifier[use_vte_titles] = identifier[self] . identifier[settings] . identifier[general] . identifier[get_boolean] ( literal[string] ) keyword[if] keyword[not] identifier[use_vte_titles] : keyword[return] keyword[for] identifier[terminal] keyword[in] identifier[self] . identifier[get_notebook] (). identifier[iter_terminals] (): identifier[page_num] = identifier[self] . identifier[get_notebook] (). identifier[page_num] ( identifier[terminal] . identifier[get_parent] ()) identifier[self] . identifier[get_notebook] (). identifier[rename_page] ( identifier[page_num] , identifier[self] . identifier[compute_tab_title] ( identifier[terminal] ), keyword[False] )
def recompute_tabs_titles(self): """Updates labels on all tabs. This is required when `self.abbreviate` changes """ use_vte_titles = self.settings.general.get_boolean('use-vte-titles') if not use_vte_titles: return # depends on [control=['if'], data=[]] # TODO NOTEBOOK this code only works if there is only one terminal in a # page, this need to be rewritten for terminal in self.get_notebook().iter_terminals(): page_num = self.get_notebook().page_num(terminal.get_parent()) self.get_notebook().rename_page(page_num, self.compute_tab_title(terminal), False) # depends on [control=['for'], data=['terminal']]
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_enode_mac_address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_interface = ET.Element("fcoe_get_interface") config = fcoe_get_interface output = ET.SubElement(fcoe_get_interface, "output") fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list") fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id") fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id') fcoe_intf_enode_mac_address = ET.SubElement(fcoe_intf_list, "fcoe-intf-enode-mac-address") fcoe_intf_enode_mac_address.text = kwargs.pop('fcoe_intf_enode_mac_address') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_enode_mac_address, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[fcoe_get_interface] assign[=] call[name[ET].Element, parameter[constant[fcoe_get_interface]]] variable[config] assign[=] name[fcoe_get_interface] variable[output] assign[=] call[name[ET].SubElement, parameter[name[fcoe_get_interface], constant[output]]] variable[fcoe_intf_list] assign[=] call[name[ET].SubElement, parameter[name[output], constant[fcoe-intf-list]]] variable[fcoe_intf_fcoe_port_id_key] assign[=] call[name[ET].SubElement, parameter[name[fcoe_intf_list], constant[fcoe-intf-fcoe-port-id]]] name[fcoe_intf_fcoe_port_id_key].text assign[=] call[name[kwargs].pop, parameter[constant[fcoe_intf_fcoe_port_id]]] variable[fcoe_intf_enode_mac_address] assign[=] call[name[ET].SubElement, parameter[name[fcoe_intf_list], constant[fcoe-intf-enode-mac-address]]] name[fcoe_intf_enode_mac_address].text assign[=] call[name[kwargs].pop, parameter[constant[fcoe_intf_enode_mac_address]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_enode_mac_address] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[fcoe_get_interface] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[fcoe_get_interface] identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_get_interface] , literal[string] ) identifier[fcoe_intf_list] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] ) identifier[fcoe_intf_fcoe_port_id_key] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_intf_list] , literal[string] ) identifier[fcoe_intf_fcoe_port_id_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[fcoe_intf_enode_mac_address] = identifier[ET] . identifier[SubElement] ( identifier[fcoe_intf_list] , literal[string] ) identifier[fcoe_intf_enode_mac_address] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_enode_mac_address(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') fcoe_get_interface = ET.Element('fcoe_get_interface') config = fcoe_get_interface output = ET.SubElement(fcoe_get_interface, 'output') fcoe_intf_list = ET.SubElement(output, 'fcoe-intf-list') fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, 'fcoe-intf-fcoe-port-id') fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id') fcoe_intf_enode_mac_address = ET.SubElement(fcoe_intf_list, 'fcoe-intf-enode-mac-address') fcoe_intf_enode_mac_address.text = kwargs.pop('fcoe_intf_enode_mac_address') callback = kwargs.pop('callback', self._callback) return callback(config)
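The auto-generated fcoe builder assembles an ElementTree request and hands it to a callback. Stripped of the kwargs plumbing, the XML it produces looks like this (the port id and MAC are hypothetical values):

import xml.etree.ElementTree as ET

root = ET.Element('fcoe_get_interface')
intf_list = ET.SubElement(ET.SubElement(root, 'output'), 'fcoe-intf-list')
ET.SubElement(intf_list, 'fcoe-intf-fcoe-port-id').text = '1'
ET.SubElement(intf_list, 'fcoe-intf-enode-mac-address').text = '00:05:33:00:00:01'

print(ET.tostring(root).decode())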
def _close(self, fd): """ Close the descriptor used for a path regardless of mode. """ if self._mode == WF_INOTIFYX: try: pynotifyx.rm_watch(self._inx_fd, fd) except: pass else: try: os.close(fd) except: pass
def function[_close, parameter[self, fd]]: constant[ Close the descriptor used for a path regardless of mode. ] if compare[name[self]._mode equal[==] name[WF_INOTIFYX]] begin[:] <ast.Try object at 0x7da2049604f0>
keyword[def] identifier[_close] ( identifier[self] , identifier[fd] ): literal[string] keyword[if] identifier[self] . identifier[_mode] == identifier[WF_INOTIFYX] : keyword[try] : identifier[pynotifyx] . identifier[rm_watch] ( identifier[self] . identifier[_inx_fd] , identifier[fd] ) keyword[except] : keyword[pass] keyword[else] : keyword[try] : identifier[os] . identifier[close] ( identifier[fd] ) keyword[except] : keyword[pass]
def _close(self, fd): """ Close the descriptor used for a path regardless of mode. """ if self._mode == WF_INOTIFYX: try: pynotifyx.rm_watch(self._inx_fd, fd) # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: try: os.close(fd) # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]]
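_close uses bare except blocks, which also swallow KeyboardInterrupt and SystemExit. If the intent is only "ignore failures while closing", contextlib.suppress(Exception) states that more precisely; a possible tightening of the os.close branch (a sketch, not the project's code):

import os
from contextlib import suppress

def close_fd(fd):
    # suppress(Exception) ignores ordinary errors but still lets
    # KeyboardInterrupt/SystemExit propagate, unlike a bare except.
    with suppress(Exception):
        os.close(fd)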
def add_stream(self, policy): """ Add a stream to the SRTP session, applying the given `policy` to the stream. :param policy: :class:`Policy` """ _srtp_assert(lib.srtp_add_stream(self._srtp[0], policy._policy))
def function[add_stream, parameter[self, policy]]: constant[ Add a stream to the SRTP session, applying the given `policy` to the stream. :param policy: :class:`Policy` ] call[name[_srtp_assert], parameter[call[name[lib].srtp_add_stream, parameter[call[name[self]._srtp][constant[0]], name[policy]._policy]]]]
keyword[def] identifier[add_stream] ( identifier[self] , identifier[policy] ): literal[string] identifier[_srtp_assert] ( identifier[lib] . identifier[srtp_add_stream] ( identifier[self] . identifier[_srtp] [ literal[int] ], identifier[policy] . identifier[_policy] ))
def add_stream(self, policy): """ Add a stream to the SRTP session, applying the given `policy` to the stream. :param policy: :class:`Policy` """ _srtp_assert(lib.srtp_add_stream(self._srtp[0], policy._policy))
def _do_bgread(stream, blockSizeLimit, pollTime, closeStream, results): ''' _do_bgread - Worker functon for the background read thread. @param stream <object> - Stream to read until closed @param results <BackgroundReadData> ''' # Put the whole function in a try instead of just the read portion for performance reasons. try: while True: nextData = nonblock_read(stream, limit=blockSizeLimit) if nextData is None: break elif nextData: results.addBlock(nextData) time.sleep(pollTime) except Exception as e: results.error = e return if closeStream and hasattr(stream, 'close'): stream.close() results.isFinished = True
def function[_do_bgread, parameter[stream, blockSizeLimit, pollTime, closeStream, results]]: constant[ _do_bgread - Worker functon for the background read thread. @param stream <object> - Stream to read until closed @param results <BackgroundReadData> ] <ast.Try object at 0x7da1b26482e0> if <ast.BoolOp object at 0x7da2044c17e0> begin[:] call[name[stream].close, parameter[]] name[results].isFinished assign[=] constant[True]
keyword[def] identifier[_do_bgread] ( identifier[stream] , identifier[blockSizeLimit] , identifier[pollTime] , identifier[closeStream] , identifier[results] ): literal[string] keyword[try] : keyword[while] keyword[True] : identifier[nextData] = identifier[nonblock_read] ( identifier[stream] , identifier[limit] = identifier[blockSizeLimit] ) keyword[if] identifier[nextData] keyword[is] keyword[None] : keyword[break] keyword[elif] identifier[nextData] : identifier[results] . identifier[addBlock] ( identifier[nextData] ) identifier[time] . identifier[sleep] ( identifier[pollTime] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[results] . identifier[error] = identifier[e] keyword[return] keyword[if] identifier[closeStream] keyword[and] identifier[hasattr] ( identifier[stream] , literal[string] ): identifier[stream] . identifier[close] () identifier[results] . identifier[isFinished] = keyword[True]
def _do_bgread(stream, blockSizeLimit, pollTime, closeStream, results): """ _do_bgread - Worker functon for the background read thread. @param stream <object> - Stream to read until closed @param results <BackgroundReadData> """ # Put the whole function in a try instead of just the read portion for performance reasons. try: while True: nextData = nonblock_read(stream, limit=blockSizeLimit) if nextData is None: break # depends on [control=['if'], data=[]] elif nextData: results.addBlock(nextData) # depends on [control=['if'], data=[]] time.sleep(pollTime) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]] except Exception as e: results.error = e return # depends on [control=['except'], data=['e']] if closeStream and hasattr(stream, 'close'): stream.close() # depends on [control=['if'], data=[]] results.isFinished = True
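_do_bgread pairs a polling loop with a shared results object that the caller inspects. The same thread-plus-shared-state shape, with the library-specific nonblock_read swapped for a caller-supplied read_chunk callable, can be sketched as follows (names hypothetical):

import threading
import time

class BackgroundReadData:
    """Shared state: written by the worker thread, read by the caller."""
    def __init__(self):
        self.blocks = []
        self.error = None
        self.isFinished = False

def bgread(read_chunk, poll_time=0.05):
    results = BackgroundReadData()

    def worker():
        try:
            while True:
                data = read_chunk()
                if data is None:  # None signals the stream is closed
                    break
                if data:
                    results.blocks.append(data)
                time.sleep(poll_time)
        except Exception as e:
            # Mirror the original: record the error and bail without
            # marking the read finished.
            results.error = e
            return
        results.isFinished = True

    threading.Thread(target=worker, daemon=True).start()
    return results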
def key(string): """Return a Czech sort key for the given string :param string: string (unicode in Python 2) Comparing the sort keys of two strings will give the result according to how the strings would compare in Czech collation order, i.e. ``key(s1) < key(s2)`` <=> ``s1`` comes before ``s2`` The structure of the sort key may change in the future. The only operations guaranteed to work on it are comparisons and equality checks (<, ==, etc.) against other keys. """ # The multi-level key is a nested tuple containing strings and ints. # The tuple contains sub-keys that roughly correspond to levels in # UTS #10 (http://unicode.org/reports/tr10/). Except for fallback strings # at the end, each contains a tuple of typically one key per element/letter. # - Alphabet: # Separators (0, p, l, w) # p: -no. of paragraph separators # l: -no. of line separators # w: -no. of word separators (spaces) # Letters (1, l); l is the base letter, lowercased # Special letters: 'č' shows up as 'cx'; 'ř' as 'rx', etc. # the 'ch' digraph becomes 'hx' # Numbers (2, n); n is int(numeric value * 100) # Missing for non-letters # - Diacritics (p, n, s) # p: position (above, below, behind, in front, in/over/around, unknown) # (as a sorted tuple of indices) # s: shape (dot, grave, breve, ..., unknown) # (as a sorted tuple of indices) # Missing for non-letters; empty if diacritics included in base (e.g. ř) # - Case: True for uppercased letters # Missing for non-letters # - Punctuation: see PUNCTUATION_MAP below # - (fallback) NFKD-normalized string # - (fallback) original string subkeys = [], [], [], [] add_alphabet = subkeys[0].append add_diacritic = subkeys[1].append add_case = subkeys[2].append add_punctuation = subkeys[3].append skip = 0 normal = nfkd(string).rstrip() diacritics = [] for i, char in enumerate(normal): if skip > 0: skip -= 1 continue category = get_category(char) cat0, cat1 = category if cat0 == 'L': # Letter (Lowercase, Modifier, Other, Titlecase, Uppercase) char_lower = char.lower() found = False if char_lower in DECOMPOSING_EXTRAS: # stuff like Ł doesn't decompose in Unicode; do it manually char_lower, _extra_diacritics = DECOMPOSING_EXTRAS[char] diacritics.extend(_extra_diacritics) for next in normal[i+1:]: if next == HACEK and char_lower in ('c', 'r', 's', 'z'): skip += 1 char_lower = char_lower + 'x' elif char_lower == 'c' and next.lower() == 'h': skip += 1 char_lower = 'hx' break elif next in DIACRITICS_MAP: skip += 1 diacritics.extend(DIACRITICS_MAP[next]) elif unicodedata.category(char)[0] == 'M': skip += 1 diacritics.append((POS_UNKNOWN, SH_UNKNOWN)) else: break add_alphabet((1, char_lower)) if diacritics: add_diacritic(make_diacritics_key(diacritics)) else: add_diacritic(()) add_case(cat1 in ('u', 't')) # upper & title case add_punctuation((0, )) diacritics = [] elif cat0 == 'Z': # Separator (Line, Paragraph, Space) counts = {'Zp': 0, 'Zl': 0, 'Zs': 0} counts[category] = 1 for next in normal[i+1:]: next_cat = get_category(next) if next_cat[0] == 'Z': counts[next_cat] += 1 skip += 1 else: break add_alphabet((0, -counts['Zp'], -counts['Zl'], -counts['Zs'])) add_diacritic(()) add_case(False) add_punctuation((0, )) elif char in DIACRITICS_BEFORE_MAP: diacritics.extend(DIACRITICS_BEFORE_MAP[char]) elif char in DIACRITICS_MAP: diacritics.extend(DIACRITICS_MAP[char]) elif char in PUNCTUATION_MAP: add_punctuation(PUNCTUATION_MAP[char]) elif cat0 == 'P': # Punctuation (Connector, Dash, Open/Close, Final/Initial Quote, Other) add_punctuation((3, )) elif cat0 == 'N': # Number (Decimal digit, Letter, Other) add_alphabet((2, int(unicodedata.numeric(char, 0)) * 100)) add_diacritic(()) add_case(False) add_punctuation((0, )) elif cat0 == 'S': # Symbol (Currency, Modifier, Math) add_punctuation((3, )) elif cat0 == 'C': # Other (Control, Format, Not Assigned, Private Use, Surrogate) pass elif cat0 == 'M': # Mark (Spacing Combining, Enclosing, Nonspacing) # TODO diacritics.append((POS_FRONT, SH_UNKNOWN)) else: raise ValueError('Unknown Unicode category') if diacritics: add_diacritic(make_diacritics_key(diacritics)) diacritics = [] return tuple(tuple(k) for k in subkeys) + (normal, string)
def function[key, parameter[string]]: constant[Return a Czech sort key for the given string :param string: string (unicode in Python 2) Comparing the sort keys of two strings will give the result according to how the strings would compare in Czech collation order, i.e. ``key(s1) < key(s2)`` <=> ``s1`` comes before ``s2`` The structure of the sort key may change in the future. The only operations guaranteed to work on it are comparisons and equality checks (<, ==, etc.) against other keys. ] variable[subkeys] assign[=] tuple[[<ast.List object at 0x7da204347310>, <ast.List object at 0x7da204346590>, <ast.List object at 0x7da204347c70>, <ast.List object at 0x7da204344e80>]] variable[add_alphabet] assign[=] call[name[subkeys]][constant[0]].append variable[add_diacritic] assign[=] call[name[subkeys]][constant[1]].append variable[add_case] assign[=] call[name[subkeys]][constant[2]].append variable[add_punctuation] assign[=] call[name[subkeys]][constant[3]].append variable[skip] assign[=] constant[0] variable[normal] assign[=] call[call[name[nfkd], parameter[name[string]]].rstrip, parameter[]] variable[diacritics] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da204346a10>, <ast.Name object at 0x7da2043446d0>]]] in starred[call[name[enumerate], parameter[name[normal]]]] begin[:] if compare[name[skip] greater[>] constant[0]] begin[:] <ast.AugAssign object at 0x7da2043465f0> continue variable[category] assign[=] call[name[get_category], parameter[name[char]]] <ast.Tuple object at 0x7da2043441c0> assign[=] name[category] if compare[name[cat0] equal[==] constant[L]] begin[:] variable[char_lower] assign[=] call[name[char].lower, parameter[]] variable[found] assign[=] constant[False] if compare[name[char_lower] in name[DECOMPOSING_EXTRAS]] begin[:] <ast.Tuple object at 0x7da1b14c49d0> assign[=] call[name[DECOMPOSING_EXTRAS]][name[char]] call[name[diacritics].extend, parameter[name[_extra_diacritics]]] for taget[name[next]] in starred[call[name[normal]][<ast.Slice object at 0x7da1b26aceb0>]] begin[:] if <ast.BoolOp object at 0x7da1b26ae980> begin[:] <ast.AugAssign object at 0x7da1b26acac0> variable[char_lower] assign[=] binary_operation[name[char_lower] + constant[x]] call[name[add_alphabet], parameter[tuple[[<ast.Constant object at 0x7da1b26acd60>, <ast.Name object at 0x7da1b26acf40>]]]] if name[diacritics] begin[:] call[name[add_diacritic], parameter[call[name[make_diacritics_key], parameter[name[diacritics]]]]] call[name[add_case], parameter[compare[name[cat1] in tuple[[<ast.Constant object at 0x7da18eb57490>, <ast.Constant object at 0x7da18eb55660>]]]]] call[name[add_punctuation], parameter[tuple[[<ast.Constant object at 0x7da1b14c5ba0>]]]] variable[diacritics] assign[=] list[[]] if name[diacritics] begin[:] call[name[add_diacritic], parameter[call[name[make_diacritics_key], parameter[name[diacritics]]]]] variable[diacritics] assign[=] list[[]] return[binary_operation[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b1365390>]] + tuple[[<ast.Name object at 0x7da1b1366050>, <ast.Name object at 0x7da1b1367640>]]]]
keyword[def] identifier[key] ( identifier[string] ): literal[string] identifier[subkeys] =[],[],[],[] identifier[add_alphabet] = identifier[subkeys] [ literal[int] ]. identifier[append] identifier[add_diacritic] = identifier[subkeys] [ literal[int] ]. identifier[append] identifier[add_case] = identifier[subkeys] [ literal[int] ]. identifier[append] identifier[add_punctuation] = identifier[subkeys] [ literal[int] ]. identifier[append] identifier[skip] = literal[int] identifier[normal] = identifier[nfkd] ( identifier[string] ). identifier[rstrip] () identifier[diacritics] =[] keyword[for] identifier[i] , identifier[char] keyword[in] identifier[enumerate] ( identifier[normal] ): keyword[if] identifier[skip] > literal[int] : identifier[skip] -= literal[int] keyword[continue] identifier[category] = identifier[get_category] ( identifier[char] ) identifier[cat0] , identifier[cat1] = identifier[category] keyword[if] identifier[cat0] == literal[string] : identifier[char_lower] = identifier[char] . identifier[lower] () identifier[found] = keyword[False] keyword[if] identifier[char_lower] keyword[in] identifier[DECOMPOSING_EXTRAS] : identifier[char_lower] , identifier[_extra_diacritics] = identifier[DECOMPOSING_EXTRAS] [ identifier[char] ] identifier[diacritics] . identifier[extend] ( identifier[_extra_diacritics] ) keyword[for] identifier[next] keyword[in] identifier[normal] [ identifier[i] + literal[int] :]: keyword[if] identifier[next] == identifier[HACEK] keyword[and] identifier[char_lower] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ): identifier[skip] += literal[int] identifier[char_lower] = identifier[char_lower] + literal[string] keyword[elif] identifier[char_lower] == literal[string] keyword[and] identifier[next] . identifier[lower] ()== literal[string] : identifier[skip] += literal[int] identifier[char_lower] = literal[string] keyword[break] keyword[elif] identifier[next] keyword[in] identifier[DIACRITICS_MAP] : identifier[skip] += literal[int] identifier[diacritics] . identifier[extend] ( identifier[DIACRITICS_MAP] [ identifier[next] ]) keyword[elif] identifier[unicodedata] . identifier[category] ( identifier[char] )[ literal[int] ]== literal[string] : identifier[skip] += literal[int] identifier[diacritics] . identifier[append] (( identifier[POS_UNKNOWN] , identifier[SH_UNKNOWN] )) keyword[else] : keyword[break] identifier[add_alphabet] (( literal[int] , identifier[char_lower] )) keyword[if] identifier[diacritics] : identifier[add_diacritic] ( identifier[make_diacritics_key] ( identifier[diacritics] )) keyword[else] : identifier[add_diacritic] (()) identifier[add_case] ( identifier[cat1] keyword[in] ( literal[string] , literal[string] )) identifier[add_punctuation] (( literal[int] ,)) identifier[diacritics] =[] keyword[elif] identifier[cat0] == literal[string] : identifier[counts] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] } identifier[counts] [ identifier[category] ]= literal[int] keyword[for] identifier[next] keyword[in] identifier[normal] [ identifier[i] + literal[int] :]: identifier[next_cat] = identifier[get_category] ( identifier[next] ) keyword[if] identifier[next_cat] [ literal[int] ]== literal[string] : identifier[counts] [ identifier[next_cat] ]+= literal[int] identifier[skip] += literal[int] keyword[else] : keyword[break] identifier[add_alphabet] (( literal[int] ,- identifier[counts] [ literal[string] ],- identifier[counts] [ literal[string] ],- identifier[counts] [ literal[string] ])) identifier[add_diacritic] (()) identifier[add_case] ( keyword[False] ) identifier[add_punctuation] (( literal[int] ,)) keyword[elif] identifier[char] keyword[in] identifier[DIACRITICS_BEFORE_MAP] : identifier[diacritics] . identifier[extend] ( identifier[DIACRITICS_BEFORE_MAP] [ identifier[char] ]) keyword[elif] identifier[char] keyword[in] identifier[DIACRITICS_MAP] : identifier[diacritics] . identifier[extend] ( identifier[DIACRITICS_MAP] [ identifier[char] ]) keyword[elif] identifier[char] keyword[in] identifier[PUNCTUATION_MAP] : identifier[add_punctuation] ( identifier[PUNCTUATION_MAP] [ identifier[char] ]) keyword[elif] identifier[cat0] == literal[string] : identifier[add_punctuation] (( literal[int] ,)) keyword[elif] identifier[cat0] == literal[string] : identifier[add_alphabet] (( literal[int] , identifier[int] ( identifier[unicodedata] . identifier[numeric] ( identifier[char] , literal[int] ))* literal[int] )) identifier[add_diacritic] (()) identifier[add_case] ( keyword[False] ) identifier[add_punctuation] (( literal[int] ,)) keyword[elif] identifier[cat0] == literal[string] : identifier[add_punctuation] (( literal[int] ,)) keyword[elif] identifier[cat0] == literal[string] : keyword[pass] keyword[elif] identifier[cat0] == literal[string] : identifier[diacritics] . identifier[append] (( identifier[POS_FRONT] , identifier[SH_UNKNOWN] )) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[diacritics] : identifier[add_diacritic] ( identifier[make_diacritics_key] ( identifier[diacritics] )) identifier[diacritics] =[] keyword[return] identifier[tuple] ( identifier[tuple] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[subkeys] )+( identifier[normal] , identifier[string] )
def key(string):
    """Return a Czech sort key for the given string

    :param string: string (unicode in Python 2)

    Comparing the sort keys of two strings will give the result according
    to how the strings would compare in Czech collation order, i.e.
    ``key(s1) < key(s2)``  <=>  ``s1`` comes before ``s2``

    The structure of the sort key may change in the future.
    The only operations guaranteed to work on it are comparisons and equality
    checks (<, ==, etc.) against other keys.
    """
    # The multi-level key is a nested tuple containing strings and ints.
    # The tuple contains sub-keys that roughly correspond to levels in
    # UTS #10 (http://unicode.org/reports/tr10/). Except for fallback strings
    # at the end, each contains a tuple of typically one key per element/letter.
    # - Alphabet:
    #     Separators (0, p, l, w)
    #         p: -no. of paragraph separators
    #         l: -no. of line separators
    #         w: -no. of word separators (spaces)
    #     Letters (1, l); l is the base letter, lowercased
    #         Special letters: 'č' shows up as 'cx'; 'ř' as 'rx', etc.
    #         the 'ch' digraph becomes 'hx'
    #     Numbers (2, n); n is int(numeric value * 100)
    #     Missing for non-letters
    # - Diacritics (p, n, s)
    #     p: position (above, below, behind, in front, in/over/around, unknown)
    #        (as a sorted tuple of indices)
    #     s: shape (dot, grave, breve, ..., unknown)
    #        (as a sorted tuple of indices)
    #     Missing for non-letters; empty if diacritics included in base (e.g. ř)
    # - Case: True for uppercased letters
    #     Missing for non-letters
    # - Punctuation: see PUNCTUATION_MAP below
    # - (fallback) NFKD-normalized string
    # - (fallback) original string
    subkeys = ([], [], [], [])
    add_alphabet = subkeys[0].append
    add_diacritic = subkeys[1].append
    add_case = subkeys[2].append
    add_punctuation = subkeys[3].append
    skip = 0
    normal = nfkd(string).rstrip()
    diacritics = []
    for (i, char) in enumerate(normal):
        if skip > 0:
            skip -= 1
            continue # depends on [control=['if'], data=['skip']]
        category = get_category(char)
        (cat0, cat1) = category
        if cat0 == 'L':
            # Letter (Lowercase, Modifier, Other, Titlecase, Uppercase)
            char_lower = char.lower()
            found = False
            if char_lower in DECOMPOSING_EXTRAS:
                # stuff like Ł doesn't decompose in Unicode; do it manually
                (char_lower, _extra_diacritics) = DECOMPOSING_EXTRAS[char]
                diacritics.extend(_extra_diacritics) # depends on [control=['if'], data=['char_lower', 'DECOMPOSING_EXTRAS']]
            for next in normal[i + 1:]:
                if next == HACEK and char_lower in ('c', 'r', 's', 'z'):
                    skip += 1
                    char_lower = char_lower + 'x' # depends on [control=['if'], data=[]]
                elif char_lower == 'c' and next.lower() == 'h':
                    skip += 1
                    char_lower = 'hx'
                    break # depends on [control=['if'], data=[]]
                elif next in DIACRITICS_MAP:
                    skip += 1
                    diacritics.extend(DIACRITICS_MAP[next]) # depends on [control=['if'], data=['next', 'DIACRITICS_MAP']]
                elif unicodedata.category(char)[0] == 'M':
                    skip += 1
                    diacritics.append((POS_UNKNOWN, SH_UNKNOWN)) # depends on [control=['if'], data=[]]
                else:
                    break # depends on [control=['for'], data=['next']]
            add_alphabet((1, char_lower))
            if diacritics:
                add_diacritic(make_diacritics_key(diacritics)) # depends on [control=['if'], data=[]]
            else:
                add_diacritic(())
            add_case(cat1 in ('u', 't'))  # upper & title case
            add_punctuation((0,))
            diacritics = [] # depends on [control=['if'], data=[]]
        elif cat0 == 'Z':
            # Separator (Line, Paragraph, Space)
            counts = {'Zp': 0, 'Zl': 0, 'Zs': 0}
            counts[category] = 1
            for next in normal[i + 1:]:
                next_cat = get_category(next)
                if next_cat[0] == 'Z':
                    counts[next_cat] += 1
                    skip += 1 # depends on [control=['if'], data=[]]
                else:
                    break # depends on [control=['for'], data=['next']]
            add_alphabet((0, -counts['Zp'], -counts['Zl'], -counts['Zs']))
            add_diacritic(())
            add_case(False)
            add_punctuation((0,)) # depends on [control=['if'], data=[]]
        elif char in DIACRITICS_BEFORE_MAP:
            diacritics.extend(DIACRITICS_BEFORE_MAP[char]) # depends on [control=['if'], data=['char', 'DIACRITICS_BEFORE_MAP']]
        elif char in DIACRITICS_MAP:
            diacritics.extend(DIACRITICS_MAP[char]) # depends on [control=['if'], data=['char', 'DIACRITICS_MAP']]
        elif char in PUNCTUATION_MAP:
            add_punctuation(PUNCTUATION_MAP[char]) # depends on [control=['if'], data=['char', 'PUNCTUATION_MAP']]
        elif cat0 == 'P':
            # Punctuation (Connector, Dash, Open/Close, Final/Initial Quote, Other)
            add_punctuation((3,)) # depends on [control=['if'], data=[]]
        elif cat0 == 'N':
            # Number (Decimal digit, Letter, Other)
            add_alphabet((2, int(unicodedata.numeric(char, 0)) * 100))
            add_diacritic(())
            add_case(False)
            add_punctuation((0,)) # depends on [control=['if'], data=[]]
        elif cat0 == 'S':
            # Symbol (Currency, Modifier, Math)
            add_punctuation((3,)) # depends on [control=['if'], data=[]]
        elif cat0 == 'C':
            # Other (Control, Format, Not Assigned, Private Use, Surrogate)
            pass # depends on [control=['if'], data=[]]
        elif cat0 == 'M':
            # Mark (Spacing Combining, Enclosing, Nonspacing)
            # TODO
            diacritics.append((POS_FRONT, SH_UNKNOWN)) # depends on [control=['if'], data=[]]
        else:
            raise ValueError('Unknown Unicode category') # depends on [control=['for'], data=[]]
    if diacritics:
        add_diacritic(make_diacritics_key(diacritics))
        diacritics = [] # depends on [control=['if'], data=[]]
    return tuple((tuple(k) for k in subkeys)) + (normal, string)
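A quick usage sketch for the key() above (illustrative only; it assumes key() is in scope from the surrounding Czech-collation module, and the expected order reflects Czech rules where 'ch' sorts as a single letter after 'h'):

# Illustrative only: assumes key() from the module above is importable.
words = ['hrad', 'chyba', 'čáp', 'cena', 'dům']
print(sorted(words, key=key))
# expected Czech order: ['cena', 'čáp', 'dům', 'hrad', 'chyba']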
def _wavGetInfo(f:Union[IO, str]) -> Tuple[SndInfo, Dict[str, Any]]:
    """
    Read the info of a wav file. Taken mostly from scipy.io.wavfile.

    If extended, returns also fsize and bigendian.
    """
    if isinstance(f, (str, bytes)):
        f = open(f, 'rb')
        needsclosing = True
    else:
        needsclosing = False
    fsize, bigendian = _wavReadRiff(f)
    fmt = ">i" if bigendian else "<i"
    while (f.tell() < fsize):
        chunk_id = f.read(4)
        if chunk_id == b'fmt ':
            chunksize, sampfmt, chans, sr, byterate, align, bits = _wavReadFmt(f, bigendian)
        elif chunk_id == b'data':
            datasize = _struct.unpack(fmt, f.read(4))[0]
            nframes = int(datasize / (chans * (bits / 8)))
            break
        else:
            _warnings.warn("chunk not understood: %s" % chunk_id)
            data = f.read(4)
            size = _struct.unpack(fmt, data)[0]
            f.seek(size, 1)
    encoding = _encoding(sampfmt, bits)
    if needsclosing:
        f.close()
    info = SndInfo(sr, nframes, chans, encoding, "wav")
    return info, {'fsize': fsize, 'bigendian': bigendian, 'datasize': datasize}
def function[_wavGetInfo, parameter[f]]: constant[ Read the info of a wav file. taken mostly from scipy.io.wavfile if extended: returns also fsize and bigendian ] if call[name[isinstance], parameter[name[f], tuple[[<ast.Name object at 0x7da20c993700>, <ast.Name object at 0x7da20c991540>]]]] begin[:] variable[f] assign[=] call[name[open], parameter[name[f], constant[rb]]] variable[needsclosing] assign[=] constant[True] <ast.Tuple object at 0x7da20c992350> assign[=] call[name[_wavReadRiff], parameter[name[f]]] variable[fmt] assign[=] <ast.IfExp object at 0x7da20c990940> while compare[call[name[f].tell, parameter[]] less[<] name[fsize]] begin[:] variable[chunk_id] assign[=] call[name[f].read, parameter[constant[4]]] if compare[name[chunk_id] equal[==] constant[b'fmt ']] begin[:] <ast.Tuple object at 0x7da20c9912a0> assign[=] call[name[_wavReadFmt], parameter[name[f], name[bigendian]]] variable[encoding] assign[=] call[name[_encoding], parameter[name[sampfmt], name[bits]]] if name[needsclosing] begin[:] call[name[f].close, parameter[]] variable[info] assign[=] call[name[SndInfo], parameter[name[sr], name[nframes], name[chans], name[encoding], constant[wav]]] return[tuple[[<ast.Name object at 0x7da18dc998a0>, <ast.Dict object at 0x7da18dc981f0>]]]
keyword[def] identifier[_wavGetInfo] ( identifier[f] : identifier[Union] [ identifier[IO] , identifier[str] ])-> identifier[Tuple] [ identifier[SndInfo] , identifier[Dict] [ identifier[str] , identifier[Any] ]]: literal[string] keyword[if] identifier[isinstance] ( identifier[f] ,( identifier[str] , identifier[bytes] )): identifier[f] = identifier[open] ( identifier[f] , literal[string] ) identifier[needsclosing] = keyword[True] keyword[else] : identifier[needsclosing] = keyword[False] identifier[fsize] , identifier[bigendian] = identifier[_wavReadRiff] ( identifier[f] ) identifier[fmt] = literal[string] keyword[if] identifier[bigendian] keyword[else] literal[string] keyword[while] ( identifier[f] . identifier[tell] ()< identifier[fsize] ): identifier[chunk_id] = identifier[f] . identifier[read] ( literal[int] ) keyword[if] identifier[chunk_id] == literal[string] : identifier[chunksize] , identifier[sampfmt] , identifier[chans] , identifier[sr] , identifier[byterate] , identifier[align] , identifier[bits] = identifier[_wavReadFmt] ( identifier[f] , identifier[bigendian] ) keyword[elif] identifier[chunk_id] == literal[string] : identifier[datasize] = identifier[_struct] . identifier[unpack] ( identifier[fmt] , identifier[f] . identifier[read] ( literal[int] ))[ literal[int] ] identifier[nframes] = identifier[int] ( identifier[datasize] /( identifier[chans] *( identifier[bits] / literal[int] ))) keyword[break] keyword[else] : identifier[_warnings] . identifier[warn] ( literal[string] % identifier[chunk_id] ) identifier[data] = identifier[f] . identifier[read] ( literal[int] ) identifier[size] = identifier[_struct] . identifier[unpack] ( identifier[fmt] , identifier[data] )[ literal[int] ] identifier[f] . identifier[seek] ( identifier[size] , literal[int] ) identifier[encoding] = identifier[_encoding] ( identifier[sampfmt] , identifier[bits] ) keyword[if] identifier[needsclosing] : identifier[f] . identifier[close] () identifier[info] = identifier[SndInfo] ( identifier[sr] , identifier[nframes] , identifier[chans] , identifier[encoding] , literal[string] ) keyword[return] identifier[info] ,{ literal[string] : identifier[fsize] , literal[string] : identifier[bigendian] , literal[string] : identifier[datasize] }
def _wavGetInfo(f: Union[IO, str]) -> Tuple[SndInfo, Dict[str, Any]]:
    """
    Read the info of a wav file. Taken mostly from scipy.io.wavfile.

    If extended, returns also fsize and bigendian.
    """
    if isinstance(f, (str, bytes)):
        f = open(f, 'rb')
        needsclosing = True # depends on [control=['if'], data=[]]
    else:
        needsclosing = False
    (fsize, bigendian) = _wavReadRiff(f)
    fmt = '>i' if bigendian else '<i'
    while f.tell() < fsize:
        chunk_id = f.read(4)
        if chunk_id == b'fmt ':
            (chunksize, sampfmt, chans, sr, byterate, align, bits) = _wavReadFmt(f, bigendian) # depends on [control=['if'], data=[]]
        elif chunk_id == b'data':
            datasize = _struct.unpack(fmt, f.read(4))[0]
            nframes = int(datasize / (chans * (bits / 8)))
            break # depends on [control=['if'], data=[]]
        else:
            _warnings.warn('chunk not understood: %s' % chunk_id)
            data = f.read(4)
            size = _struct.unpack(fmt, data)[0]
            f.seek(size, 1) # depends on [control=['while'], data=[]]
    encoding = _encoding(sampfmt, bits)
    if needsclosing:
        f.close() # depends on [control=['if'], data=[]]
    info = SndInfo(sr, nframes, chans, encoding, 'wav')
    return (info, {'fsize': fsize, 'bigendian': bigendian, 'datasize': datasize})
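_wavGetInfo accepts either a path or an open binary file object, and it relies on the 'fmt ' chunk preceding the 'data' chunk (otherwise chans and bits would be unbound when the data chunk is reached). A hedged usage sketch, with the file name as a placeholder:

# Placeholder file name; any RIFF/RIFX wav file would do.
info, extra = _wavGetInfo('example.wav')
print(info)    # SndInfo(sr, nframes, chans, encoding, 'wav')
print(extra['fsize'], extra['bigendian'], extra['datasize'])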
def _make_map(self, limit):
    """ Make a vegas grid that is adapted to the pdf. """
    ny = 2000
    y = numpy.random.uniform(0., 1., (ny,1))
    limit = numpy.arctan(limit)
    m = AdaptiveMap([[-limit, limit]], ninc=100)
    theta = numpy.empty(y.shape, float)
    jac = numpy.empty(y.shape[0], float)
    for itn in range(10):
        m.map(y, theta, jac)
        tan_theta = numpy.tan(theta[:, 0])
        x = self.scale * tan_theta
        fx = (tan_theta ** 2 + 1) * numpy.exp(-(x ** 2) / 2.)
        m.add_training_data(y, (jac * fx) ** 2)
        m.adapt(alpha=1.5)
    return numpy.array(m.grid[0])
def function[_make_map, parameter[self, limit]]: constant[ Make vegas grid that is adapted to the pdf. ] variable[ny] assign[=] constant[2000] variable[y] assign[=] call[name[numpy].random.uniform, parameter[constant[0.0], constant[1.0], tuple[[<ast.Name object at 0x7da1b04d8070>, <ast.Constant object at 0x7da1b04da950>]]]] variable[limit] assign[=] call[name[numpy].arctan, parameter[name[limit]]] variable[m] assign[=] call[name[AdaptiveMap], parameter[list[[<ast.List object at 0x7da1b04d83d0>]]]] variable[theta] assign[=] call[name[numpy].empty, parameter[name[y].shape, name[float]]] variable[jac] assign[=] call[name[numpy].empty, parameter[call[name[y].shape][constant[0]], name[float]]] for taget[name[itn]] in starred[call[name[range], parameter[constant[10]]]] begin[:] call[name[m].map, parameter[name[y], name[theta], name[jac]]] variable[tan_theta] assign[=] call[name[numpy].tan, parameter[call[name[theta]][tuple[[<ast.Slice object at 0x7da1b05db0a0>, <ast.Constant object at 0x7da1b05d9bd0>]]]]] variable[x] assign[=] binary_operation[name[self].scale * name[tan_theta]] variable[fx] assign[=] binary_operation[binary_operation[binary_operation[name[tan_theta] ** constant[2]] + constant[1]] * call[name[numpy].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b05d9e70> / constant[2.0]]]]] call[name[m].add_training_data, parameter[name[y], binary_operation[binary_operation[name[jac] * name[fx]] ** constant[2]]]] call[name[m].adapt, parameter[]] return[call[name[numpy].array, parameter[call[name[m].grid][constant[0]]]]]
keyword[def] identifier[_make_map] ( identifier[self] , identifier[limit] ): literal[string] identifier[ny] = literal[int] identifier[y] = identifier[numpy] . identifier[random] . identifier[uniform] ( literal[int] , literal[int] ,( identifier[ny] , literal[int] )) identifier[limit] = identifier[numpy] . identifier[arctan] ( identifier[limit] ) identifier[m] = identifier[AdaptiveMap] ([[- identifier[limit] , identifier[limit] ]], identifier[ninc] = literal[int] ) identifier[theta] = identifier[numpy] . identifier[empty] ( identifier[y] . identifier[shape] , identifier[float] ) identifier[jac] = identifier[numpy] . identifier[empty] ( identifier[y] . identifier[shape] [ literal[int] ], identifier[float] ) keyword[for] identifier[itn] keyword[in] identifier[range] ( literal[int] ): identifier[m] . identifier[map] ( identifier[y] , identifier[theta] , identifier[jac] ) identifier[tan_theta] = identifier[numpy] . identifier[tan] ( identifier[theta] [:, literal[int] ]) identifier[x] = identifier[self] . identifier[scale] * identifier[tan_theta] identifier[fx] =( identifier[tan_theta] ** literal[int] + literal[int] )* identifier[numpy] . identifier[exp] (-( identifier[x] ** literal[int] )/ literal[int] ) identifier[m] . identifier[add_training_data] ( identifier[y] ,( identifier[jac] * identifier[fx] )** literal[int] ) identifier[m] . identifier[adapt] ( identifier[alpha] = literal[int] ) keyword[return] identifier[numpy] . identifier[array] ( identifier[m] . identifier[grid] [ literal[int] ])
def _make_map(self, limit):
    """ Make a vegas grid that is adapted to the pdf. """
    ny = 2000
    y = numpy.random.uniform(0.0, 1.0, (ny, 1))
    limit = numpy.arctan(limit)
    m = AdaptiveMap([[-limit, limit]], ninc=100)
    theta = numpy.empty(y.shape, float)
    jac = numpy.empty(y.shape[0], float)
    for itn in range(10):
        m.map(y, theta, jac)
        tan_theta = numpy.tan(theta[:, 0])
        x = self.scale * tan_theta
        fx = (tan_theta ** 2 + 1) * numpy.exp(-x ** 2 / 2.0)
        m.add_training_data(y, (jac * fx) ** 2)
        m.adapt(alpha=1.5) # depends on [control=['for'], data=[]]
    return numpy.array(m.grid[0])
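The tangent substitution above maps the unbounded x axis onto a finite theta interval; fx is, up to the constant scale factor, the Gaussian density times the Jacobian dx/dtheta, which is what the grid adapts to. A standalone numpy sketch of that change of variables (not part of the library):

import numpy

scale, limit = 1.0, 5.0
theta = numpy.linspace(-numpy.arctan(limit), numpy.arctan(limit), 7)
x = scale * numpy.tan(theta)        # sample points, dense near 0
fx = (numpy.tan(theta) ** 2 + 1) * numpy.exp(-(x ** 2) / 2.)
print(x)
print(fx)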
def start(self): """ Starts the connection """ self.__stop = False self._queue.start() self._zk.start()
def function[start, parameter[self]]: constant[ Starts the connection ] name[self].__stop assign[=] constant[False] call[name[self]._queue.start, parameter[]] call[name[self]._zk.start, parameter[]]
keyword[def] identifier[start] ( identifier[self] ): literal[string] identifier[self] . identifier[__stop] = keyword[False] identifier[self] . identifier[_queue] . identifier[start] () identifier[self] . identifier[_zk] . identifier[start] ()
def start(self): """ Starts the connection """ self.__stop = False self._queue.start() self._zk.start()
def _heartbeat(self): """Heartbeat callback""" if self.handler is not None: self.handler.send_pack(proto.HEARTBEAT) else: self.stop_heartbeat()
def function[_heartbeat, parameter[self]]: constant[Heartbeat callback] if compare[name[self].handler is_not constant[None]] begin[:] call[name[self].handler.send_pack, parameter[name[proto].HEARTBEAT]]
keyword[def] identifier[_heartbeat] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[handler] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[handler] . identifier[send_pack] ( identifier[proto] . identifier[HEARTBEAT] ) keyword[else] : identifier[self] . identifier[stop_heartbeat] ()
def _heartbeat(self): """Heartbeat callback""" if self.handler is not None: self.handler.send_pack(proto.HEARTBEAT) # depends on [control=['if'], data=[]] else: self.stop_heartbeat()
def WriteEventBody(self, event): """Writes the body of an event object to the spreadsheet. Args: event (EventObject): event. """ for field_name in self._fields: if field_name == 'datetime': output_value = self._FormatDateTime(event) else: output_value = self._dynamic_fields_helper.GetFormattedField( event, field_name) output_value = self._RemoveIllegalXMLCharacters(output_value) # Auto adjust the column width based on the length of the output value. column_index = self._fields.index(field_name) self._column_widths.setdefault(column_index, 0) if field_name == 'datetime': column_width = min( self._MAX_COLUMN_WIDTH, len(self._timestamp_format) + 2) else: column_width = min(self._MAX_COLUMN_WIDTH, len(output_value) + 2) self._column_widths[column_index] = max( self._MIN_COLUMN_WIDTH, self._column_widths[column_index], column_width) self._sheet.set_column( column_index, column_index, self._column_widths[column_index]) if (field_name == 'datetime' and isinstance(output_value, datetime.datetime)): self._sheet.write_datetime( self._current_row, column_index, output_value) else: self._sheet.write(self._current_row, column_index, output_value) self._current_row += 1
def function[WriteEventBody, parameter[self, event]]: constant[Writes the body of an event object to the spreadsheet. Args: event (EventObject): event. ] for taget[name[field_name]] in starred[name[self]._fields] begin[:] if compare[name[field_name] equal[==] constant[datetime]] begin[:] variable[output_value] assign[=] call[name[self]._FormatDateTime, parameter[name[event]]] variable[output_value] assign[=] call[name[self]._RemoveIllegalXMLCharacters, parameter[name[output_value]]] variable[column_index] assign[=] call[name[self]._fields.index, parameter[name[field_name]]] call[name[self]._column_widths.setdefault, parameter[name[column_index], constant[0]]] if compare[name[field_name] equal[==] constant[datetime]] begin[:] variable[column_width] assign[=] call[name[min], parameter[name[self]._MAX_COLUMN_WIDTH, binary_operation[call[name[len], parameter[name[self]._timestamp_format]] + constant[2]]]] call[name[self]._column_widths][name[column_index]] assign[=] call[name[max], parameter[name[self]._MIN_COLUMN_WIDTH, call[name[self]._column_widths][name[column_index]], name[column_width]]] call[name[self]._sheet.set_column, parameter[name[column_index], name[column_index], call[name[self]._column_widths][name[column_index]]]] if <ast.BoolOp object at 0x7da204623bb0> begin[:] call[name[self]._sheet.write_datetime, parameter[name[self]._current_row, name[column_index], name[output_value]]] <ast.AugAssign object at 0x7da2046204c0>
keyword[def] identifier[WriteEventBody] ( identifier[self] , identifier[event] ): literal[string] keyword[for] identifier[field_name] keyword[in] identifier[self] . identifier[_fields] : keyword[if] identifier[field_name] == literal[string] : identifier[output_value] = identifier[self] . identifier[_FormatDateTime] ( identifier[event] ) keyword[else] : identifier[output_value] = identifier[self] . identifier[_dynamic_fields_helper] . identifier[GetFormattedField] ( identifier[event] , identifier[field_name] ) identifier[output_value] = identifier[self] . identifier[_RemoveIllegalXMLCharacters] ( identifier[output_value] ) identifier[column_index] = identifier[self] . identifier[_fields] . identifier[index] ( identifier[field_name] ) identifier[self] . identifier[_column_widths] . identifier[setdefault] ( identifier[column_index] , literal[int] ) keyword[if] identifier[field_name] == literal[string] : identifier[column_width] = identifier[min] ( identifier[self] . identifier[_MAX_COLUMN_WIDTH] , identifier[len] ( identifier[self] . identifier[_timestamp_format] )+ literal[int] ) keyword[else] : identifier[column_width] = identifier[min] ( identifier[self] . identifier[_MAX_COLUMN_WIDTH] , identifier[len] ( identifier[output_value] )+ literal[int] ) identifier[self] . identifier[_column_widths] [ identifier[column_index] ]= identifier[max] ( identifier[self] . identifier[_MIN_COLUMN_WIDTH] , identifier[self] . identifier[_column_widths] [ identifier[column_index] ], identifier[column_width] ) identifier[self] . identifier[_sheet] . identifier[set_column] ( identifier[column_index] , identifier[column_index] , identifier[self] . identifier[_column_widths] [ identifier[column_index] ]) keyword[if] ( identifier[field_name] == literal[string] keyword[and] identifier[isinstance] ( identifier[output_value] , identifier[datetime] . identifier[datetime] )): identifier[self] . identifier[_sheet] . identifier[write_datetime] ( identifier[self] . identifier[_current_row] , identifier[column_index] , identifier[output_value] ) keyword[else] : identifier[self] . identifier[_sheet] . identifier[write] ( identifier[self] . identifier[_current_row] , identifier[column_index] , identifier[output_value] ) identifier[self] . identifier[_current_row] += literal[int]
def WriteEventBody(self, event): """Writes the body of an event object to the spreadsheet. Args: event (EventObject): event. """ for field_name in self._fields: if field_name == 'datetime': output_value = self._FormatDateTime(event) # depends on [control=['if'], data=[]] else: output_value = self._dynamic_fields_helper.GetFormattedField(event, field_name) output_value = self._RemoveIllegalXMLCharacters(output_value) # Auto adjust the column width based on the length of the output value. column_index = self._fields.index(field_name) self._column_widths.setdefault(column_index, 0) if field_name == 'datetime': column_width = min(self._MAX_COLUMN_WIDTH, len(self._timestamp_format) + 2) # depends on [control=['if'], data=[]] else: column_width = min(self._MAX_COLUMN_WIDTH, len(output_value) + 2) self._column_widths[column_index] = max(self._MIN_COLUMN_WIDTH, self._column_widths[column_index], column_width) self._sheet.set_column(column_index, column_index, self._column_widths[column_index]) if field_name == 'datetime' and isinstance(output_value, datetime.datetime): self._sheet.write_datetime(self._current_row, column_index, output_value) # depends on [control=['if'], data=[]] else: self._sheet.write(self._current_row, column_index, output_value) # depends on [control=['for'], data=['field_name']] self._current_row += 1
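The column sizing above keeps a running maximum width per column, clamped between _MIN_COLUMN_WIDTH and _MAX_COLUMN_WIDTH. A self-contained xlsxwriter sketch of the same pattern (file name, bounds, and rows are made up for illustration):

import xlsxwriter

MIN_WIDTH, MAX_WIDTH = 10, 50
rows = [['host', 'event'], ['server01.example.com', 'login failed']]

workbook = xlsxwriter.Workbook('events.xlsx')
sheet = workbook.add_worksheet()
widths = {}
for row_index, row in enumerate(rows):
    for col_index, value in enumerate(row):
        # Track the widest value seen per column, clamped to the bounds.
        width = min(MAX_WIDTH, len(value) + 2)
        widths[col_index] = max(MIN_WIDTH, widths.get(col_index, 0), width)
        sheet.set_column(col_index, col_index, widths[col_index])
        sheet.write(row_index, col_index, value)
workbook.close()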
def get_dict_for_forms(self):
    """
    Build a dictionary where searchable_fields are next to their model
    to be used in modelform_factory
    dico = {
            "str(model)" : {
                "model" : Model,
                "fields" : [] #searchable_fields which are attributes of Model
            }
    }
    """
    magic_dico = field_to_dict(self.searchable_fields)
    dico = {}

    def dict_from_fields_r(mini_dict, dico, model):
        """
        Create the dico recursively from the magic_dico
        """
        dico[str(model)] = {}
        dico[str(model)]["model"] = model
        dico[str(model)]["fields"] = []
        for key, value in mini_dict.items():
            if isinstance(value, bool):
                continue
            if value == EMPTY_DICT:
                dico[str(model)]["fields"].append(key)
            elif EMPTY_DICT.items() <= value.items():
                dico[str(model)]["fields"].append(key)
                model_tmp = associate_model(model, key)
                dict_from_fields_r(value, dico, model_tmp)
            else:
                model_tmp = associate_model(model, key)
                dict_from_fields_r(value, dico, model_tmp)

    if magic_dico:
        dict_from_fields_r(magic_dico, dico, self.model)
    return dico
def function[get_dict_for_forms, parameter[self]]: constant[ Build a dictionnary where searchable_fields are next to their model to be use in modelform_factory dico = { "str(model)" : { "model" : Model, "fields" = [] #searchable_fields which are attribute of Model } } ] variable[magic_dico] assign[=] call[name[field_to_dict], parameter[name[self].searchable_fields]] variable[dico] assign[=] dictionary[[], []] def function[dict_from_fields_r, parameter[mini_dict, dico, model]]: constant[ Create the dico recursively from the magic_dico ] call[name[dico]][call[name[str], parameter[name[model]]]] assign[=] dictionary[[], []] call[call[name[dico]][call[name[str], parameter[name[model]]]]][constant[model]] assign[=] name[model] call[call[name[dico]][call[name[str], parameter[name[model]]]]][constant[fields]] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da20c6c7040>, <ast.Name object at 0x7da20c6c7fd0>]]] in starred[call[name[mini_dict].items, parameter[]]] begin[:] if call[name[isinstance], parameter[name[value], name[bool]]] begin[:] continue if compare[name[value] equal[==] name[EMPTY_DICT]] begin[:] call[call[call[name[dico]][call[name[str], parameter[name[model]]]]][constant[fields]].append, parameter[name[key]]] if name[magic_dico] begin[:] call[name[dict_from_fields_r], parameter[name[magic_dico], name[dico], name[self].model]] return[name[dico]]
keyword[def] identifier[get_dict_for_forms] ( identifier[self] ): literal[string] identifier[magic_dico] = identifier[field_to_dict] ( identifier[self] . identifier[searchable_fields] ) identifier[dico] ={} keyword[def] identifier[dict_from_fields_r] ( identifier[mini_dict] , identifier[dico] , identifier[model] ): literal[string] identifier[dico] [ identifier[str] ( identifier[model] )]={} identifier[dico] [ identifier[str] ( identifier[model] )][ literal[string] ]= identifier[model] identifier[dico] [ identifier[str] ( identifier[model] )][ literal[string] ]=[] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[mini_dict] . identifier[items] (): keyword[if] identifier[isinstance] ( identifier[value] , identifier[bool] ): keyword[continue] keyword[if] identifier[value] == identifier[EMPTY_DICT] : identifier[dico] [ identifier[str] ( identifier[model] )][ literal[string] ]. identifier[append] ( identifier[key] ) keyword[elif] identifier[EMPTY_DICT] . identifier[items] ()<= identifier[value] . identifier[items] (): identifier[dico] [ identifier[str] ( identifier[model] )][ literal[string] ]. identifier[append] ( identifier[key] ) identifier[model_tmp] = identifier[associate_model] ( identifier[model] , identifier[key] ) identifier[dict_from_fields_r] ( identifier[value] , identifier[dico] , identifier[model_tmp] ) keyword[else] : identifier[model_tmp] = identifier[associate_model] ( identifier[model] , identifier[key] ) identifier[dict_from_fields_r] ( identifier[value] , identifier[dico] , identifier[model_tmp] ) keyword[if] identifier[magic_dico] : identifier[dict_from_fields_r] ( identifier[magic_dico] , identifier[dico] , identifier[self] . identifier[model] ) keyword[return] identifier[dico]
def get_dict_for_forms(self):
    """
    Build a dictionary where searchable_fields are next to their model
    to be used in modelform_factory
    dico = {
            "str(model)" : {
                "model" : Model,
                "fields" : [] #searchable_fields which are attributes of Model
            }
    }
    """
    magic_dico = field_to_dict(self.searchable_fields)
    dico = {}

    def dict_from_fields_r(mini_dict, dico, model):
        """
        Create the dico recursively from the magic_dico
        """
        dico[str(model)] = {}
        dico[str(model)]['model'] = model
        dico[str(model)]['fields'] = []
        for (key, value) in mini_dict.items():
            if isinstance(value, bool):
                continue # depends on [control=['if'], data=[]]
            if value == EMPTY_DICT:
                dico[str(model)]['fields'].append(key) # depends on [control=['if'], data=[]]
            elif EMPTY_DICT.items() <= value.items():
                dico[str(model)]['fields'].append(key)
                model_tmp = associate_model(model, key)
                dict_from_fields_r(value, dico, model_tmp) # depends on [control=['if'], data=[]]
            else:
                model_tmp = associate_model(model, key)
                dict_from_fields_r(value, dico, model_tmp) # depends on [control=['for'], data=[]]

    if magic_dico:
        dict_from_fields_r(magic_dico, dico, self.model) # depends on [control=['if'], data=[]]
    return dico
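A framework-free analog of the recursion may make the output shape clearer: leaf entries become fields of the current model, nested dicts recurse into a related model. Here plain strings stand in for Django models and the associate_model lookup:

EMPTY_DICT = {}

def collect(mini_dict, dico, model):
    # Same walk as dict_from_fields_r, with strings standing in for models.
    dico[model] = {'model': model, 'fields': []}
    for key, value in mini_dict.items():
        if value == EMPTY_DICT:
            dico[model]['fields'].append(key)
        else:
            collect(value, dico, key)

dico = {}
collect({'title': {}, 'author': {'name': {}}}, dico, 'Book')
print(dico)
# {'Book': {'model': 'Book', 'fields': ['title']},
#  'author': {'model': 'author', 'fields': ['name']}}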
def finalize_backreferences(seen_backrefs, gallery_conf): """Replace backref files only if necessary.""" logger = sphinx_compatibility.getLogger('sphinx-gallery') if gallery_conf['backreferences_dir'] is None: return for backref in seen_backrefs: path = os.path.join(gallery_conf['src_dir'], gallery_conf['backreferences_dir'], '%s.examples.new' % backref) if os.path.isfile(path): _replace_md5(path) else: level = gallery_conf['log_level'].get('backreference_missing', 'warning') func = getattr(logger, level) func('Could not find backreferences file: %s' % (path,)) func('The backreferences are likely to be erroneous ' 'due to file system case insensitivity.')
def function[finalize_backreferences, parameter[seen_backrefs, gallery_conf]]: constant[Replace backref files only if necessary.] variable[logger] assign[=] call[name[sphinx_compatibility].getLogger, parameter[constant[sphinx-gallery]]] if compare[call[name[gallery_conf]][constant[backreferences_dir]] is constant[None]] begin[:] return[None] for taget[name[backref]] in starred[name[seen_backrefs]] begin[:] variable[path] assign[=] call[name[os].path.join, parameter[call[name[gallery_conf]][constant[src_dir]], call[name[gallery_conf]][constant[backreferences_dir]], binary_operation[constant[%s.examples.new] <ast.Mod object at 0x7da2590d6920> name[backref]]]] if call[name[os].path.isfile, parameter[name[path]]] begin[:] call[name[_replace_md5], parameter[name[path]]]
keyword[def] identifier[finalize_backreferences] ( identifier[seen_backrefs] , identifier[gallery_conf] ): literal[string] identifier[logger] = identifier[sphinx_compatibility] . identifier[getLogger] ( literal[string] ) keyword[if] identifier[gallery_conf] [ literal[string] ] keyword[is] keyword[None] : keyword[return] keyword[for] identifier[backref] keyword[in] identifier[seen_backrefs] : identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[gallery_conf] [ literal[string] ], identifier[gallery_conf] [ literal[string] ], literal[string] % identifier[backref] ) keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ): identifier[_replace_md5] ( identifier[path] ) keyword[else] : identifier[level] = identifier[gallery_conf] [ literal[string] ]. identifier[get] ( literal[string] , literal[string] ) identifier[func] = identifier[getattr] ( identifier[logger] , identifier[level] ) identifier[func] ( literal[string] %( identifier[path] ,)) identifier[func] ( literal[string] literal[string] )
def finalize_backreferences(seen_backrefs, gallery_conf): """Replace backref files only if necessary.""" logger = sphinx_compatibility.getLogger('sphinx-gallery') if gallery_conf['backreferences_dir'] is None: return # depends on [control=['if'], data=[]] for backref in seen_backrefs: path = os.path.join(gallery_conf['src_dir'], gallery_conf['backreferences_dir'], '%s.examples.new' % backref) if os.path.isfile(path): _replace_md5(path) # depends on [control=['if'], data=[]] else: level = gallery_conf['log_level'].get('backreference_missing', 'warning') func = getattr(logger, level) func('Could not find backreferences file: %s' % (path,)) func('The backreferences are likely to be erroneous due to file system case insensitivity.') # depends on [control=['for'], data=['backref']]
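_replace_md5 promotes a '.examples.new' file over its target only when the content actually changed, so unchanged pages keep their timestamps and do not trigger rebuilds. A hedged sketch of that compare-and-replace pattern (not sphinx-gallery's actual implementation):

import hashlib
import os

def replace_if_changed(new_path):
    # Only overwrite 'foo' with 'foo.new' when the content differs.
    assert new_path.endswith('.new')
    old_path = new_path[:-len('.new')]

    def digest(path):
        with open(path, 'rb') as fileobj:
            return hashlib.md5(fileobj.read()).hexdigest()

    if os.path.isfile(old_path) and digest(old_path) == digest(new_path):
        os.remove(new_path)             # unchanged: keep the old file's mtime
    else:
        os.replace(new_path, old_path)  # changed (or new): promote the .new file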
def _create_related(args): # type: (Dict) -> None """Create related field from `_embed` arguments.""" if '_embed' in request.args: embeds = request.args.getlist('_embed') args['related'] = ','.join(embeds) del args['_embed']
def function[_create_related, parameter[args]]: constant[Create related field from `_embed` arguments.] if compare[constant[_embed] in name[request].args] begin[:] variable[embeds] assign[=] call[name[request].args.getlist, parameter[constant[_embed]]] call[name[args]][constant[related]] assign[=] call[constant[,].join, parameter[name[embeds]]] <ast.Delete object at 0x7da1b196cb20>
keyword[def] identifier[_create_related] ( identifier[args] ): literal[string] keyword[if] literal[string] keyword[in] identifier[request] . identifier[args] : identifier[embeds] = identifier[request] . identifier[args] . identifier[getlist] ( literal[string] ) identifier[args] [ literal[string] ]= literal[string] . identifier[join] ( identifier[embeds] ) keyword[del] identifier[args] [ literal[string] ]
def _create_related(args): # type: (Dict) -> None 'Create related field from `_embed` arguments.' if '_embed' in request.args: embeds = request.args.getlist('_embed') args['related'] = ','.join(embeds) del args['_embed'] # depends on [control=['if'], data=[]]
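In other words, repeated `_embed` query parameters are folded into a single comma-separated `related` argument. A framework-free sketch of the transformation, with a plain dict of lists standing in for Flask's request.args:

# '?_embed=comments&_embed=tags' becomes related='comments,tags'.
args = {'_embed': ['comments', 'tags'], 'per_page': ['10']}

def create_related(args):
    if '_embed' in args:
        args['related'] = ','.join(args.pop('_embed'))

create_related(args)
print(args)  # {'per_page': ['10'], 'related': 'comments,tags'}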
async def load(self, turn_context: TurnContext, force: bool = False) -> None:
    """
    Reads in the current state object and caches it in the context object for this turn.
    :param turn_context: The context object for this turn.
    :param force: Optional. True to bypass the cache.
    """
    if turn_context == None:
        raise TypeError('BotState.load(): turn_context cannot be None.')

    cached_state = turn_context.turn_state.get(self._context_service_key)
    storage_key = self.get_storage_key(turn_context)
    if (force or not cached_state or not cached_state.state) :
        items = await self._storage.read([storage_key])
        val = items.get(storage_key)
        turn_context.turn_state[self._context_service_key] = CachedBotState(val)
<ast.AsyncFunctionDef object at 0x7da1b05fbdf0>
keyword[async] keyword[def] identifier[load] ( identifier[self] , identifier[turn_context] : identifier[TurnContext] , identifier[force] : identifier[bool] = keyword[False] )-> keyword[None] : literal[string] keyword[if] identifier[turn_context] == keyword[None] : keyword[raise] identifier[TypeError] ( literal[string] ) identifier[cached_state] = identifier[turn_context] . identifier[turn_state] . identifier[get] ( identifier[self] . identifier[_context_service_key] ) identifier[storage_key] = identifier[self] . identifier[get_storage_key] ( identifier[turn_context] ) keyword[if] ( identifier[force] keyword[or] keyword[not] identifier[cached_state] keyword[or] keyword[not] identifier[cached_state] . identifier[state] ): identifier[items] = keyword[await] identifier[self] . identifier[_storage] . identifier[read] ([ identifier[storage_key] ]) identifier[val] = identifier[items] . identifier[get] ( identifier[storage_key] ) identifier[turn_context] . identifier[turn_state] [ identifier[self] . identifier[_context_service_key] ]= identifier[CachedBotState] ( identifier[val] )
async def load(self, turn_context: TurnContext, force: bool=False) -> None:
    """
    Reads in the current state object and caches it in the context object for this turn.
    :param turn_context: The context object for this turn.
    :param force: Optional. True to bypass the cache.
    """
    if turn_context == None:
        raise TypeError('BotState.load(): turn_context cannot be None.') # depends on [control=['if'], data=[]]
    cached_state = turn_context.turn_state.get(self._context_service_key)
    storage_key = self.get_storage_key(turn_context)
    if force or not cached_state or (not cached_state.state):
        items = await self._storage.read([storage_key])
        val = items.get(storage_key)
        turn_context.turn_state[self._context_service_key] = CachedBotState(val) # depends on [control=['if'], data=[]]
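A hedged usage sketch in the style of the botbuilder SDK (class names assumed from that SDK); passing force=True bypasses the per-turn cache and re-reads from storage:

from botbuilder.core import MemoryStorage, UserState   # names assumed

user_state = UserState(MemoryStorage())

async def on_turn(turn_context):
    # Re-read user state from storage even if already cached for this turn.
    await user_state.load(turn_context, force=True)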
def get_inner_template(self, language, template_type, indentation, key, val): """ Gets the requested template for the given language. Args: language: string, the language of the template to look for. template_type: string, 'iterable' or 'singular'. An iterable template is needed when the value is an iterable and needs more unpacking, e.g. list, tuple. A singular template is needed when unpacking is complete and the value is singular, e.g. string, int, float. indentation: int, the indentation level. key: multiple types, the array key. val: multiple types, the array values Returns: string, template formatting for arrays by language. """ #Language specific inner templates inner_templates = {'php' : { 'iterable' : '%s%s => array \n%s( \n%s%s),\n' % (indentation, key, indentation, val, indentation), 'singular' : '%s%s => %s, \n' % (indentation, key, val) }, 'javascript' : { 'iterable' : '%s%s : {\n%s\n%s},\n' % (indentation, key, val, indentation), 'singular' : '%s%s: %s,\n' % (indentation, key, val)}, 'ocaml' : { 'iterable' : '%s[| (%s, (\n%s\n%s))|] ;;\n' % (indentation, key, val, indentation), 'singular' : '%s(%s, %s);\n' % (indentation, key, val)}} return inner_templates[language][template_type]
def function[get_inner_template, parameter[self, language, template_type, indentation, key, val]]: constant[ Gets the requested template for the given language. Args: language: string, the language of the template to look for. template_type: string, 'iterable' or 'singular'. An iterable template is needed when the value is an iterable and needs more unpacking, e.g. list, tuple. A singular template is needed when unpacking is complete and the value is singular, e.g. string, int, float. indentation: int, the indentation level. key: multiple types, the array key. val: multiple types, the array values Returns: string, template formatting for arrays by language. ] variable[inner_templates] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a8bb0>, <ast.Constant object at 0x7da20c6aafb0>, <ast.Constant object at 0x7da20c6abd00>], [<ast.Dict object at 0x7da20c6a87c0>, <ast.Dict object at 0x7da20c6a8d30>, <ast.Dict object at 0x7da20c6a8d00>]] return[call[call[name[inner_templates]][name[language]]][name[template_type]]]
keyword[def] identifier[get_inner_template] ( identifier[self] , identifier[language] , identifier[template_type] , identifier[indentation] , identifier[key] , identifier[val] ): literal[string] identifier[inner_templates] ={ literal[string] :{ literal[string] : literal[string] %( identifier[indentation] , identifier[key] , identifier[indentation] , identifier[val] , identifier[indentation] ), literal[string] : literal[string] %( identifier[indentation] , identifier[key] , identifier[val] )}, literal[string] :{ literal[string] : literal[string] %( identifier[indentation] , identifier[key] , identifier[val] , identifier[indentation] ), literal[string] : literal[string] %( identifier[indentation] , identifier[key] , identifier[val] )}, literal[string] :{ literal[string] : literal[string] %( identifier[indentation] , identifier[key] , identifier[val] , identifier[indentation] ), literal[string] : literal[string] %( identifier[indentation] , identifier[key] , identifier[val] )}} keyword[return] identifier[inner_templates] [ identifier[language] ][ identifier[template_type] ]
def get_inner_template(self, language, template_type, indentation, key, val): """ Gets the requested template for the given language. Args: language: string, the language of the template to look for. template_type: string, 'iterable' or 'singular'. An iterable template is needed when the value is an iterable and needs more unpacking, e.g. list, tuple. A singular template is needed when unpacking is complete and the value is singular, e.g. string, int, float. indentation: int, the indentation level. key: multiple types, the array key. val: multiple types, the array values Returns: string, template formatting for arrays by language. """ #Language specific inner templates inner_templates = {'php': {'iterable': '%s%s => array \n%s( \n%s%s),\n' % (indentation, key, indentation, val, indentation), 'singular': '%s%s => %s, \n' % (indentation, key, val)}, 'javascript': {'iterable': '%s%s : {\n%s\n%s},\n' % (indentation, key, val, indentation), 'singular': '%s%s: %s,\n' % (indentation, key, val)}, 'ocaml': {'iterable': '%s[| (%s, (\n%s\n%s))|] ;;\n' % (indentation, key, val, indentation), 'singular': '%s(%s, %s);\n' % (indentation, key, val)}} return inner_templates[language][template_type]
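For illustration, here is what the singular templates render to ('gen' stands in for an instance of the enclosing class, name assumed):

print(gen.get_inner_template('php', 'singular', '    ', "'name'", "'Ada'"))
#     'name' => 'Ada',
print(gen.get_inner_template('javascript', 'singular', '    ', 'name', "'Ada'"))
#     name: 'Ada',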
def _CheckIsDirectory(self, file_entry): """Checks the is_directory find specification. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not. """ if definitions.FILE_ENTRY_TYPE_DIRECTORY not in self._file_entry_types: return False return file_entry.IsDirectory()
def function[_CheckIsDirectory, parameter[self, file_entry]]: constant[Checks the is_directory find specification. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not. ] if compare[name[definitions].FILE_ENTRY_TYPE_DIRECTORY <ast.NotIn object at 0x7da2590d7190> name[self]._file_entry_types] begin[:] return[constant[False]] return[call[name[file_entry].IsDirectory, parameter[]]]
keyword[def] identifier[_CheckIsDirectory] ( identifier[self] , identifier[file_entry] ): literal[string] keyword[if] identifier[definitions] . identifier[FILE_ENTRY_TYPE_DIRECTORY] keyword[not] keyword[in] identifier[self] . identifier[_file_entry_types] : keyword[return] keyword[False] keyword[return] identifier[file_entry] . identifier[IsDirectory] ()
def _CheckIsDirectory(self, file_entry): """Checks the is_directory find specification. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not. """ if definitions.FILE_ENTRY_TYPE_DIRECTORY not in self._file_entry_types: return False # depends on [control=['if'], data=[]] return file_entry.IsDirectory()
def get_collection(self, collection_id=None, nav="children", page=None): """ Makes a call on the Collection API :param collection_id: Id of the collection to retrieve :param nav: Direction of the navigation :param page: Page to retrieve :return: Response :rtype: requests.Response """ return self.call( "collections", { "id": collection_id, "nav": nav, "page": page }, defaults={ "id": None, "nav": "children", "page": 1 } )
def function[get_collection, parameter[self, collection_id, nav, page]]: constant[ Makes a call on the Collection API :param collection_id: Id of the collection to retrieve :param nav: Direction of the navigation :param page: Page to retrieve :return: Response :rtype: requests.Response ] return[call[name[self].call, parameter[constant[collections], dictionary[[<ast.Constant object at 0x7da20e954c70>, <ast.Constant object at 0x7da20e954e80>, <ast.Constant object at 0x7da20e957880>], [<ast.Name object at 0x7da20e957010>, <ast.Name object at 0x7da20e955db0>, <ast.Name object at 0x7da20e954790>]]]]]
keyword[def] identifier[get_collection] ( identifier[self] , identifier[collection_id] = keyword[None] , identifier[nav] = literal[string] , identifier[page] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[call] ( literal[string] , { literal[string] : identifier[collection_id] , literal[string] : identifier[nav] , literal[string] : identifier[page] }, identifier[defaults] ={ literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[int] } )
def get_collection(self, collection_id=None, nav='children', page=None): """ Makes a call on the Collection API :param collection_id: Id of the collection to retrieve :param nav: Direction of the navigation :param page: Page to retrieve :return: Response :rtype: requests.Response """ return self.call('collections', {'id': collection_id, 'nav': nav, 'page': page}, defaults={'id': None, 'nav': 'children', 'page': 1})
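A hedged call sketch ('client' stands in for an instance of the API wrapper, and the id value is a placeholder):

response = client.get_collection(collection_id='urn:collection:1', nav='parents', page=2)
print(response.status_code)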
def use(self, tube): """Start producing jobs into the given tube. :param tube: Name of the tube to USE Subsequent calls to :func:`put_job` insert jobs into this tube. """ with self._sock_ctx() as socket: if self.current_tube != tube: self.desired_tube = tube self._send_message('use {0}'.format(tube), socket) self._receive_name(socket) self.current_tube = tube
def function[use, parameter[self, tube]]: constant[Start producing jobs into the given tube. :param tube: Name of the tube to USE Subsequent calls to :func:`put_job` insert jobs into this tube. ] with call[name[self]._sock_ctx, parameter[]] begin[:] if compare[name[self].current_tube not_equal[!=] name[tube]] begin[:] name[self].desired_tube assign[=] name[tube] call[name[self]._send_message, parameter[call[constant[use {0}].format, parameter[name[tube]]], name[socket]]] call[name[self]._receive_name, parameter[name[socket]]] name[self].current_tube assign[=] name[tube]
keyword[def] identifier[use] ( identifier[self] , identifier[tube] ): literal[string] keyword[with] identifier[self] . identifier[_sock_ctx] () keyword[as] identifier[socket] : keyword[if] identifier[self] . identifier[current_tube] != identifier[tube] : identifier[self] . identifier[desired_tube] = identifier[tube] identifier[self] . identifier[_send_message] ( literal[string] . identifier[format] ( identifier[tube] ), identifier[socket] ) identifier[self] . identifier[_receive_name] ( identifier[socket] ) identifier[self] . identifier[current_tube] = identifier[tube]
def use(self, tube): """Start producing jobs into the given tube. :param tube: Name of the tube to USE Subsequent calls to :func:`put_job` insert jobs into this tube. """ with self._sock_ctx() as socket: if self.current_tube != tube: self.desired_tube = tube self._send_message('use {0}'.format(tube), socket) self._receive_name(socket) self.current_tube = tube # depends on [control=['if'], data=['tube']] # depends on [control=['with'], data=['socket']]
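Because of the current_tube check, repeated calls with the same tube cost nothing on the wire. A hedged sketch ('client' stands in for an instance of this beanstalkd client; the put_job signature is assumed):

client.use('email-jobs')     # sends 'use email-jobs' and waits for the reply
client.use('email-jobs')     # no-op: already the current tube
client.put_job(b'payload')   # subsequent jobs land in 'email-jobs'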
def delete_request(profile, resource):
    """Do a DELETE request to Github's API.

    Args:

        profile
            A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.

        resource
            The part of a Github API URL that comes after ``.../:repo/git``.
            For instance, for ``.../:repo/git/commits``, it's ``/commits``.

    Returns:
        The response returned by the ``requests`` library when it does the
        DELETE request.

    """
    url = get_url(profile, resource)
    headers = get_headers(profile)
    return requests.delete(url, headers=headers)
def function[delete_request, parameter[profile, resource]]: constant[Do a DELETE request to Github's API. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. resource The part of a Github API URL that comes after ``.../:repo/git``. For instance, for ``.../:repo/git/commits``, it's ``/commits``. Returns: The response returned by the ``requests`` library when it does the POST request. ] variable[url] assign[=] call[name[get_url], parameter[name[profile], name[resource]]] variable[headers] assign[=] call[name[get_headers], parameter[name[profile]]] return[call[name[requests].delete, parameter[name[url]]]]
keyword[def] identifier[delete_request] ( identifier[profile] , identifier[resource] ): literal[string] identifier[url] = identifier[get_url] ( identifier[profile] , identifier[resource] ) identifier[headers] = identifier[get_headers] ( identifier[profile] ) keyword[return] identifier[requests] . identifier[delete] ( identifier[url] , identifier[headers] = identifier[headers] )
def delete_request(profile, resource):
    """Do a DELETE request to Github's API.

    Args:

        profile
            A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.

        resource
            The part of a Github API URL that comes after ``.../:repo/git``.
            For instance, for ``.../:repo/git/commits``, it's ``/commits``.

    Returns:
        The response returned by the ``requests`` library when it does the
        DELETE request.

    """
    url = get_url(profile, resource)
    headers = get_headers(profile)
    return requests.delete(url, headers=headers)
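A hedged usage sketch; the profile values and ref path below are placeholders, and deleting a ref is one of the DELETE operations Github's git API supports:

profile = {'repo': 'octocat/hello-world', 'token': '<token>'}
response = delete_request(profile, '/refs/heads/old-feature')
print(response.status_code)   # 204 when the ref was deleted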
def _get_complex_type_production(complex_type: ComplexType,
                                 multi_match_mapping: Dict[Type, List[Type]]) -> List[Tuple[Type, str]]:
    """
    Takes a complex type (without any placeholders), gets its return values, and returns productions
    (perhaps each with multiple arguments) that produce the return values. This method also takes
    care of ``MultiMatchNamedBasicTypes``. If one of the arguments or the return types is a multi
    match type, it gets all the substitutions of those types from ``multi_match_mapping`` and forms
    a list with all possible combinations of substitutions. If the complex type passed to this method
    has no ``MultiMatchNamedBasicTypes``, the returned list will contain a single tuple. For
    example, if the complex type is ``<a,<<b,c>,d>>``, and ``a`` is a multi match type that matches
    ``e`` and ``f``, this gives the following list of tuples:
    ``[('d', 'd -> [<a,<<b,c>,d>, e, <b,c>]), ('d', 'd -> [<a,<<b,c>,d>, f, <b,c>])]``
    Note that we assume there will be no productions from the multi match type, and the list above
    does not contain ``('d', 'd -> [<a,<<b,c>,d>, a, <b,c>]')``.
    """
    return_type = complex_type.return_type()
    if isinstance(return_type, MultiMatchNamedBasicType):
        return_types_matched = list(multi_match_mapping[return_type] if return_type in
                                    multi_match_mapping else return_type.types_to_match)
    else:
        return_types_matched = [return_type]
    arguments = complex_type.argument_types()
    argument_types_matched = []
    for argument_type in arguments:
        if isinstance(argument_type, MultiMatchNamedBasicType):
            matched_types = list(multi_match_mapping[argument_type] if argument_type in
                                 multi_match_mapping else argument_type.types_to_match)
            argument_types_matched.append(matched_types)
        else:
            argument_types_matched.append([argument_type])
    complex_type_productions: List[Tuple[Type, str]] = []
    for matched_return_type in return_types_matched:
        for matched_arguments in itertools.product(*argument_types_matched):
            complex_type_productions.append((matched_return_type,
                                             _make_production_string(return_type,
                                                                     [complex_type] + list(matched_arguments))))
    return complex_type_productions
def function[_get_complex_type_production, parameter[complex_type, multi_match_mapping]]: constant[ Takes a complex type (without any placeholders), gets its return values, and returns productions (perhaps each with multiple arguments) that produce the return values. This method also takes care of ``MultiMatchNamedBasicTypes``. If one of the arguments or the return types is a multi match type, it gets all the substitutions of those types from ``multi_match_mapping`` and forms a list with all possible combinations of substitutions. If the complex type passed to this method has no ``MultiMatchNamedBasicTypes``, the returned list will contain a single tuple. For example, if the complex is type ``<a,<<b,c>,d>>``, and ``a`` is a multi match type that matches ``e`` and ``f``, this gives the following list of tuples: ``[('d', 'd -> [<a,<<b,c>,d>, e, <b,c>]), ('d', 'd -> [<a,<<b,c>,d>, f, <b,c>])]`` Note that we assume there will be no productions from the multi match type, and the list above does not contain ``('d', 'd -> [<a,<<b,c>,d>, a, <b,c>>]')``. ] variable[return_type] assign[=] call[name[complex_type].return_type, parameter[]] if call[name[isinstance], parameter[name[return_type], name[MultiMatchNamedBasicType]]] begin[:] variable[return_types_matched] assign[=] call[name[list], parameter[<ast.IfExp object at 0x7da1b201d120>]] variable[arguments] assign[=] call[name[complex_type].argument_types, parameter[]] variable[argument_types_matched] assign[=] list[[]] for taget[name[argument_type]] in starred[name[arguments]] begin[:] if call[name[isinstance], parameter[name[argument_type], name[MultiMatchNamedBasicType]]] begin[:] variable[matched_types] assign[=] call[name[list], parameter[<ast.IfExp object at 0x7da18f813b50>]] call[name[argument_types_matched].append, parameter[name[matched_types]]] <ast.AnnAssign object at 0x7da18f813790> for taget[name[matched_return_type]] in starred[name[return_types_matched]] begin[:] for taget[name[matched_arguments]] in starred[call[name[itertools].product, parameter[<ast.Starred object at 0x7da1b1f94070>]]] begin[:] call[name[complex_type_productions].append, parameter[tuple[[<ast.Name object at 0x7da1b1f96590>, <ast.Call object at 0x7da20c9926e0>]]]] return[name[complex_type_productions]]
keyword[def] identifier[_get_complex_type_production] ( identifier[complex_type] : identifier[ComplexType] , identifier[multi_match_mapping] : identifier[Dict] [ identifier[Type] , identifier[List] [ identifier[Type] ]])-> identifier[List] [ identifier[Tuple] [ identifier[Type] , identifier[str] ]]: literal[string] identifier[return_type] = identifier[complex_type] . identifier[return_type] () keyword[if] identifier[isinstance] ( identifier[return_type] , identifier[MultiMatchNamedBasicType] ): identifier[return_types_matched] = identifier[list] ( identifier[multi_match_mapping] [ identifier[return_type] ] keyword[if] identifier[return_type] keyword[in] identifier[multi_match_mapping] keyword[else] identifier[return_type] . identifier[types_to_match] ) keyword[else] : identifier[return_types_matched] =[ identifier[return_type] ] identifier[arguments] = identifier[complex_type] . identifier[argument_types] () identifier[argument_types_matched] =[] keyword[for] identifier[argument_type] keyword[in] identifier[arguments] : keyword[if] identifier[isinstance] ( identifier[argument_type] , identifier[MultiMatchNamedBasicType] ): identifier[matched_types] = identifier[list] ( identifier[multi_match_mapping] [ identifier[argument_type] ] keyword[if] identifier[argument_type] keyword[in] identifier[multi_match_mapping] keyword[else] identifier[argument_type] . identifier[types_to_match] ) identifier[argument_types_matched] . identifier[append] ( identifier[matched_types] ) keyword[else] : identifier[argument_types_matched] . identifier[append] ([ identifier[argument_type] ]) identifier[complex_type_productions] : identifier[List] [ identifier[Tuple] [ identifier[Type] , identifier[str] ]]=[] keyword[for] identifier[matched_return_type] keyword[in] identifier[return_types_matched] : keyword[for] identifier[matched_arguments] keyword[in] identifier[itertools] . identifier[product] (* identifier[argument_types_matched] ): identifier[complex_type_productions] . identifier[append] (( identifier[matched_return_type] , identifier[_make_production_string] ( identifier[return_type] , [ identifier[complex_type] ]+ identifier[list] ( identifier[matched_arguments] )))) keyword[return] identifier[complex_type_productions]
def _get_complex_type_production(complex_type: ComplexType, multi_match_mapping: Dict[Type, List[Type]]) -> List[Tuple[Type, str]]:
    """
    Takes a complex type (without any placeholders), gets its return values, and returns productions
    (perhaps each with multiple arguments) that produce the return values. This method also takes
    care of ``MultiMatchNamedBasicTypes``. If one of the arguments or the return types is a multi
    match type, it gets all the substitutions of those types from ``multi_match_mapping`` and forms
    a list with all possible combinations of substitutions. If the complex type passed to this method
    has no ``MultiMatchNamedBasicTypes``, the returned list will contain a single tuple. For
    example, if the complex type is ``<a,<<b,c>,d>>``, and ``a`` is a multi match type that matches
    ``e`` and ``f``, this gives the following list of tuples:
    ``[('d', 'd -> [<a,<<b,c>,d>, e, <b,c>]), ('d', 'd -> [<a,<<b,c>,d>, f, <b,c>])]``
    Note that we assume there will be no productions from the multi match type, and the list above
    does not contain ``('d', 'd -> [<a,<<b,c>,d>, a, <b,c>]')``.
    """
    return_type = complex_type.return_type()
    if isinstance(return_type, MultiMatchNamedBasicType):
        return_types_matched = list(multi_match_mapping[return_type] if return_type in multi_match_mapping else return_type.types_to_match) # depends on [control=['if'], data=[]]
    else:
        return_types_matched = [return_type]
    arguments = complex_type.argument_types()
    argument_types_matched = []
    for argument_type in arguments:
        if isinstance(argument_type, MultiMatchNamedBasicType):
            matched_types = list(multi_match_mapping[argument_type] if argument_type in multi_match_mapping else argument_type.types_to_match)
            argument_types_matched.append(matched_types) # depends on [control=['if'], data=[]]
        else:
            argument_types_matched.append([argument_type]) # depends on [control=['for'], data=['argument_type']]
    complex_type_productions: List[Tuple[Type, str]] = []
    for matched_return_type in return_types_matched:
        for matched_arguments in itertools.product(*argument_types_matched):
            complex_type_productions.append((matched_return_type, _make_production_string(return_type, [complex_type] + list(matched_arguments)))) # depends on [control=['for'], data=['matched_arguments']] # depends on [control=['for'], data=['matched_return_type']]
    return complex_type_productions
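The docstring example can be reproduced with plain strings to show the itertools.product expansion in isolation:

import itertools

return_types_matched = ['d']
argument_types_matched = [['e', 'f'], ['<b,c>']]   # 'a' matched 'e' and 'f'
for ret in return_types_matched:
    for args in itertools.product(*argument_types_matched):
        print(ret, '->', ['<a,<<b,c>,d>>'] + list(args))
# d -> ['<a,<<b,c>,d>>', 'e', '<b,c>']
# d -> ['<a,<<b,c>,d>>', 'f', '<b,c>']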
def is_valid_channel(self, channel, conda_url='https://conda.anaconda.org', non_blocking=True): """Check if a conda channel is valid.""" logger.debug(str((channel, conda_url))) if non_blocking: method = self._is_valid_channel return self._create_worker(method, channel, conda_url) else: return self._is_valid_channel(channel, conda_url=conda_url)
def function[is_valid_channel, parameter[self, channel, conda_url, non_blocking]]: constant[Check if a conda channel is valid.] call[name[logger].debug, parameter[call[name[str], parameter[tuple[[<ast.Name object at 0x7da1b27b7eb0>, <ast.Name object at 0x7da1b27b5120>]]]]]] if name[non_blocking] begin[:] variable[method] assign[=] name[self]._is_valid_channel return[call[name[self]._create_worker, parameter[name[method], name[channel], name[conda_url]]]]
keyword[def] identifier[is_valid_channel] ( identifier[self] , identifier[channel] , identifier[conda_url] = literal[string] , identifier[non_blocking] = keyword[True] ): literal[string] identifier[logger] . identifier[debug] ( identifier[str] (( identifier[channel] , identifier[conda_url] ))) keyword[if] identifier[non_blocking] : identifier[method] = identifier[self] . identifier[_is_valid_channel] keyword[return] identifier[self] . identifier[_create_worker] ( identifier[method] , identifier[channel] , identifier[conda_url] ) keyword[else] : keyword[return] identifier[self] . identifier[_is_valid_channel] ( identifier[channel] , identifier[conda_url] = identifier[conda_url] )
def is_valid_channel(self, channel, conda_url='https://conda.anaconda.org', non_blocking=True): """Check if a conda channel is valid.""" logger.debug(str((channel, conda_url))) if non_blocking: method = self._is_valid_channel return self._create_worker(method, channel, conda_url) # depends on [control=['if'], data=[]] else: return self._is_valid_channel(channel, conda_url=conda_url)
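The non_blocking branch above hands the check to a worker. A minimal sketch of the same dispatch pattern, assuming a thread pool in place of the real _create_worker machinery and a dummy check in place of the real request:

from concurrent.futures import ThreadPoolExecutor

_pool = ThreadPoolExecutor(max_workers=1)

def _is_valid_channel(channel, conda_url='https://conda.anaconda.org'):
    return bool(channel)  # dummy check standing in for the real lookup

def is_valid_channel(channel, conda_url='https://conda.anaconda.org', non_blocking=True):
    if non_blocking:
        # Return a future, analogous to the worker object in the record above.
        return _pool.submit(_is_valid_channel, channel, conda_url)
    return _is_valid_channel(channel, conda_url=conda_url)

print(is_valid_channel('conda-forge', non_blocking=False))  # True
print(is_valid_channel('conda-forge').result())             # True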
def add_element(self, tag): '''Record that `tag` has been seen at this depth. If `tag` is :class:`TextElement`, it records a text node. ''' # Collapse adjacent text nodes if tag is TextElement and self.last_tag is TextElement: return self.last_tag = tag if tag not in self.tags: self.tags[tag] = 1 else: self.tags[tag] += 1
def function[add_element, parameter[self, tag]]: constant[Record that `tag` has been seen at this depth. If `tag` is :class:`TextElement`, it records a text node. ] if <ast.BoolOp object at 0x7da2054a44f0> begin[:] return[None] name[self].last_tag assign[=] name[tag] if compare[name[tag] <ast.NotIn object at 0x7da2590d7190> name[self].tags] begin[:] call[name[self].tags][name[tag]] assign[=] constant[1]
keyword[def] identifier[add_element] ( identifier[self] , identifier[tag] ): literal[string] keyword[if] identifier[tag] keyword[is] identifier[TextElement] keyword[and] identifier[self] . identifier[last_tag] keyword[is] identifier[TextElement] : keyword[return] identifier[self] . identifier[last_tag] = identifier[tag] keyword[if] identifier[tag] keyword[not] keyword[in] identifier[self] . identifier[tags] : identifier[self] . identifier[tags] [ identifier[tag] ]= literal[int] keyword[else] : identifier[self] . identifier[tags] [ identifier[tag] ]+= literal[int]
def add_element(self, tag): """Record that `tag` has been seen at this depth. If `tag` is :class:`TextElement`, it records a text node. """ # Collapse adjacent text nodes if tag is TextElement and self.last_tag is TextElement: return # depends on [control=['if'], data=[]] self.last_tag = tag if tag not in self.tags: self.tags[tag] = 1 # depends on [control=['if'], data=['tag']] else: self.tags[tag] += 1
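A runnable harness for the text-node collapsing above; the DepthCounter scaffolding is invented, and only add_element mirrors the record:

from collections import Counter

class TextElement:
    """Sentinel type marking a text node."""

class DepthCounter:
    def __init__(self):
        self.tags = Counter()
        self.last_tag = None

    def add_element(self, tag):
        # Collapse adjacent text nodes into a single count
        if tag is TextElement and self.last_tag is TextElement:
            return
        self.last_tag = tag
        self.tags[tag] += 1

counter = DepthCounter()
for tag in ('p', TextElement, TextElement, 'b', TextElement):
    counter.add_element(tag)
print(counter.tags[TextElement], counter.tags['p'], counter.tags['b'])  # 2 1 1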
def approx_count_distinct(col, rsd=None): """Aggregate function: returns a new :class:`Column` for approximate distinct count of column `col`. :param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more efficient to use :func:`countDistinct` >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect() [Row(distinct_ages=2)] """ sc = SparkContext._active_spark_context if rsd is None: jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col)) else: jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd) return Column(jc)
def function[approx_count_distinct, parameter[col, rsd]]: constant[Aggregate function: returns a new :class:`Column` for approximate distinct count of column `col`. :param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more efficient to use :func:`countDistinct` >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect() [Row(distinct_ages=2)] ] variable[sc] assign[=] name[SparkContext]._active_spark_context if compare[name[rsd] is constant[None]] begin[:] variable[jc] assign[=] call[name[sc]._jvm.functions.approx_count_distinct, parameter[call[name[_to_java_column], parameter[name[col]]]]] return[call[name[Column], parameter[name[jc]]]]
keyword[def] identifier[approx_count_distinct] ( identifier[col] , identifier[rsd] = keyword[None] ): literal[string] identifier[sc] = identifier[SparkContext] . identifier[_active_spark_context] keyword[if] identifier[rsd] keyword[is] keyword[None] : identifier[jc] = identifier[sc] . identifier[_jvm] . identifier[functions] . identifier[approx_count_distinct] ( identifier[_to_java_column] ( identifier[col] )) keyword[else] : identifier[jc] = identifier[sc] . identifier[_jvm] . identifier[functions] . identifier[approx_count_distinct] ( identifier[_to_java_column] ( identifier[col] ), identifier[rsd] ) keyword[return] identifier[Column] ( identifier[jc] )
def approx_count_distinct(col, rsd=None): """Aggregate function: returns a new :class:`Column` for approximate distinct count of column `col`. :param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more efficient to use :func:`countDistinct` >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect() [Row(distinct_ages=2)] """ sc = SparkContext._active_spark_context if rsd is None: jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col)) # depends on [control=['if'], data=[]] else: jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd) return Column(jc)
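Usage against a local SparkSession, showing both the default and an explicit rsd (requires a pyspark installation; the data is illustrative):

from pyspark.sql import SparkSession
from pyspark.sql.functions import approx_count_distinct

spark = SparkSession.builder.master('local[1]').getOrCreate()
df = spark.createDataFrame([(2,), (2,), (5,)], ['age'])

# Default error bound (rsd=0.05), then a tighter explicit one.
df.agg(approx_count_distinct(df.age).alias('distinct_ages')).show()
df.agg(approx_count_distinct(df.age, rsd=0.01).alias('distinct_ages')).show()

spark.stop()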
def member_absent(ip, port, balancer_id, profile, **libcloud_kwargs): ''' Ensure a load balancer member is absent, based on IP and Port :param ip: IP address for the member :type ip: ``str`` :param port: Port for the member :type port: ``int`` :param balancer_id: id of a load balancer you want to detach the member from :type balancer_id: ``str`` :param profile: The profile key :type profile: ``str`` ''' existing_members = __salt__['libcloud_loadbalancer.list_balancer_members'](balancer_id, profile) for member in existing_members: if member['ip'] == ip and member['port'] == port: result = __salt__['libcloud_loadbalancer.balancer_detach_member'](balancer_id, member['id'], profile, **libcloud_kwargs) return state_result(result, "Member removed", balancer_id) return state_result(True, "Member already absent", balancer_id)
def function[member_absent, parameter[ip, port, balancer_id, profile]]: constant[ Ensure a load balancer member is absent, based on IP and Port :param ip: IP address for the member :type ip: ``str`` :param port: Port for the member :type port: ``int`` :param balancer_id: id of a load balancer you want to detach the member from :type balancer_id: ``str`` :param profile: The profile key :type profile: ``str`` ] variable[existing_members] assign[=] call[call[name[__salt__]][constant[libcloud_loadbalancer.list_balancer_members]], parameter[name[balancer_id], name[profile]]] for taget[name[member]] in starred[name[existing_members]] begin[:] if <ast.BoolOp object at 0x7da1b1c0d990> begin[:] variable[result] assign[=] call[call[name[__salt__]][constant[libcloud_loadbalancer.balancer_detach_member]], parameter[name[balancer_id], call[name[member]][constant[id]], name[profile]]] return[call[name[state_result], parameter[name[result], constant[Member removed], name[balancer_id]]]] return[call[name[state_result], parameter[constant[True], constant[Member already absent], name[balancer_id]]]]
keyword[def] identifier[member_absent] ( identifier[ip] , identifier[port] , identifier[balancer_id] , identifier[profile] ,** identifier[libcloud_kwargs] ): literal[string] identifier[existing_members] = identifier[__salt__] [ literal[string] ]( identifier[balancer_id] , identifier[profile] ) keyword[for] identifier[member] keyword[in] identifier[existing_members] : keyword[if] identifier[member] [ literal[string] ]== identifier[ip] keyword[and] identifier[member] [ literal[string] ]== identifier[port] : identifier[result] = identifier[__salt__] [ literal[string] ]( identifier[balancer_id] , identifier[member] [ literal[string] ], identifier[profile] ,** identifier[libcloud_kwargs] ) keyword[return] identifier[state_result] ( identifier[result] , literal[string] , identifier[balancer_id] ) keyword[return] identifier[state_result] ( keyword[True] , literal[string] , identifier[balancer_id] )
def member_absent(ip, port, balancer_id, profile, **libcloud_kwargs): """ Ensure a load balancer member is absent, based on IP and Port :param ip: IP address for the member :type ip: ``str`` :param port: Port for the member :type port: ``int`` :param balancer_id: id of a load balancer you want to detach the member from :type balancer_id: ``str`` :param profile: The profile key :type profile: ``str`` """ existing_members = __salt__['libcloud_loadbalancer.list_balancer_members'](balancer_id, profile) for member in existing_members: if member['ip'] == ip and member['port'] == port: result = __salt__['libcloud_loadbalancer.balancer_detach_member'](balancer_id, member['id'], profile, **libcloud_kwargs) return state_result(result, 'Member removed', balancer_id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['member']] return state_result(True, 'Member already absent', balancer_id)
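The find-then-detach loop is easy to exercise without a cloud driver; below, detach stands in for the libcloud execution-module call and the member dicts are toy data:

def member_absent(ip, port, members, detach):
    for member in members:
        if member['ip'] == ip and member['port'] == port:
            return detach(member['id'])   # member found: detach it
    return True                           # member already absent

members = [{'id': 'm1', 'ip': '10.0.0.5', 'port': 80}]
print(member_absent('10.0.0.5', 80, members, detach=lambda mid: True))  # True (removed)
print(member_absent('10.0.0.9', 80, members, detach=lambda mid: True))  # True (already absent)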
def setup_mpi_gpus():
    """
    Set CUDA_VISIBLE_DEVICES to MPI rank if not already set
    """
    if 'CUDA_VISIBLE_DEVICES' not in os.environ:
        if sys.platform == 'darwin': # This assumes if you're on OSX you're just
            ids = []                 # doing a smoke test and don't want GPUs
        else:
            lrank, _lsize = get_local_rank_size(MPI.COMM_WORLD)
            ids = [lrank]
        os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, ids))
def function[setup_mpi_gpus, parameter[]]: constant[ Set CUDA_VISIBLE_DEVICES to MPI rank if not already set ] if compare[constant[CUDA_VISIBLE_DEVICES] <ast.NotIn object at 0x7da2590d7190> name[os].environ] begin[:] if compare[name[sys].platform equal[==] constant[darwin]] begin[:] variable[ids] assign[=] list[[]] call[name[os].environ][constant[CUDA_VISIBLE_DEVICES]] assign[=] call[constant[,].join, parameter[call[name[map], parameter[name[str], name[ids]]]]]
keyword[def] identifier[setup_mpi_gpus] (): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[os] . identifier[environ] : keyword[if] identifier[sys] . identifier[platform] == literal[string] : identifier[ids] =[] keyword[else] : identifier[lrank] , identifier[_lsize] = identifier[get_local_rank_size] ( identifier[MPI] . identifier[COMM_WORLD] ) identifier[ids] =[ identifier[lrank] ] identifier[os] . identifier[environ] [ literal[string] ]= literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[ids] ))
def setup_mpi_gpus():
    """
    Set CUDA_VISIBLE_DEVICES to MPI rank if not already set
    """
    if 'CUDA_VISIBLE_DEVICES' not in os.environ:
        if sys.platform == 'darwin': # This assumes if you're on OSX you're just
            ids = [] # doing a smoke test and don't want GPUs # depends on [control=['if'], data=[]]
        else:
            (lrank, _lsize) = get_local_rank_size(MPI.COMM_WORLD)
            ids = [lrank]
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, ids)) # depends on [control=['if'], data=[]]
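The pinning logic can be exercised without MPI by supplying the local rank directly; pin_gpu below is a stand-in that skips the get_local_rank_size call:

import os
import sys

def pin_gpu(lrank):
    # Mirror of the logic above with the MPI call stubbed out.
    if 'CUDA_VISIBLE_DEVICES' not in os.environ:
        ids = [] if sys.platform == 'darwin' else [lrank]
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, ids))

pin_gpu(lrank=1)
print(os.environ.get('CUDA_VISIBLE_DEVICES'))  # '1' on Linux, '' on OSX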
def del_node(self, char, node): """Remove a node from a character.""" del self._real.character[char].node[node] for cache in ( self._char_nodes_rulebooks_cache, self._node_stat_cache, self._node_successors_cache ): try: del cache[char][node] except KeyError: pass if char in self._char_nodes_cache and node in self._char_nodes_cache[char]: self._char_nodes_cache[char] = self._char_nodes_cache[char] - frozenset([node]) if char in self._portal_stat_cache: portal_stat_cache_char = self._portal_stat_cache[char] if node in portal_stat_cache_char: del portal_stat_cache_char[node] for charo in portal_stat_cache_char.values(): if node in charo: del charo[node] if char in self._char_portals_rulebooks_cache: portal_rulebook_cache_char = self._char_portals_rulebooks_cache[char] if node in portal_rulebook_cache_char: del portal_rulebook_cache_char[node] for porto in portal_rulebook_cache_char.values(): if node in porto: del porto[node]
def function[del_node, parameter[self, char, node]]: constant[Remove a node from a character.] <ast.Delete object at 0x7da1b0b819c0> for taget[name[cache]] in starred[tuple[[<ast.Attribute object at 0x7da1b0b804f0>, <ast.Attribute object at 0x7da1b0b81480>, <ast.Attribute object at 0x7da1b0b82b90>]]] begin[:] <ast.Try object at 0x7da1b0b83940> if <ast.BoolOp object at 0x7da1b0b83760> begin[:] call[name[self]._char_nodes_cache][name[char]] assign[=] binary_operation[call[name[self]._char_nodes_cache][name[char]] - call[name[frozenset], parameter[list[[<ast.Name object at 0x7da1b0b81120>]]]]] if compare[name[char] in name[self]._portal_stat_cache] begin[:] variable[portal_stat_cache_char] assign[=] call[name[self]._portal_stat_cache][name[char]] if compare[name[node] in name[portal_stat_cache_char]] begin[:] <ast.Delete object at 0x7da1b0b83550> for taget[name[charo]] in starred[call[name[portal_stat_cache_char].values, parameter[]]] begin[:] if compare[name[node] in name[charo]] begin[:] <ast.Delete object at 0x7da1b0b824a0> if compare[name[char] in name[self]._char_portals_rulebooks_cache] begin[:] variable[portal_rulebook_cache_char] assign[=] call[name[self]._char_portals_rulebooks_cache][name[char]] if compare[name[node] in name[portal_rulebook_cache_char]] begin[:] <ast.Delete object at 0x7da1b0ba6f80> for taget[name[porto]] in starred[call[name[portal_rulebook_cache_char].values, parameter[]]] begin[:] if compare[name[node] in name[porto]] begin[:] <ast.Delete object at 0x7da1b0cb7430>
keyword[def] identifier[del_node] ( identifier[self] , identifier[char] , identifier[node] ): literal[string] keyword[del] identifier[self] . identifier[_real] . identifier[character] [ identifier[char] ]. identifier[node] [ identifier[node] ] keyword[for] identifier[cache] keyword[in] ( identifier[self] . identifier[_char_nodes_rulebooks_cache] , identifier[self] . identifier[_node_stat_cache] , identifier[self] . identifier[_node_successors_cache] ): keyword[try] : keyword[del] identifier[cache] [ identifier[char] ][ identifier[node] ] keyword[except] identifier[KeyError] : keyword[pass] keyword[if] identifier[char] keyword[in] identifier[self] . identifier[_char_nodes_cache] keyword[and] identifier[node] keyword[in] identifier[self] . identifier[_char_nodes_cache] [ identifier[char] ]: identifier[self] . identifier[_char_nodes_cache] [ identifier[char] ]= identifier[self] . identifier[_char_nodes_cache] [ identifier[char] ]- identifier[frozenset] ([ identifier[node] ]) keyword[if] identifier[char] keyword[in] identifier[self] . identifier[_portal_stat_cache] : identifier[portal_stat_cache_char] = identifier[self] . identifier[_portal_stat_cache] [ identifier[char] ] keyword[if] identifier[node] keyword[in] identifier[portal_stat_cache_char] : keyword[del] identifier[portal_stat_cache_char] [ identifier[node] ] keyword[for] identifier[charo] keyword[in] identifier[portal_stat_cache_char] . identifier[values] (): keyword[if] identifier[node] keyword[in] identifier[charo] : keyword[del] identifier[charo] [ identifier[node] ] keyword[if] identifier[char] keyword[in] identifier[self] . identifier[_char_portals_rulebooks_cache] : identifier[portal_rulebook_cache_char] = identifier[self] . identifier[_char_portals_rulebooks_cache] [ identifier[char] ] keyword[if] identifier[node] keyword[in] identifier[portal_rulebook_cache_char] : keyword[del] identifier[portal_rulebook_cache_char] [ identifier[node] ] keyword[for] identifier[porto] keyword[in] identifier[portal_rulebook_cache_char] . identifier[values] (): keyword[if] identifier[node] keyword[in] identifier[porto] : keyword[del] identifier[porto] [ identifier[node] ]
def del_node(self, char, node): """Remove a node from a character.""" del self._real.character[char].node[node] for cache in (self._char_nodes_rulebooks_cache, self._node_stat_cache, self._node_successors_cache): try: del cache[char][node] # depends on [control=['try'], data=[]] except KeyError: pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['cache']] if char in self._char_nodes_cache and node in self._char_nodes_cache[char]: self._char_nodes_cache[char] = self._char_nodes_cache[char] - frozenset([node]) # depends on [control=['if'], data=[]] if char in self._portal_stat_cache: portal_stat_cache_char = self._portal_stat_cache[char] if node in portal_stat_cache_char: del portal_stat_cache_char[node] # depends on [control=['if'], data=['node', 'portal_stat_cache_char']] for charo in portal_stat_cache_char.values(): if node in charo: del charo[node] # depends on [control=['if'], data=['node', 'charo']] # depends on [control=['for'], data=['charo']] # depends on [control=['if'], data=['char']] if char in self._char_portals_rulebooks_cache: portal_rulebook_cache_char = self._char_portals_rulebooks_cache[char] if node in portal_rulebook_cache_char: del portal_rulebook_cache_char[node] # depends on [control=['if'], data=['node', 'portal_rulebook_cache_char']] for porto in portal_rulebook_cache_char.values(): if node in porto: del porto[node] # depends on [control=['if'], data=['node', 'porto']] # depends on [control=['for'], data=['porto']] # depends on [control=['if'], data=['char']]
def submit(recaptcha_challenge_field, recaptcha_response_field, private_key, remoteip, use_ssl=False): """ Submits a reCAPTCHA request for verification. Returns RecaptchaResponse for the request recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form recaptcha_response_field -- The value of recaptcha_response_field from the form private_key -- your reCAPTCHA private key remoteip -- the user's ip address """ if not (recaptcha_response_field and recaptcha_challenge_field and len(recaptcha_response_field) and len(recaptcha_challenge_field)): return RecaptchaResponse( is_valid=False, error_code='incorrect-captcha-sol' ) if getattr(settings, "NOCAPTCHA", False): params = urlencode({ 'secret': want_bytes(private_key), 'response': want_bytes(recaptcha_response_field), 'remoteip': want_bytes(remoteip), }) else: params = urlencode({ 'privatekey': want_bytes(private_key), 'remoteip': want_bytes(remoteip), 'challenge': want_bytes(recaptcha_challenge_field), 'response': want_bytes(recaptcha_response_field), }) if not PY2: params = params.encode('utf-8') if use_ssl: verify_url = 'https://%s/recaptcha/api/verify' % VERIFY_SERVER else: verify_url = 'http://%s/recaptcha/api/verify' % VERIFY_SERVER if getattr(settings, "NOCAPTCHA", False): verify_url = 'https://%s/recaptcha/api/siteverify' % VERIFY_SERVER req = Request( url=verify_url, data=params, headers={ 'Content-type': 'application/x-www-form-urlencoded', 'User-agent': 'reCAPTCHA Python' } ) httpresp = urlopen(req) if getattr(settings, "NOCAPTCHA", False): data = json.loads(httpresp.read().decode('utf-8')) return_code = data['success'] return_values = [return_code, None] if return_code: return_code = 'true' else: return_code = 'false' else: return_values = httpresp.read().splitlines() return_code = return_values[0] httpresp.close() if (return_code == "true"): return RecaptchaResponse(is_valid=True) else: return RecaptchaResponse(is_valid=False, error_code=return_values[1])
def function[submit, parameter[recaptcha_challenge_field, recaptcha_response_field, private_key, remoteip, use_ssl]]: constant[ Submits a reCAPTCHA request for verification. Returns RecaptchaResponse for the request recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form recaptcha_response_field -- The value of recaptcha_response_field from the form private_key -- your reCAPTCHA private key remoteip -- the user's ip address ] if <ast.UnaryOp object at 0x7da1b1578b20> begin[:] return[call[name[RecaptchaResponse], parameter[]]] if call[name[getattr], parameter[name[settings], constant[NOCAPTCHA], constant[False]]] begin[:] variable[params] assign[=] call[name[urlencode], parameter[dictionary[[<ast.Constant object at 0x7da1b157ae90>, <ast.Constant object at 0x7da1b1578ee0>, <ast.Constant object at 0x7da1b1579b70>], [<ast.Call object at 0x7da1b157b2b0>, <ast.Call object at 0x7da1b1578c10>, <ast.Call object at 0x7da1b15784c0>]]]] if <ast.UnaryOp object at 0x7da1b15794b0> begin[:] variable[params] assign[=] call[name[params].encode, parameter[constant[utf-8]]] if name[use_ssl] begin[:] variable[verify_url] assign[=] binary_operation[constant[https://%s/recaptcha/api/verify] <ast.Mod object at 0x7da2590d6920> name[VERIFY_SERVER]] if call[name[getattr], parameter[name[settings], constant[NOCAPTCHA], constant[False]]] begin[:] variable[verify_url] assign[=] binary_operation[constant[https://%s/recaptcha/api/siteverify] <ast.Mod object at 0x7da2590d6920> name[VERIFY_SERVER]] variable[req] assign[=] call[name[Request], parameter[]] variable[httpresp] assign[=] call[name[urlopen], parameter[name[req]]] if call[name[getattr], parameter[name[settings], constant[NOCAPTCHA], constant[False]]] begin[:] variable[data] assign[=] call[name[json].loads, parameter[call[call[name[httpresp].read, parameter[]].decode, parameter[constant[utf-8]]]]] variable[return_code] assign[=] call[name[data]][constant[success]] variable[return_values] assign[=] list[[<ast.Name object at 0x7da1b1579d80>, <ast.Constant object at 0x7da1b157a740>]] if name[return_code] begin[:] variable[return_code] assign[=] constant[true] call[name[httpresp].close, parameter[]] if compare[name[return_code] equal[==] constant[true]] begin[:] return[call[name[RecaptchaResponse], parameter[]]]
keyword[def] identifier[submit] ( identifier[recaptcha_challenge_field] , identifier[recaptcha_response_field] , identifier[private_key] , identifier[remoteip] , identifier[use_ssl] = keyword[False] ): literal[string] keyword[if] keyword[not] ( identifier[recaptcha_response_field] keyword[and] identifier[recaptcha_challenge_field] keyword[and] identifier[len] ( identifier[recaptcha_response_field] ) keyword[and] identifier[len] ( identifier[recaptcha_challenge_field] )): keyword[return] identifier[RecaptchaResponse] ( identifier[is_valid] = keyword[False] , identifier[error_code] = literal[string] ) keyword[if] identifier[getattr] ( identifier[settings] , literal[string] , keyword[False] ): identifier[params] = identifier[urlencode] ({ literal[string] : identifier[want_bytes] ( identifier[private_key] ), literal[string] : identifier[want_bytes] ( identifier[recaptcha_response_field] ), literal[string] : identifier[want_bytes] ( identifier[remoteip] ), }) keyword[else] : identifier[params] = identifier[urlencode] ({ literal[string] : identifier[want_bytes] ( identifier[private_key] ), literal[string] : identifier[want_bytes] ( identifier[remoteip] ), literal[string] : identifier[want_bytes] ( identifier[recaptcha_challenge_field] ), literal[string] : identifier[want_bytes] ( identifier[recaptcha_response_field] ), }) keyword[if] keyword[not] identifier[PY2] : identifier[params] = identifier[params] . identifier[encode] ( literal[string] ) keyword[if] identifier[use_ssl] : identifier[verify_url] = literal[string] % identifier[VERIFY_SERVER] keyword[else] : identifier[verify_url] = literal[string] % identifier[VERIFY_SERVER] keyword[if] identifier[getattr] ( identifier[settings] , literal[string] , keyword[False] ): identifier[verify_url] = literal[string] % identifier[VERIFY_SERVER] identifier[req] = identifier[Request] ( identifier[url] = identifier[verify_url] , identifier[data] = identifier[params] , identifier[headers] ={ literal[string] : literal[string] , literal[string] : literal[string] } ) identifier[httpresp] = identifier[urlopen] ( identifier[req] ) keyword[if] identifier[getattr] ( identifier[settings] , literal[string] , keyword[False] ): identifier[data] = identifier[json] . identifier[loads] ( identifier[httpresp] . identifier[read] (). identifier[decode] ( literal[string] )) identifier[return_code] = identifier[data] [ literal[string] ] identifier[return_values] =[ identifier[return_code] , keyword[None] ] keyword[if] identifier[return_code] : identifier[return_code] = literal[string] keyword[else] : identifier[return_code] = literal[string] keyword[else] : identifier[return_values] = identifier[httpresp] . identifier[read] (). identifier[splitlines] () identifier[return_code] = identifier[return_values] [ literal[int] ] identifier[httpresp] . identifier[close] () keyword[if] ( identifier[return_code] == literal[string] ): keyword[return] identifier[RecaptchaResponse] ( identifier[is_valid] = keyword[True] ) keyword[else] : keyword[return] identifier[RecaptchaResponse] ( identifier[is_valid] = keyword[False] , identifier[error_code] = identifier[return_values] [ literal[int] ])
def submit(recaptcha_challenge_field, recaptcha_response_field, private_key, remoteip, use_ssl=False): """ Submits a reCAPTCHA request for verification. Returns RecaptchaResponse for the request recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form recaptcha_response_field -- The value of recaptcha_response_field from the form private_key -- your reCAPTCHA private key remoteip -- the user's ip address """ if not (recaptcha_response_field and recaptcha_challenge_field and len(recaptcha_response_field) and len(recaptcha_challenge_field)): return RecaptchaResponse(is_valid=False, error_code='incorrect-captcha-sol') # depends on [control=['if'], data=[]] if getattr(settings, 'NOCAPTCHA', False): params = urlencode({'secret': want_bytes(private_key), 'response': want_bytes(recaptcha_response_field), 'remoteip': want_bytes(remoteip)}) # depends on [control=['if'], data=[]] else: params = urlencode({'privatekey': want_bytes(private_key), 'remoteip': want_bytes(remoteip), 'challenge': want_bytes(recaptcha_challenge_field), 'response': want_bytes(recaptcha_response_field)}) if not PY2: params = params.encode('utf-8') # depends on [control=['if'], data=[]] if use_ssl: verify_url = 'https://%s/recaptcha/api/verify' % VERIFY_SERVER # depends on [control=['if'], data=[]] else: verify_url = 'http://%s/recaptcha/api/verify' % VERIFY_SERVER if getattr(settings, 'NOCAPTCHA', False): verify_url = 'https://%s/recaptcha/api/siteverify' % VERIFY_SERVER # depends on [control=['if'], data=[]] req = Request(url=verify_url, data=params, headers={'Content-type': 'application/x-www-form-urlencoded', 'User-agent': 'reCAPTCHA Python'}) httpresp = urlopen(req) if getattr(settings, 'NOCAPTCHA', False): data = json.loads(httpresp.read().decode('utf-8')) return_code = data['success'] return_values = [return_code, None] if return_code: return_code = 'true' # depends on [control=['if'], data=[]] else: return_code = 'false' # depends on [control=['if'], data=[]] else: return_values = httpresp.read().splitlines() return_code = return_values[0] httpresp.close() if return_code == 'true': return RecaptchaResponse(is_valid=True) # depends on [control=['if'], data=[]] else: return RecaptchaResponse(is_valid=False, error_code=return_values[1])
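For the NOCAPTCHA branch, the round trip reduces to one POST to Google's documented siteverify endpoint. A minimal standard-library sketch (the secret key and response token are placeholders; error handling is omitted):

import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen

def verify_recaptcha(secret, response_token, remoteip):
    params = urlencode({'secret': secret,
                        'response': response_token,
                        'remoteip': remoteip}).encode('utf-8')
    req = Request('https://www.google.com/recaptcha/api/siteverify',
                  data=params,
                  headers={'Content-type': 'application/x-www-form-urlencoded'})
    with urlopen(req) as resp:
        # The JSON payload carries a boolean 'success' field.
        return json.loads(resp.read().decode('utf-8')).get('success', False)

# verify_recaptcha('your-secret-key', 'g-recaptcha-response-token', '127.0.0.1')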
def render_to_message(self, extra_context=None, *args, **kwargs): """ Renders and returns an unsent message with the given context. Any extra keyword arguments passed will be passed through as keyword arguments to the message constructor. :param extra_context: Any additional context to use when rendering templated content. :type extra_context: :class:`dict` :returns: A message instance. :rtype: :attr:`.message_class` """ message = super(TemplatedHTMLEmailMessageView, self)\ .render_to_message(extra_context, *args, **kwargs) if extra_context is None: extra_context = {} context = self.get_context_data(**extra_context) content = self.render_html_body(context) message.attach_alternative(content, mimetype='text/html') return message
def function[render_to_message, parameter[self, extra_context]]: constant[ Renders and returns an unsent message with the given context. Any extra keyword arguments passed will be passed through as keyword arguments to the message constructor. :param extra_context: Any additional context to use when rendering templated content. :type extra_context: :class:`dict` :returns: A message instance. :rtype: :attr:`.message_class` ] variable[message] assign[=] call[call[name[super], parameter[name[TemplatedHTMLEmailMessageView], name[self]]].render_to_message, parameter[name[extra_context], <ast.Starred object at 0x7da1aff1c0d0>]] if compare[name[extra_context] is constant[None]] begin[:] variable[extra_context] assign[=] dictionary[[], []] variable[context] assign[=] call[name[self].get_context_data, parameter[]] variable[content] assign[=] call[name[self].render_html_body, parameter[name[context]]] call[name[message].attach_alternative, parameter[name[content]]] return[name[message]]
keyword[def] identifier[render_to_message] ( identifier[self] , identifier[extra_context] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[message] = identifier[super] ( identifier[TemplatedHTMLEmailMessageView] , identifier[self] ). identifier[render_to_message] ( identifier[extra_context] ,* identifier[args] ,** identifier[kwargs] ) keyword[if] identifier[extra_context] keyword[is] keyword[None] : identifier[extra_context] ={} identifier[context] = identifier[self] . identifier[get_context_data] (** identifier[extra_context] ) identifier[content] = identifier[self] . identifier[render_html_body] ( identifier[context] ) identifier[message] . identifier[attach_alternative] ( identifier[content] , identifier[mimetype] = literal[string] ) keyword[return] identifier[message]
def render_to_message(self, extra_context=None, *args, **kwargs): """ Renders and returns an unsent message with the given context. Any extra keyword arguments passed will be passed through as keyword arguments to the message constructor. :param extra_context: Any additional context to use when rendering templated content. :type extra_context: :class:`dict` :returns: A message instance. :rtype: :attr:`.message_class` """ message = super(TemplatedHTMLEmailMessageView, self).render_to_message(extra_context, *args, **kwargs) if extra_context is None: extra_context = {} # depends on [control=['if'], data=['extra_context']] context = self.get_context_data(**extra_context) content = self.render_html_body(context) message.attach_alternative(content, mimetype='text/html') return message
def _get_params_for_optimizer(self, prefix, named_parameters):
    """Parse kwargs configuration for the optimizer identified by
    the given prefix. Supports param group assignment using wildcards:

      optimizer__lr=0.05,
      optimizer__param_groups=[
          ('rnn*.period', {'lr': 0.3, 'momentum': 0}),
          ('rnn0', {'lr': 0.1}),
      ]

    The first positional argument is the param groups.
    """
    kwargs = self._get_params_for(prefix)
    params = list(named_parameters)
    pgroups = []

    for pattern, group in kwargs.pop('param_groups', []):
        matches = [i for i, (name, _) in enumerate(params) if
                   fnmatch.fnmatch(name, pattern)]
        if matches:
            p = [params.pop(i)[1] for i in reversed(matches)]
            pgroups.append({'params': p, **group})

    if params:
        pgroups.append({'params': [p for _, p in params]})

    return [pgroups], kwargs
def function[_get_params_for_optimizer, parameter[self, prefix, named_parameters]]:
    constant[Parse kwargs configuration for the optimizer identified by
    the given prefix. Supports param group assignment using wildcards:

      optimizer__lr=0.05,
      optimizer__param_groups=[
          ('rnn*.period', {'lr': 0.3, 'momentum': 0}),
          ('rnn0', {'lr': 0.1}),
      ]

    The first positional argument is the param groups.
    ]
    variable[kwargs] assign[=] call[name[self]._get_params_for, parameter[name[prefix]]]
    variable[params] assign[=] call[name[list], parameter[name[named_parameters]]]
    variable[pgroups] assign[=] list[[]]
    for taget[tuple[[<ast.Name object at 0x7da18fe93340>, <ast.Name object at 0x7da18fe92e30>]]] in starred[call[name[kwargs].pop, parameter[constant[param_groups], list[[]]]]] begin[:]
    variable[matches] assign[=] <ast.ListComp object at 0x7da18fe91840>
    if name[matches] begin[:]
    variable[p] assign[=] <ast.ListComp object at 0x7da18dc07490>
    call[name[pgroups].append, parameter[dictionary[[<ast.Constant object at 0x7da18dc055d0>, None], [<ast.Name object at 0x7da18dc079d0>, <ast.Name object at 0x7da18dc069b0>]]]]
    if name[params] begin[:]
    call[name[pgroups].append, parameter[dictionary[[<ast.Constant object at 0x7da18dc04d90>], [<ast.ListComp object at 0x7da18dc04a00>]]]]
    return[tuple[[<ast.List object at 0x7da18dc079a0>, <ast.Name object at 0x7da18dc068f0>]]]
keyword[def] identifier[_get_params_for_optimizer] ( identifier[self] , identifier[prefix] , identifier[named_parameters] ): literal[string] identifier[kwargs] = identifier[self] . identifier[_get_params_for] ( identifier[prefix] ) identifier[params] = identifier[list] ( identifier[named_parameters] ) identifier[pgroups] =[] keyword[for] identifier[pattern] , identifier[group] keyword[in] identifier[kwargs] . identifier[pop] ( literal[string] ,[]): identifier[matches] =[ identifier[i] keyword[for] identifier[i] ,( identifier[name] , identifier[_] ) keyword[in] identifier[enumerate] ( identifier[params] ) keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[name] , identifier[pattern] )] keyword[if] identifier[matches] : identifier[p] =[ identifier[params] . identifier[pop] ( identifier[i] )[ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[reversed] ( identifier[matches] )] identifier[pgroups] . identifier[append] ({ literal[string] : identifier[p] ,** identifier[group] }) keyword[if] identifier[params] : identifier[pgroups] . identifier[append] ({ literal[string] :[ identifier[p] keyword[for] identifier[_] , identifier[p] keyword[in] identifier[params] ]}) keyword[return] [ identifier[pgroups] ], identifier[kwargs]
def _get_params_for_optimizer(self, prefix, named_parameters):
    """Parse kwargs configuration for the optimizer identified by
    the given prefix. Supports param group assignment using wildcards:

      optimizer__lr=0.05,
      optimizer__param_groups=[
          ('rnn*.period', {'lr': 0.3, 'momentum': 0}),
          ('rnn0', {'lr': 0.1}),
      ]

    The first positional argument is the param groups.
    """
    kwargs = self._get_params_for(prefix)
    params = list(named_parameters)
    pgroups = []
    for (pattern, group) in kwargs.pop('param_groups', []):
        matches = [i for (i, (name, _)) in enumerate(params) if fnmatch.fnmatch(name, pattern)]
        if matches:
            p = [params.pop(i)[1] for i in reversed(matches)]
            pgroups.append({'params': p, **group}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
    if params:
        pgroups.append({'params': [p for (_, p) in params]}) # depends on [control=['if'], data=[]]
    return ([pgroups], kwargs)
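How the wildcard patterns carve named parameters into optimizer param groups, runnable with invented names in place of module.named_parameters():

import fnmatch

params = [('rnn0.weight', 'w0'), ('rnn1.period', 'p1'), ('dense.bias', 'b0')]
param_groups = [('rnn*.period', {'lr': 0.3}), ('rnn*', {'lr': 0.1})]

pgroups = []
for pattern, group in param_groups:
    matches = [i for i, (name, _) in enumerate(params)
               if fnmatch.fnmatch(name, pattern)]
    if matches:
        # Pop in reverse so the earlier indices stay valid.
        p = [params.pop(i)[1] for i in reversed(matches)]
        pgroups.append({'params': p, **group})
if params:
    pgroups.append({'params': [p for _, p in params]})  # default group

print(pgroups)
# [{'params': ['p1'], 'lr': 0.3}, {'params': ['w0'], 'lr': 0.1}, {'params': ['b0']}]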
def translate_style(style, colormode, colorpalette):
    """
    Translate the given style to an ANSI escape code
    sequence.

    ``style`` examples are:

    * green
    * bold
    * red_on_black
    * bold_green
    * italic_yellow_on_cyan

    :param str style: the style to translate
    :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``
    :param dict colorpalette: the color palette to use for the color name mapping
    """
    style_parts = iter(style.split('_'))

    ansi_start_sequence = []
    ansi_end_sequence = []

    try:
        # consume all modifiers
        part = None
        for mod_part in style_parts:
            part = mod_part
            if part not in ansi.MODIFIERS:
                break  # all modifiers have been consumed

            mod_start_code, mod_end_code = resolve_modifier_to_ansi_code(part, colormode)
            ansi_start_sequence.append(mod_start_code)
            ansi_end_sequence.append(mod_end_code)
        else:  # we've consumed all parts, thus we can exit
            raise StopIteration()

        # next part has to be a foreground color or the 'on' keyword
        # which means we have to consume background colors
        if part != 'on':
            ansi_start_code, ansi_end_code = translate_colorname_to_ansi_code(
                part, ansi.FOREGROUND_COLOR_OFFSET, colormode, colorpalette)
            ansi_start_sequence.append(ansi_start_code)
            ansi_end_sequence.append(ansi_end_code)
            # consume the required 'on' keyword after the foreground color
            next(style_parts)

        # next part has to be the background color
        part = next(style_parts)
        ansi_start_code, ansi_end_code = translate_colorname_to_ansi_code(
            part, ansi.BACKGROUND_COLOR_OFFSET, colormode, colorpalette)
        ansi_start_sequence.append(ansi_start_code)
        ansi_end_sequence.append(ansi_end_code)
    except StopIteration:  # we've consumed all parts of the styling string
        pass

    # construct and return ANSI escape code sequence
    return ''.join(ansi_start_sequence), ''.join(ansi_end_sequence)
def function[translate_style, parameter[style, colormode, colorpalette]]:
    constant[
    Translate the given style to an ANSI escape code
    sequence.

    ``style`` examples are:

    * green
    * bold
    * red_on_black
    * bold_green
    * italic_yellow_on_cyan

    :param str style: the style to translate
    :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``
    :param dict colorpalette: the color palette to use for the color name mapping
    ]
    variable[style_parts] assign[=] call[name[iter], parameter[call[name[style].split, parameter[constant[_]]]]]
    variable[ansi_start_sequence] assign[=] list[[]]
    variable[ansi_end_sequence] assign[=] list[[]]
    <ast.Try object at 0x7da1b0140310>
    return[tuple[[<ast.Call object at 0x7da1b012ddb0>, <ast.Call object at 0x7da1b012c400>]]]
keyword[def] identifier[translate_style] ( identifier[style] , identifier[colormode] , identifier[colorpalette] ): literal[string] identifier[style_parts] = identifier[iter] ( identifier[style] . identifier[split] ( literal[string] )) identifier[ansi_start_sequence] =[] identifier[ansi_end_sequence] =[] keyword[try] : identifier[part] = keyword[None] keyword[for] identifier[mod_part] keyword[in] identifier[style_parts] : identifier[part] = identifier[mod_part] keyword[if] identifier[part] keyword[not] keyword[in] identifier[ansi] . identifier[MODIFIERS] : keyword[break] identifier[mod_start_code] , identifier[mod_end_code] = identifier[resolve_modifier_to_ansi_code] ( identifier[part] , identifier[colormode] ) identifier[ansi_start_sequence] . identifier[append] ( identifier[mod_start_code] ) identifier[ansi_end_sequence] . identifier[append] ( identifier[mod_end_code] ) keyword[else] : keyword[raise] identifier[StopIteration] () keyword[if] identifier[part] != literal[string] : identifier[ansi_start_code] , identifier[ansi_end_code] = identifier[translate_colorname_to_ansi_code] ( identifier[part] , identifier[ansi] . identifier[FOREGROUND_COLOR_OFFSET] , identifier[colormode] , identifier[colorpalette] ) identifier[ansi_start_sequence] . identifier[append] ( identifier[ansi_start_code] ) identifier[ansi_end_sequence] . identifier[append] ( identifier[ansi_end_code] ) identifier[next] ( identifier[style_parts] ) identifier[part] = identifier[next] ( identifier[style_parts] ) identifier[ansi_start_code] , identifier[ansi_end_code] = identifier[translate_colorname_to_ansi_code] ( identifier[part] , identifier[ansi] . identifier[BACKGROUND_COLOR_OFFSET] , identifier[colormode] , identifier[colorpalette] ) identifier[ansi_start_sequence] . identifier[append] ( identifier[ansi_start_code] ) identifier[ansi_end_sequence] . identifier[append] ( identifier[ansi_end_code] ) keyword[except] identifier[StopIteration] : keyword[pass] keyword[return] literal[string] . identifier[join] ( identifier[ansi_start_sequence] ), literal[string] . identifier[join] ( identifier[ansi_end_sequence] )
def translate_style(style, colormode, colorpalette):
    """
    Translate the given style to an ANSI escape code
    sequence.

    ``style`` examples are:

    * green
    * bold
    * red_on_black
    * bold_green
    * italic_yellow_on_cyan

    :param str style: the style to translate
    :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``
    :param dict colorpalette: the color palette to use for the color name mapping
    """
    style_parts = iter(style.split('_'))
    ansi_start_sequence = []
    ansi_end_sequence = []
    try:
        # consume all modifiers
        part = None
        for mod_part in style_parts:
            part = mod_part
            if part not in ansi.MODIFIERS:
                break  # all modifiers have been consumed # depends on [control=['if'], data=[]]
            (mod_start_code, mod_end_code) = resolve_modifier_to_ansi_code(part, colormode)
            ansi_start_sequence.append(mod_start_code)
            ansi_end_sequence.append(mod_end_code) # depends on [control=['for'], data=['mod_part']]
        else:
            # we've consumed all parts, thus we can exit
            raise StopIteration()
        # next part has to be a foreground color or the 'on' keyword
        # which means we have to consume background colors
        if part != 'on':
            (ansi_start_code, ansi_end_code) = translate_colorname_to_ansi_code(part, ansi.FOREGROUND_COLOR_OFFSET, colormode, colorpalette)
            ansi_start_sequence.append(ansi_start_code)
            ansi_end_sequence.append(ansi_end_code)
            # consume the required 'on' keyword after the foreground color
            next(style_parts) # depends on [control=['if'], data=['part']]
        # next part has to be the background color
        part = next(style_parts)
        (ansi_start_code, ansi_end_code) = translate_colorname_to_ansi_code(part, ansi.BACKGROUND_COLOR_OFFSET, colormode, colorpalette)
        ansi_start_sequence.append(ansi_start_code)
        ansi_end_sequence.append(ansi_end_code) # depends on [control=['try'], data=[]]
    except StopIteration:
        # we've consumed all parts of the styling string
        pass # depends on [control=['except'], data=[]]
    # construct and return ANSI escape code sequence
    return (''.join(ansi_start_sequence), ''.join(ansi_end_sequence))
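The parsing contract (leading modifiers, optional foreground color, 'on', optional background color) in stand-alone form; the code tables below are invented stand-ins for colorful's ansi module and palette handling:

MODIFIERS = {'bold': '1', 'italic': '3'}  # invented subset of modifier codes
FG = {'green': '32', 'yellow': '33'}      # invented foreground color codes
BG = {'black': '40', 'cyan': '46'}        # invented background color codes

def parse_style(style):
    parts = iter(style.split('_'))
    codes, part = [], None
    for part in parts:                  # consume all leading modifiers
        if part not in MODIFIERS:
            break
        codes.append(MODIFIERS[part])
    else:
        part = None                     # style consisted of modifiers only
    if part and part != 'on':
        codes.append(FG[part])          # foreground color
        part = next(parts, None)        # consume the 'on' keyword, if any
    if part == 'on':
        codes.append(BG[next(parts)])   # background color
    return codes

print(parse_style('bold_green_on_black'))  # ['1', '32', '40']
print(parse_style('italic'))               # ['3']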
def diff(cwd, item1=None, item2=None, opts='', git_opts='', user=None, password=None, no_index=False, cached=False, paths=None, output_encoding=None):
    '''
    .. versionadded:: 2015.8.12,2016.3.3,2016.11.0

    Interface to `git-diff(1)`_

    cwd
        The path to the git checkout

    item1 and item2
        Revision(s) to pass to the ``git diff`` command. One or both of these
        arguments may be ignored if some of the options below are set to
        ``True``. When ``cached`` is ``False``, and no revisions are passed to
        this function, then the current working tree will be compared against
        the index (i.e. unstaged changes). When two revisions are passed, they
        will be compared to each other.

    opts
        Any additional options to add to the command line, in a single string

        .. note::
            On the Salt CLI, if the opts are preceded with a dash, it is
            necessary to precede them with ``opts=`` (as in the CLI examples
            below) to avoid causing errors with Salt's own argument parsing.

    git_opts
        Any additional options to add to git command itself (not the ``diff``
        subcommand), in a single string. This is useful for passing ``-c`` to
        run git with temporary changes to the git configuration.

        .. versionadded:: 2017.7.0

        .. note::
            This is only supported in git 1.7.2 and newer.

    user
        User under which to run the git command. By default, the command is
        run by the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will
        be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    no_index : False
        When it is necessary to diff two files in the same repo against each
        other, and not diff two different revisions, set this option to
        ``True``. If this is left ``False`` in these instances, then a normal
        ``git diff`` will be performed against the index (i.e. unstaged
        changes), and files in the ``paths`` option will be used to narrow
        down the diff output.

        .. note::
            Requires Git 1.5.1 or newer. Additionally, when set to ``True``,
            ``item1`` and ``item2`` will be ignored.

    cached : False
        If ``True``, compare staged changes to ``item1`` (if specified),
        otherwise compare them to the most recent commit.

        .. note::
            ``item2`` is ignored if this option is set to ``True``.

    paths
        File paths to pass to the ``git diff`` command. Can be passed as a
        comma-separated list or a Python list.

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to
            handle Unicode characters.

        .. versionadded:: 2018.3.1

    .. _`git-diff(1)`: http://git-scm.com/docs/git-diff

    CLI Example:

    .. code-block:: bash

        # Perform diff against the index (staging area for next commit)
        salt myminion git.diff /path/to/repo

        # Compare staged changes to the most recent commit
        salt myminion git.diff /path/to/repo cached=True

        # Compare staged changes to a specific revision
        salt myminion git.diff /path/to/repo mybranch cached=True

        # Perform diff against the most recent commit (includes staged changes)
        salt myminion git.diff /path/to/repo HEAD

        # Diff two commits
        salt myminion git.diff /path/to/repo abcdef1 aabbccd

        # Diff two commits, only showing differences in the specified paths
        salt myminion git.diff /path/to/repo abcdef1 aabbccd paths=path/to/file1,path/to/file2

        # Diff two files with one being outside the working tree
        salt myminion git.diff /path/to/repo no_index=True paths=path/to/file1,/absolute/path/to/file2
    '''
    if no_index and cached:
        raise CommandExecutionError(
            'The \'no_index\' and \'cached\' options cannot be used together'
        )

    command = ['git'] + _format_git_opts(git_opts)
    command.append('diff')
    command.extend(_format_opts(opts))

    if paths is not None and not isinstance(paths, (list, tuple)):
        try:
            paths = paths.split(',')
        except AttributeError:
            paths = six.text_type(paths).split(',')

    ignore_retcode = False
    failhard = True

    if no_index:
        if _LooseVersion(version(versioninfo=False)) < _LooseVersion('1.5.1'):
            raise CommandExecutionError(
                'The \'no_index\' option is only supported in Git 1.5.1 and '
                'newer'
            )
        ignore_retcode = True
        failhard = False
        command.append('--no-index')
        for value in [x for x in (item1, item2) if x]:
            log.warning(
                'Revision \'%s\' ignored in git diff, as revisions cannot be '
                'used when no_index=True', value
            )

    elif cached:
        command.append('--cached')
        if item1:
            command.append(item1)
        if item2:
            log.warning(
                'Second revision \'%s\' ignored in git diff, at most one '
                'revision is considered when cached=True', item2
            )

    else:
        for value in [x for x in (item1, item2) if x]:
            command.append(value)

    if paths:
        command.append('--')
        command.extend(paths)

    return _git_run(command, cwd=cwd, user=user, password=password, ignore_retcode=ignore_retcode, failhard=failhard, redirect_stderr=True, output_encoding=output_encoding)['stdout']
def function[diff, parameter[cwd, item1, item2, opts, git_opts, user, password, no_index, cached, paths, output_encoding]]:
    constant[
    .. versionadded:: 2015.8.12,2016.3.3,2016.11.0

    Interface to `git-diff(1)`_

    cwd
        The path to the git checkout

    item1 and item2
        Revision(s) to pass to the ``git diff`` command. One or both of these
        arguments may be ignored if some of the options below are set to
        ``True``. When ``cached`` is ``False``, and no revisions are passed to
        this function, then the current working tree will be compared against
        the index (i.e. unstaged changes). When two revisions are passed, they
        will be compared to each other.

    opts
        Any additional options to add to the command line, in a single string

        .. note::
            On the Salt CLI, if the opts are preceded with a dash, it is
            necessary to precede them with ``opts=`` (as in the CLI examples
            below) to avoid causing errors with Salt's own argument parsing.

    git_opts
        Any additional options to add to git command itself (not the ``diff``
        subcommand), in a single string. This is useful for passing ``-c`` to
        run git with temporary changes to the git configuration.

        .. versionadded:: 2017.7.0

        .. note::
            This is only supported in git 1.7.2 and newer.

    user
        User under which to run the git command. By default, the command is
        run by the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will
        be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    no_index : False
        When it is necessary to diff two files in the same repo against each
        other, and not diff two different revisions, set this option to
        ``True``. If this is left ``False`` in these instances, then a normal
        ``git diff`` will be performed against the index (i.e. unstaged
        changes), and files in the ``paths`` option will be used to narrow
        down the diff output.

        .. note::
            Requires Git 1.5.1 or newer. Additionally, when set to ``True``,
            ``item1`` and ``item2`` will be ignored.

    cached : False
        If ``True``, compare staged changes to ``item1`` (if specified),
        otherwise compare them to the most recent commit.

        .. note::
            ``item2`` is ignored if this option is set to ``True``.

    paths
        File paths to pass to the ``git diff`` command. Can be passed as a
        comma-separated list or a Python list.

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to
            handle Unicode characters.

        .. versionadded:: 2018.3.1

    .. _`git-diff(1)`: http://git-scm.com/docs/git-diff

    CLI Example:

    .. code-block:: bash

        # Perform diff against the index (staging area for next commit)
        salt myminion git.diff /path/to/repo

        # Compare staged changes to the most recent commit
        salt myminion git.diff /path/to/repo cached=True

        # Compare staged changes to a specific revision
        salt myminion git.diff /path/to/repo mybranch cached=True

        # Perform diff against the most recent commit (includes staged changes)
        salt myminion git.diff /path/to/repo HEAD

        # Diff two commits
        salt myminion git.diff /path/to/repo abcdef1 aabbccd

        # Diff two commits, only showing differences in the specified paths
        salt myminion git.diff /path/to/repo abcdef1 aabbccd paths=path/to/file1,path/to/file2

        # Diff two files with one being outside the working tree
        salt myminion git.diff /path/to/repo no_index=True paths=path/to/file1,/absolute/path/to/file2
    ]
    if <ast.BoolOp object at 0x7da1b26ad0c0> begin[:]
    <ast.Raise object at 0x7da1b26ae2f0>
    variable[command] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b26adf60>]] + call[name[_format_git_opts], parameter[name[git_opts]]]]
    call[name[command].append, parameter[constant[diff]]]
    call[name[command].extend, parameter[call[name[_format_opts], parameter[name[opts]]]]]
    if <ast.BoolOp object at 0x7da1b26ad9c0> begin[:]
    <ast.Try object at 0x7da1b26afd00>
    variable[ignore_retcode] assign[=] constant[False]
    variable[failhard] assign[=] constant[True]
    if name[no_index] begin[:]
    if compare[call[name[_LooseVersion], parameter[call[name[version], parameter[]]]] less[<] call[name[_LooseVersion], parameter[constant[1.5.1]]]] begin[:]
    <ast.Raise object at 0x7da1b26af610>
    variable[ignore_retcode] assign[=] constant[True]
    variable[failhard] assign[=] constant[False]
    call[name[command].append, parameter[constant[--no-index]]]
    for taget[name[value]] in starred[<ast.ListComp object at 0x7da1b26af2e0>] begin[:]
    call[name[log].warning, parameter[constant[Revision '%s' ignored in git diff, as revisions cannot be used when no_index=True], name[value]]]
    if name[paths] begin[:]
    call[name[command].append, parameter[constant[--]]]
    call[name[command].extend, parameter[name[paths]]]
    return[call[call[name[_git_run], parameter[name[command]]]][constant[stdout]]]
keyword[def] identifier[diff] ( identifier[cwd] , identifier[item1] = keyword[None] , identifier[item2] = keyword[None] , identifier[opts] = literal[string] , identifier[git_opts] = literal[string] , identifier[user] = keyword[None] , identifier[password] = keyword[None] , identifier[no_index] = keyword[False] , identifier[cached] = keyword[False] , identifier[paths] = keyword[None] , identifier[output_encoding] = keyword[None] ): literal[string] keyword[if] identifier[no_index] keyword[and] identifier[cached] : keyword[raise] identifier[CommandExecutionError] ( literal[string] ) identifier[command] =[ literal[string] ]+ identifier[_format_git_opts] ( identifier[git_opts] ) identifier[command] . identifier[append] ( literal[string] ) identifier[command] . identifier[extend] ( identifier[_format_opts] ( identifier[opts] )) keyword[if] identifier[paths] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[paths] ,( identifier[list] , identifier[tuple] )): keyword[try] : identifier[paths] = identifier[paths] . identifier[split] ( literal[string] ) keyword[except] identifier[AttributeError] : identifier[paths] = identifier[six] . identifier[text_type] ( identifier[paths] ). identifier[split] ( literal[string] ) identifier[ignore_retcode] = keyword[False] identifier[failhard] = keyword[True] keyword[if] identifier[no_index] : keyword[if] identifier[_LooseVersion] ( identifier[version] ( identifier[versioninfo] = keyword[False] ))< identifier[_LooseVersion] ( literal[string] ): keyword[raise] identifier[CommandExecutionError] ( literal[string] literal[string] ) identifier[ignore_retcode] = keyword[True] identifier[failhard] = keyword[False] identifier[command] . identifier[append] ( literal[string] ) keyword[for] identifier[value] keyword[in] [ identifier[x] keyword[for] identifier[x] keyword[in] ( identifier[item1] , identifier[item2] ) keyword[if] identifier[x] ]: identifier[log] . identifier[warning] ( literal[string] literal[string] , identifier[value] ) keyword[elif] identifier[cached] : identifier[command] . identifier[append] ( literal[string] ) keyword[if] identifier[item1] : identifier[command] . identifier[append] ( identifier[item1] ) keyword[if] identifier[item2] : identifier[log] . identifier[warning] ( literal[string] literal[string] , identifier[item2] ) keyword[else] : keyword[for] identifier[value] keyword[in] [ identifier[x] keyword[for] identifier[x] keyword[in] ( identifier[item1] , identifier[item2] ) keyword[if] identifier[x] ]: identifier[command] . identifier[append] ( identifier[value] ) keyword[if] identifier[paths] : identifier[command] . identifier[append] ( literal[string] ) identifier[command] . identifier[extend] ( identifier[paths] ) keyword[return] identifier[_git_run] ( identifier[command] , identifier[cwd] = identifier[cwd] , identifier[user] = identifier[user] , identifier[password] = identifier[password] , identifier[ignore_retcode] = identifier[ignore_retcode] , identifier[failhard] = identifier[failhard] , identifier[redirect_stderr] = keyword[True] , identifier[output_encoding] = identifier[output_encoding] )[ literal[string] ]
def diff(cwd, item1=None, item2=None, opts='', git_opts='', user=None, password=None, no_index=False, cached=False, paths=None, output_encoding=None):
    """
    .. versionadded:: 2015.8.12,2016.3.3,2016.11.0

    Interface to `git-diff(1)`_

    cwd
        The path to the git checkout

    item1 and item2
        Revision(s) to pass to the ``git diff`` command. One or both of these
        arguments may be ignored if some of the options below are set to
        ``True``. When ``cached`` is ``False``, and no revisions are passed to
        this function, then the current working tree will be compared against
        the index (i.e. unstaged changes). When two revisions are passed, they
        will be compared to each other.

    opts
        Any additional options to add to the command line, in a single string

        .. note::
            On the Salt CLI, if the opts are preceded with a dash, it is
            necessary to precede them with ``opts=`` (as in the CLI examples
            below) to avoid causing errors with Salt's own argument parsing.

    git_opts
        Any additional options to add to git command itself (not the ``diff``
        subcommand), in a single string. This is useful for passing ``-c`` to
        run git with temporary changes to the git configuration.

        .. versionadded:: 2017.7.0

        .. note::
            This is only supported in git 1.7.2 and newer.

    user
        User under which to run the git command. By default, the command is
        run by the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will
        be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    no_index : False
        When it is necessary to diff two files in the same repo against each
        other, and not diff two different revisions, set this option to
        ``True``. If this is left ``False`` in these instances, then a normal
        ``git diff`` will be performed against the index (i.e. unstaged
        changes), and files in the ``paths`` option will be used to narrow
        down the diff output.

        .. note::
            Requires Git 1.5.1 or newer. Additionally, when set to ``True``,
            ``item1`` and ``item2`` will be ignored.

    cached : False
        If ``True``, compare staged changes to ``item1`` (if specified),
        otherwise compare them to the most recent commit.

        .. note::
            ``item2`` is ignored if this option is set to ``True``.

    paths
        File paths to pass to the ``git diff`` command. Can be passed as a
        comma-separated list or a Python list.

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to
            handle Unicode characters.

        .. versionadded:: 2018.3.1

    .. _`git-diff(1)`: http://git-scm.com/docs/git-diff

    CLI Example:

    .. code-block:: bash

        # Perform diff against the index (staging area for next commit)
        salt myminion git.diff /path/to/repo

        # Compare staged changes to the most recent commit
        salt myminion git.diff /path/to/repo cached=True

        # Compare staged changes to a specific revision
        salt myminion git.diff /path/to/repo mybranch cached=True

        # Perform diff against the most recent commit (includes staged changes)
        salt myminion git.diff /path/to/repo HEAD

        # Diff two commits
        salt myminion git.diff /path/to/repo abcdef1 aabbccd

        # Diff two commits, only showing differences in the specified paths
        salt myminion git.diff /path/to/repo abcdef1 aabbccd paths=path/to/file1,path/to/file2

        # Diff two files with one being outside the working tree
        salt myminion git.diff /path/to/repo no_index=True paths=path/to/file1,/absolute/path/to/file2
    """
    if no_index and cached:
        raise CommandExecutionError("The 'no_index' and 'cached' options cannot be used together") # depends on [control=['if'], data=[]]
    command = ['git'] + _format_git_opts(git_opts)
    command.append('diff')
    command.extend(_format_opts(opts))
    if paths is not None and (not isinstance(paths, (list, tuple))):
        try:
            paths = paths.split(',') # depends on [control=['try'], data=[]]
        except AttributeError:
            paths = six.text_type(paths).split(',') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
    ignore_retcode = False
    failhard = True
    if no_index:
        if _LooseVersion(version(versioninfo=False)) < _LooseVersion('1.5.1'):
            raise CommandExecutionError("The 'no_index' option is only supported in Git 1.5.1 and newer") # depends on [control=['if'], data=[]]
        ignore_retcode = True
        failhard = False
        command.append('--no-index')
        for value in [x for x in (item1, item2) if x]:
            log.warning("Revision '%s' ignored in git diff, as revisions cannot be used when no_index=True", value) # depends on [control=['for'], data=['value']] # depends on [control=['if'], data=[]]
    elif cached:
        command.append('--cached')
        if item1:
            command.append(item1) # depends on [control=['if'], data=[]]
        if item2:
            log.warning("Second revision '%s' ignored in git diff, at most one revision is considered when cached=True", item2) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    else:
        for value in [x for x in (item1, item2) if x]:
            command.append(value) # depends on [control=['for'], data=['value']]
    if paths:
        command.append('--')
        command.extend(paths) # depends on [control=['if'], data=[]]
    return _git_run(command, cwd=cwd, user=user, password=password, ignore_retcode=ignore_retcode, failhard=failhard, redirect_stderr=True, output_encoding=output_encoding)['stdout']
def _l_cv_weight(self, donor_catchment): """ Return L-CV weighting for a donor catchment. Methodology source: Science Report SC050050, eqn. 6.18 and 6.22a """ try: dist = donor_catchment.similarity_dist except AttributeError: dist = self._similarity_distance(self.catchment, donor_catchment) b = 0.0047 * sqrt(dist) + 0.0023 / 2 c = 0.02609 / (donor_catchment.record_length - 1) return 1 / (b + c)
def function[_l_cv_weight, parameter[self, donor_catchment]]: constant[ Return L-CV weighting for a donor catchment. Methodology source: Science Report SC050050, eqn. 6.18 and 6.22a ] <ast.Try object at 0x7da20e9b1060> variable[b] assign[=] binary_operation[binary_operation[constant[0.0047] * call[name[sqrt], parameter[name[dist]]]] + binary_operation[constant[0.0023] / constant[2]]] variable[c] assign[=] binary_operation[constant[0.02609] / binary_operation[name[donor_catchment].record_length - constant[1]]] return[binary_operation[constant[1] / binary_operation[name[b] + name[c]]]]
keyword[def] identifier[_l_cv_weight] ( identifier[self] , identifier[donor_catchment] ): literal[string] keyword[try] : identifier[dist] = identifier[donor_catchment] . identifier[similarity_dist] keyword[except] identifier[AttributeError] : identifier[dist] = identifier[self] . identifier[_similarity_distance] ( identifier[self] . identifier[catchment] , identifier[donor_catchment] ) identifier[b] = literal[int] * identifier[sqrt] ( identifier[dist] )+ literal[int] / literal[int] identifier[c] = literal[int] /( identifier[donor_catchment] . identifier[record_length] - literal[int] ) keyword[return] literal[int] /( identifier[b] + identifier[c] )
def _l_cv_weight(self, donor_catchment): """ Return L-CV weighting for a donor catchment. Methodology source: Science Report SC050050, eqn. 6.18 and 6.22a """ try: dist = donor_catchment.similarity_dist # depends on [control=['try'], data=[]] except AttributeError: dist = self._similarity_distance(self.catchment, donor_catchment) # depends on [control=['except'], data=[]] b = 0.0047 * sqrt(dist) + 0.0023 / 2 c = 0.02609 / (donor_catchment.record_length - 1) return 1 / (b + c)
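A worked check of the two terms in the weight formula above; the similarity distance 0.5 and the 40-year record length are invented inputs for illustration.

from math import sqrt

def l_cv_weight(similarity_dist, record_length):
    b = 0.0047 * sqrt(similarity_dist) + 0.0023 / 2  # grows with catchment dissimilarity
    c = 0.02609 / (record_length - 1)                # shrinks as the record lengthens
    return 1 / (b + c)

print(round(l_cv_weight(0.5, 40), 1))  # 1 / (0.004473 + 0.000669), about 194.5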
def setMaximumWidth(self, width): """ Sets the maximum width value to the inputted width and emits the \ sizeConstraintChanged signal. :param width | <int> """ super(XView, self).setMaximumWidth(width) if not self.signalsBlocked(): self.sizeConstraintChanged.emit()
def function[setMaximumWidth, parameter[self, width]]: constant[ Sets the maximum width value to the inputted width and emits the sizeConstraintChanged signal. :param width | <int> ] call[call[name[super], parameter[name[XView], name[self]]].setMaximumWidth, parameter[name[width]]] if <ast.UnaryOp object at 0x7da2041d8c10> begin[:] call[name[self].sizeConstraintChanged.emit, parameter[]]
keyword[def] identifier[setMaximumWidth] ( identifier[self] , identifier[width] ): literal[string] identifier[super] ( identifier[XView] , identifier[self] ). identifier[setMaximumWidth] ( identifier[width] ) keyword[if] ( keyword[not] identifier[self] . identifier[signalsBlocked] ()): identifier[self] . identifier[sizeConstraintChanged] . identifier[emit] ()
def setMaximumWidth(self, width): """ Sets the maximum width value to the inputted width and emits the sizeConstraintChanged signal. :param width | <int> """ super(XView, self).setMaximumWidth(width) if not self.signalsBlocked(): self.sizeConstraintChanged.emit() # depends on [control=['if'], data=[]]
def get_link(self, path, method, callback, view): """ Return a `coreapi.Link` instance for the given endpoint. """ fields = self.get_path_fields(path, method, callback, view) fields += self.get_serializer_fields(path, method, callback, view) fields += self.get_pagination_fields(path, method, callback, view) fields += self.get_filter_fields(path, method, callback, view) if fields and any([field.location in ('form', 'body') for field in fields]): encoding = self.get_encoding(path, method, callback, view) else: encoding = None description = self.get_description(path, method, callback, view) link = coreapi.Link( url=urlparse.urljoin(self.url, path), action=method.lower(), encoding=encoding, description=description, fields=fields, transform=None, # Not handled, but here for future reference ) link._responses = self.get_responses(path, method, callback, view) link._produces = self.get_produces(path, method, callback, view) return link
def function[get_link, parameter[self, path, method, callback, view]]: constant[ Return a `coreapi.Link` instance for the given endpoint. ] variable[fields] assign[=] call[name[self].get_path_fields, parameter[name[path], name[method], name[callback], name[view]]] <ast.AugAssign object at 0x7da2047ead10> <ast.AugAssign object at 0x7da2047ea140> <ast.AugAssign object at 0x7da2047eb040> if <ast.BoolOp object at 0x7da2047ebfd0> begin[:] variable[encoding] assign[=] call[name[self].get_encoding, parameter[name[path], name[method], name[callback], name[view]]] variable[description] assign[=] call[name[self].get_description, parameter[name[path], name[method], name[callback], name[view]]] variable[link] assign[=] call[name[coreapi].Link, parameter[]] name[link]._responses assign[=] call[name[self].get_responses, parameter[name[path], name[method], name[callback], name[view]]] name[link]._produces assign[=] call[name[self].get_produces, parameter[name[path], name[method], name[callback], name[view]]] return[name[link]]
keyword[def] identifier[get_link] ( identifier[self] , identifier[path] , identifier[method] , identifier[callback] , identifier[view] ): literal[string] identifier[fields] = identifier[self] . identifier[get_path_fields] ( identifier[path] , identifier[method] , identifier[callback] , identifier[view] ) identifier[fields] += identifier[self] . identifier[get_serializer_fields] ( identifier[path] , identifier[method] , identifier[callback] , identifier[view] ) identifier[fields] += identifier[self] . identifier[get_pagination_fields] ( identifier[path] , identifier[method] , identifier[callback] , identifier[view] ) identifier[fields] += identifier[self] . identifier[get_filter_fields] ( identifier[path] , identifier[method] , identifier[callback] , identifier[view] ) keyword[if] identifier[fields] keyword[and] identifier[any] ([ identifier[field] . identifier[location] keyword[in] ( literal[string] , literal[string] ) keyword[for] identifier[field] keyword[in] identifier[fields] ]): identifier[encoding] = identifier[self] . identifier[get_encoding] ( identifier[path] , identifier[method] , identifier[callback] , identifier[view] ) keyword[else] : identifier[encoding] = keyword[None] identifier[description] = identifier[self] . identifier[get_description] ( identifier[path] , identifier[method] , identifier[callback] , identifier[view] ) identifier[link] = identifier[coreapi] . identifier[Link] ( identifier[url] = identifier[urlparse] . identifier[urljoin] ( identifier[self] . identifier[url] , identifier[path] ), identifier[action] = identifier[method] . identifier[lower] (), identifier[encoding] = identifier[encoding] , identifier[description] = identifier[description] , identifier[fields] = identifier[fields] , identifier[transform] = keyword[None] , ) identifier[link] . identifier[_responses] = identifier[self] . identifier[get_responses] ( identifier[path] , identifier[method] , identifier[callback] , identifier[view] ) identifier[link] . identifier[_produces] = identifier[self] . identifier[get_produces] ( identifier[path] , identifier[method] , identifier[callback] , identifier[view] ) keyword[return] identifier[link]
def get_link(self, path, method, callback, view): """ Return a `coreapi.Link` instance for the given endpoint. """ fields = self.get_path_fields(path, method, callback, view) fields += self.get_serializer_fields(path, method, callback, view) fields += self.get_pagination_fields(path, method, callback, view) fields += self.get_filter_fields(path, method, callback, view) if fields and any([field.location in ('form', 'body') for field in fields]): encoding = self.get_encoding(path, method, callback, view) # depends on [control=['if'], data=[]] else: encoding = None description = self.get_description(path, method, callback, view) # Not handled, but here for future reference link = coreapi.Link(url=urlparse.urljoin(self.url, path), action=method.lower(), encoding=encoding, description=description, fields=fields, transform=None) link._responses = self.get_responses(path, method, callback, view) link._produces = self.get_produces(path, method, callback, view) return link
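A self-contained illustration of the encoding guard in get_link: an encoding is only chosen when at least one field lives in the form or body. The Field namedtuple is a stand-in for coreapi's field type.

from collections import namedtuple

Field = namedtuple('Field', ['name', 'location'])

def needs_encoding(fields):
    # Mirrors the guard above: only form/body fields carry a request payload
    return bool(fields) and any(f.location in ('form', 'body') for f in fields)

print(needs_encoding([Field('id', 'path'), Field('name', 'form')]))  # True
print(needs_encoding([Field('id', 'path')]))                         # False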
def create_stream(self, stream_id, sandbox=None): """ Create the stream :param stream_id: The stream identifier :param sandbox: The sandbox for this stream :return: The newly created stream :raises: NotImplementedError, StreamAlreadyExistsError """ if sandbox is not None: raise NotImplementedError logging.debug("Creating asset stream {}".format(stream_id)) if stream_id in self.streams: raise StreamAlreadyExistsError("Stream with id '{}' already exists".format(stream_id)) stream = AssetStream(channel=self, stream_id=stream_id, calculated_intervals=None, last_accessed=utcnow(), last_updated=utcnow(), sandbox=sandbox) self.streams[stream_id] = stream return stream
def function[create_stream, parameter[self, stream_id, sandbox]]: constant[ Create the stream :param stream_id: The stream identifier :param sandbox: The sandbox for this stream :return: The newly created stream :raises: NotImplementedError, StreamAlreadyExistsError ] if compare[name[sandbox] is_not constant[None]] begin[:] <ast.Raise object at 0x7da1b26aed40> call[name[logging].debug, parameter[call[constant[Creating asset stream {}].format, parameter[name[stream_id]]]]] if compare[name[stream_id] in name[self].streams] begin[:] <ast.Raise object at 0x7da1b26af490> variable[stream] assign[=] call[name[AssetStream], parameter[]] call[name[self].streams][name[stream_id]] assign[=] name[stream] return[name[stream]]
keyword[def] identifier[create_stream] ( identifier[self] , identifier[stream_id] , identifier[sandbox] = keyword[None] ): literal[string] keyword[if] identifier[sandbox] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[NotImplementedError] identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[stream_id] )) keyword[if] identifier[stream_id] keyword[in] identifier[self] . identifier[streams] : keyword[raise] identifier[StreamAlreadyExistsError] ( literal[string] . identifier[format] ( identifier[stream_id] )) identifier[stream] = identifier[AssetStream] ( identifier[channel] = identifier[self] , identifier[stream_id] = identifier[stream_id] , identifier[calculated_intervals] = keyword[None] , identifier[last_accessed] = identifier[utcnow] (), identifier[last_updated] = identifier[utcnow] (), identifier[sandbox] = identifier[sandbox] ) identifier[self] . identifier[streams] [ identifier[stream_id] ]= identifier[stream] keyword[return] identifier[stream]
def create_stream(self, stream_id, sandbox=None): """ Create the stream :param stream_id: The stream identifier :param sandbox: The sandbox for this stream :return: The newly created stream :raises: NotImplementedError, StreamAlreadyExistsError """ if sandbox is not None: raise NotImplementedError # depends on [control=['if'], data=[]] logging.debug('Creating asset stream {}'.format(stream_id)) if stream_id in self.streams: raise StreamAlreadyExistsError("Stream with id '{}' already exists".format(stream_id)) # depends on [control=['if'], data=['stream_id']] stream = AssetStream(channel=self, stream_id=stream_id, calculated_intervals=None, last_accessed=utcnow(), last_updated=utcnow(), sandbox=sandbox) self.streams[stream_id] = stream return stream
def available_add_ons(self): """ :rtype: twilio.rest.preview.marketplace.available_add_on.AvailableAddOnList """ if self._available_add_ons is None: self._available_add_ons = AvailableAddOnList(self) return self._available_add_ons
def function[available_add_ons, parameter[self]]: constant[ :rtype: twilio.rest.preview.marketplace.available_add_on.AvailableAddOnList ] if compare[name[self]._available_add_ons is constant[None]] begin[:] name[self]._available_add_ons assign[=] call[name[AvailableAddOnList], parameter[name[self]]] return[name[self]._available_add_ons]
keyword[def] identifier[available_add_ons] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_available_add_ons] keyword[is] keyword[None] : identifier[self] . identifier[_available_add_ons] = identifier[AvailableAddOnList] ( identifier[self] ) keyword[return] identifier[self] . identifier[_available_add_ons]
def available_add_ons(self): """ :rtype: twilio.rest.preview.marketplace.available_add_on.AvailableAddOnList """ if self._available_add_ons is None: self._available_add_ons = AvailableAddOnList(self) # depends on [control=['if'], data=[]] return self._available_add_ons
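available_add_ons is the usual lazy-initialization accessor; a generic sketch of the pattern with an invented ExpensiveList placeholder standing in for AvailableAddOnList.

class ExpensiveList(list):
    """Stand-in for AvailableAddOnList; pretend construction is costly."""

class Marketplace:
    def __init__(self):
        self._available_add_ons = None

    @property
    def available_add_ons(self):
        # Built once on first access, then cached on the instance
        if self._available_add_ons is None:
            self._available_add_ons = ExpensiveList()
        return self._available_add_ons

m = Marketplace()
print(m.available_add_ons is m.available_add_ons)  # True: same cached object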
def __get_sigmas(self): """will populate the stack_sigma dictionary with the energy and sigma array for all the compound/element and isotopes""" stack_sigma = {} _stack = self.stack _file_path = os.path.abspath(os.path.dirname(__file__)) _database_folder = os.path.join(_file_path, 'reference_data', self.database) _list_compounds = _stack.keys() for _compound in _list_compounds: _list_element = _stack[_compound]['elements'] stack_sigma[_compound] = {} for _element in _list_element: stack_sigma[_compound][_element] = {} _list_isotopes = _stack[_compound][_element]['isotopes']['list'] _list_file_names = _stack[_compound][_element]['isotopes']['file_names'] _list_isotopic_ratio = _stack[_compound][_element]['isotopes']['isotopic_ratio'] _iso_file_ratio = zip(_list_isotopes, _list_file_names, _list_isotopic_ratio) stack_sigma[_compound][_element]['isotopic_ratio'] = _list_isotopic_ratio # _dict_sigma_isotopes_sum = {} _sigma_all_isotopes = 0 _energy_all_isotpes = 0 for _iso, _file, _ratio in _iso_file_ratio: stack_sigma[_compound][_element][_iso] = {} _file = os.path.join(_database_folder, _file) _dict = _utilities.get_sigma(database_file_name=_file, e_min=self.energy_min, e_max=self.energy_max, e_step=self.energy_step) stack_sigma[_compound][_element][_iso]['energy_eV'] = _dict['energy_eV'] stack_sigma[_compound][_element][_iso]['sigma_b'] = _dict['sigma_b'] * _ratio stack_sigma[_compound][_element][_iso]['sigma_b_raw'] = _dict['sigma_b'] # sigma for all isotopes with their isotopic ratio _sigma_all_isotopes += _dict['sigma_b'] * _ratio _energy_all_isotpes += _dict['energy_eV'] # energy axis (x-axis) is averaged to take into account differences between x-axis of isotopes _mean_energy_all_isotopes = _energy_all_isotpes / len(_list_isotopes) stack_sigma[_compound][_element]['energy_eV'] = _mean_energy_all_isotopes stack_sigma[_compound][_element]['sigma_b'] = _sigma_all_isotopes self.stack_sigma = stack_sigma
def function[__get_sigmas, parameter[self]]: constant[will populate the stack_sigma dictionary with the energy and sigma array for all the compound/element and isotopes] variable[stack_sigma] assign[=] dictionary[[], []] variable[_stack] assign[=] name[self].stack variable[_file_path] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.dirname, parameter[name[__file__]]]]] variable[_database_folder] assign[=] call[name[os].path.join, parameter[name[_file_path], constant[reference_data], name[self].database]] variable[_list_compounds] assign[=] call[name[_stack].keys, parameter[]] for taget[name[_compound]] in starred[name[_list_compounds]] begin[:] variable[_list_element] assign[=] call[call[name[_stack]][name[_compound]]][constant[elements]] call[name[stack_sigma]][name[_compound]] assign[=] dictionary[[], []] for taget[name[_element]] in starred[name[_list_element]] begin[:] call[call[name[stack_sigma]][name[_compound]]][name[_element]] assign[=] dictionary[[], []] variable[_list_isotopes] assign[=] call[call[call[call[name[_stack]][name[_compound]]][name[_element]]][constant[isotopes]]][constant[list]] variable[_list_file_names] assign[=] call[call[call[call[name[_stack]][name[_compound]]][name[_element]]][constant[isotopes]]][constant[file_names]] variable[_list_isotopic_ratio] assign[=] call[call[call[call[name[_stack]][name[_compound]]][name[_element]]][constant[isotopes]]][constant[isotopic_ratio]] variable[_iso_file_ratio] assign[=] call[name[zip], parameter[name[_list_isotopes], name[_list_file_names], name[_list_isotopic_ratio]]] call[call[call[name[stack_sigma]][name[_compound]]][name[_element]]][constant[isotopic_ratio]] assign[=] name[_list_isotopic_ratio] variable[_sigma_all_isotopes] assign[=] constant[0] variable[_energy_all_isotpes] assign[=] constant[0] for taget[tuple[[<ast.Name object at 0x7da18f812140>, <ast.Name object at 0x7da18f8119c0>, <ast.Name object at 0x7da18f811030>]]] in starred[name[_iso_file_ratio]] begin[:] call[call[call[name[stack_sigma]][name[_compound]]][name[_element]]][name[_iso]] assign[=] dictionary[[], []] variable[_file] assign[=] call[name[os].path.join, parameter[name[_database_folder], name[_file]]] variable[_dict] assign[=] call[name[_utilities].get_sigma, parameter[]] call[call[call[call[name[stack_sigma]][name[_compound]]][name[_element]]][name[_iso]]][constant[energy_eV]] assign[=] call[name[_dict]][constant[energy_eV]] call[call[call[call[name[stack_sigma]][name[_compound]]][name[_element]]][name[_iso]]][constant[sigma_b]] assign[=] binary_operation[call[name[_dict]][constant[sigma_b]] * name[_ratio]] call[call[call[call[name[stack_sigma]][name[_compound]]][name[_element]]][name[_iso]]][constant[sigma_b_raw]] assign[=] call[name[_dict]][constant[sigma_b]] <ast.AugAssign object at 0x7da20c6a9c90> <ast.AugAssign object at 0x7da18f813d90> variable[_mean_energy_all_isotopes] assign[=] binary_operation[name[_energy_all_isotpes] / call[name[len], parameter[name[_list_isotopes]]]] call[call[call[name[stack_sigma]][name[_compound]]][name[_element]]][constant[energy_eV]] assign[=] name[_mean_energy_all_isotopes] call[call[call[name[stack_sigma]][name[_compound]]][name[_element]]][constant[sigma_b]] assign[=] name[_sigma_all_isotopes] name[self].stack_sigma assign[=] name[stack_sigma]
keyword[def] identifier[__get_sigmas] ( identifier[self] ): literal[string] identifier[stack_sigma] ={} identifier[_stack] = identifier[self] . identifier[stack] identifier[_file_path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] )) identifier[_database_folder] = identifier[os] . identifier[path] . identifier[join] ( identifier[_file_path] , literal[string] , identifier[self] . identifier[database] ) identifier[_list_compounds] = identifier[_stack] . identifier[keys] () keyword[for] identifier[_compound] keyword[in] identifier[_list_compounds] : identifier[_list_element] = identifier[_stack] [ identifier[_compound] ][ literal[string] ] identifier[stack_sigma] [ identifier[_compound] ]={} keyword[for] identifier[_element] keyword[in] identifier[_list_element] : identifier[stack_sigma] [ identifier[_compound] ][ identifier[_element] ]={} identifier[_list_isotopes] = identifier[_stack] [ identifier[_compound] ][ identifier[_element] ][ literal[string] ][ literal[string] ] identifier[_list_file_names] = identifier[_stack] [ identifier[_compound] ][ identifier[_element] ][ literal[string] ][ literal[string] ] identifier[_list_isotopic_ratio] = identifier[_stack] [ identifier[_compound] ][ identifier[_element] ][ literal[string] ][ literal[string] ] identifier[_iso_file_ratio] = identifier[zip] ( identifier[_list_isotopes] , identifier[_list_file_names] , identifier[_list_isotopic_ratio] ) identifier[stack_sigma] [ identifier[_compound] ][ identifier[_element] ][ literal[string] ]= identifier[_list_isotopic_ratio] identifier[_sigma_all_isotopes] = literal[int] identifier[_energy_all_isotpes] = literal[int] keyword[for] identifier[_iso] , identifier[_file] , identifier[_ratio] keyword[in] identifier[_iso_file_ratio] : identifier[stack_sigma] [ identifier[_compound] ][ identifier[_element] ][ identifier[_iso] ]={} identifier[_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[_database_folder] , identifier[_file] ) identifier[_dict] = identifier[_utilities] . identifier[get_sigma] ( identifier[database_file_name] = identifier[_file] , identifier[e_min] = identifier[self] . identifier[energy_min] , identifier[e_max] = identifier[self] . identifier[energy_max] , identifier[e_step] = identifier[self] . identifier[energy_step] ) identifier[stack_sigma] [ identifier[_compound] ][ identifier[_element] ][ identifier[_iso] ][ literal[string] ]= identifier[_dict] [ literal[string] ] identifier[stack_sigma] [ identifier[_compound] ][ identifier[_element] ][ identifier[_iso] ][ literal[string] ]= identifier[_dict] [ literal[string] ]* identifier[_ratio] identifier[stack_sigma] [ identifier[_compound] ][ identifier[_element] ][ identifier[_iso] ][ literal[string] ]= identifier[_dict] [ literal[string] ] identifier[_sigma_all_isotopes] += identifier[_dict] [ literal[string] ]* identifier[_ratio] identifier[_energy_all_isotpes] += identifier[_dict] [ literal[string] ] identifier[_mean_energy_all_isotopes] = identifier[_energy_all_isotpes] / identifier[len] ( identifier[_list_isotopes] ) identifier[stack_sigma] [ identifier[_compound] ][ identifier[_element] ][ literal[string] ]= identifier[_mean_energy_all_isotopes] identifier[stack_sigma] [ identifier[_compound] ][ identifier[_element] ][ literal[string] ]= identifier[_sigma_all_isotopes] identifier[self] . identifier[stack_sigma] = identifier[stack_sigma]
def __get_sigmas(self): """will populate the stack_sigma dictionary with the energy and sigma array for all the compound/element and isotopes""" stack_sigma = {} _stack = self.stack _file_path = os.path.abspath(os.path.dirname(__file__)) _database_folder = os.path.join(_file_path, 'reference_data', self.database) _list_compounds = _stack.keys() for _compound in _list_compounds: _list_element = _stack[_compound]['elements'] stack_sigma[_compound] = {} for _element in _list_element: stack_sigma[_compound][_element] = {} _list_isotopes = _stack[_compound][_element]['isotopes']['list'] _list_file_names = _stack[_compound][_element]['isotopes']['file_names'] _list_isotopic_ratio = _stack[_compound][_element]['isotopes']['isotopic_ratio'] _iso_file_ratio = zip(_list_isotopes, _list_file_names, _list_isotopic_ratio) stack_sigma[_compound][_element]['isotopic_ratio'] = _list_isotopic_ratio # _dict_sigma_isotopes_sum = {} _sigma_all_isotopes = 0 _energy_all_isotpes = 0 for (_iso, _file, _ratio) in _iso_file_ratio: stack_sigma[_compound][_element][_iso] = {} _file = os.path.join(_database_folder, _file) _dict = _utilities.get_sigma(database_file_name=_file, e_min=self.energy_min, e_max=self.energy_max, e_step=self.energy_step) stack_sigma[_compound][_element][_iso]['energy_eV'] = _dict['energy_eV'] stack_sigma[_compound][_element][_iso]['sigma_b'] = _dict['sigma_b'] * _ratio stack_sigma[_compound][_element][_iso]['sigma_b_raw'] = _dict['sigma_b'] # sigma for all isotopes with their isotopic ratio _sigma_all_isotopes += _dict['sigma_b'] * _ratio _energy_all_isotpes += _dict['energy_eV'] # depends on [control=['for'], data=[]] # energy axis (x-axis) is averaged to take into account differences between x-axis of isotopes _mean_energy_all_isotopes = _energy_all_isotpes / len(_list_isotopes) stack_sigma[_compound][_element]['energy_eV'] = _mean_energy_all_isotopes stack_sigma[_compound][_element]['sigma_b'] = _sigma_all_isotopes # depends on [control=['for'], data=['_element']] # depends on [control=['for'], data=['_compound']] self.stack_sigma = stack_sigma
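The element-level quantities assembled above reduce to sigma_element(E) = sum_i ratio_i * sigma_i(E), on an energy grid averaged over the isotopes. A toy two-isotope check with invented numbers, not nuclear data:

isotopes = [
    {'ratio': 0.9, 'energy_eV': [1.0, 2.0], 'sigma_b': [10.0, 8.0]},
    {'ratio': 0.1, 'energy_eV': [1.0, 2.0], 'sigma_b': [50.0, 40.0]},
]

sigma = [0.0, 0.0]
energy = [0.0, 0.0]
for iso in isotopes:
    for j in range(2):
        sigma[j] += iso['ratio'] * iso['sigma_b'][j]      # abundance-weighted sum
        energy[j] += iso['energy_eV'][j] / len(isotopes)  # mean energy grid

print([round(e, 3) for e in energy])  # [1.0, 2.0]
print([round(s, 3) for s in sigma])   # [14.0, 11.2] -> 0.9*10+0.1*50, 0.9*8+0.1*40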
def write(url, object_, **args): """Writes an object to a data URI.""" default_content_type = ('text/plain', {'charset': 'US-ASCII'}) content_encoding = args.get('content_encoding', 'base64') content_type, params = args.get('content_type', default_content_type) data = content_types.get(content_type).format(object_, **params) args['data'].write('data:{}'.format(content_type)) for param, value in params.items(): args['data'].write(';{}={}'.format(param, value)) if content_encoding == 'base64': args['data'].write(';base64,{}'.format(base64.b64encode(data))) else: args['data'].write(',{}'.format(urllib.quote(data))) args['data'].seek(0)
def function[write, parameter[url, object_]]: constant[Writes an object to a data URI.] variable[default_content_type] assign[=] tuple[[<ast.Constant object at 0x7da1b26ad900>, <ast.Dict object at 0x7da1b26afd60>]] variable[content_encoding] assign[=] call[name[args].get, parameter[constant[content_encoding], constant[base64]]] <ast.Tuple object at 0x7da1b26ad660> assign[=] call[name[args].get, parameter[constant[content_type], name[default_content_type]]] variable[data] assign[=] call[call[name[content_types].get, parameter[name[content_type]]].format, parameter[name[object_]]] call[call[name[args]][constant[data]].write, parameter[call[constant[data:{}].format, parameter[name[content_type]]]]] for taget[tuple[[<ast.Name object at 0x7da1b26adcc0>, <ast.Name object at 0x7da1b26acf70>]]] in starred[call[name[params].items, parameter[]]] begin[:] call[call[name[args]][constant[data]].write, parameter[call[constant[;{}={}].format, parameter[name[param], name[value]]]]] if compare[name[content_encoding] equal[==] constant[base64]] begin[:] call[call[name[args]][constant[data]].write, parameter[call[constant[;base64,{}].format, parameter[call[name[base64].b64encode, parameter[name[data]]]]]]] call[call[name[args]][constant[data]].seek, parameter[constant[0]]]
keyword[def] identifier[write] ( identifier[url] , identifier[object_] ,** identifier[args] ): literal[string] identifier[default_content_type] =( literal[string] ,{ literal[string] : literal[string] }) identifier[content_encoding] = identifier[args] . identifier[get] ( literal[string] , literal[string] ) identifier[content_type] , identifier[params] = identifier[args] . identifier[get] ( literal[string] , identifier[default_content_type] ) identifier[data] = identifier[content_types] . identifier[get] ( identifier[content_type] ). identifier[format] ( identifier[object_] ,** identifier[params] ) identifier[args] [ literal[string] ]. identifier[write] ( literal[string] . identifier[format] ( identifier[content_type] )) keyword[for] identifier[param] , identifier[value] keyword[in] identifier[params] . identifier[items] (): identifier[args] [ literal[string] ]. identifier[write] ( literal[string] . identifier[format] ( identifier[param] , identifier[value] )) keyword[if] identifier[content_encoding] == literal[string] : identifier[args] [ literal[string] ]. identifier[write] ( literal[string] . identifier[format] ( identifier[base64] . identifier[b64encode] ( identifier[data] ))) keyword[else] : identifier[args] [ literal[string] ]. identifier[write] ( literal[string] . identifier[format] ( identifier[urllib] . identifier[quote] ( identifier[data] ))) identifier[args] [ literal[string] ]. identifier[seek] ( literal[int] )
def write(url, object_, **args): """Writes an object to a data URI.""" default_content_type = ('text/plain', {'charset': 'US-ASCII'}) content_encoding = args.get('content_encoding', 'base64') (content_type, params) = args.get('content_type', default_content_type) data = content_types.get(content_type).format(object_, **params) args['data'].write('data:{}'.format(content_type)) for (param, value) in params.items(): args['data'].write(';{}={}'.format(param, value)) # depends on [control=['for'], data=[]] if content_encoding == 'base64': args['data'].write(';base64,{}'.format(base64.b64encode(data))) # depends on [control=['if'], data=[]] else: args['data'].write(',{}'.format(urllib.quote(data))) args['data'].seek(0)
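For a quick sanity check, a Python 3 analogue of the base64 branch above (the original uses Python 2's urllib.quote); the single hard-coded formatter and io.StringIO stream are stand-ins for the content_types registry and args['data'].

import base64
import io
import urllib.parse

def write_data_uri(out, text, encode_base64=True):
    content_type = 'text/plain;charset=US-ASCII'
    if encode_base64:
        # Base64-encode the payload, as required after the ';base64,' marker
        payload = base64.b64encode(text.encode('ascii')).decode('ascii')
        out.write('data:{};base64,{}'.format(content_type, payload))
    else:
        out.write('data:{},{}'.format(content_type, urllib.parse.quote(text)))

buf = io.StringIO()
write_data_uri(buf, 'hello')
print(buf.getvalue())  # data:text/plain;charset=US-ASCII;base64,aGVsbG8=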
def SendMessage(self, statement): """Here we're actually capturing messages and putting them into our output""" # The way messages are 'encapsulated' by Rekall is questionable, 99% of the # time it's way better to have a dictionary...shrug... message_type = statement[0] message_data = statement[1] self.output.append({'type': message_type, 'data': message_data})
def function[SendMessage, parameter[self, statement]]: constant[Here we're actually capturing messages and putting them into our output] variable[message_type] assign[=] call[name[statement]][constant[0]] variable[message_data] assign[=] call[name[statement]][constant[1]] call[name[self].output.append, parameter[dictionary[[<ast.Constant object at 0x7da18eb54640>, <ast.Constant object at 0x7da18eb569e0>], [<ast.Name object at 0x7da18eb56b90>, <ast.Name object at 0x7da18eb545b0>]]]]
keyword[def] identifier[SendMessage] ( identifier[self] , identifier[statement] ): literal[string] identifier[message_type] = identifier[statement] [ literal[int] ] identifier[message_data] = identifier[statement] [ literal[int] ] identifier[self] . identifier[output] . identifier[append] ({ literal[string] : identifier[message_type] , literal[string] : identifier[message_data] })
def SendMessage(self, statement): """Here we're actually capturing messages and putting them into our output""" # The way messages are 'encapsulated' by Rekall is questionable, 99% of the # time it's way better to have a dictionary...shrug... message_type = statement[0] message_data = statement[1] self.output.append({'type': message_type, 'data': message_data})
def get_observations(params: Dict) -> Dict[str, Any]: """Search observations, see: http://api.inaturalist.org/v1/docs/#!/Observations/get_observations. Returns the parsed JSON returned by iNaturalist (observations in r['results'], a list of dicts) """ r = make_inaturalist_api_get_call('observations', params=params) return r.json()
def function[get_observations, parameter[params]]: constant[Search observations, see: http://api.inaturalist.org/v1/docs/#!/Observations/get_observations. Returns the parsed JSON returned by iNaturalist (observations in r['results'], a list of dicts) ] variable[r] assign[=] call[name[make_inaturalist_api_get_call], parameter[constant[observations]]] return[call[name[r].json, parameter[]]]
keyword[def] identifier[get_observations] ( identifier[params] : identifier[Dict] )-> identifier[Dict] [ identifier[str] , identifier[Any] ]: literal[string] identifier[r] = identifier[make_inaturalist_api_get_call] ( literal[string] , identifier[params] = identifier[params] ) keyword[return] identifier[r] . identifier[json] ()
def get_observations(params: Dict) -> Dict[str, Any]: """Search observations, see: http://api.inaturalist.org/v1/docs/#!/Observations/get_observations. Returns the parsed JSON returned by iNaturalist (observations in r['results'], a list of dicts) """ r = make_inaturalist_api_get_call('observations', params=params) return r.json()
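make_inaturalist_api_get_call is a thin GET wrapper; an equivalent direct call with requests against the documented v1 endpoint. The query values and the species_guess field are illustrative, but observations do live under 'results' as the docstring notes.

import requests

params = {'taxon_name': 'Danaus plexippus', 'per_page': 5}
r = requests.get('https://api.inaturalist.org/v1/observations', params=params)
for obs in r.json()['results']:  # observations live under 'results'
    print(obs['id'], obs.get('species_guess'))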
def CreateGRRTempFile(filename=None, lifetime=0, mode="w+b", suffix=""): """Open file with GRR prefix in directory to allow easy deletion. Missing parent dirs will be created. If an existing directory is specified its permissions won't be modified to avoid breaking system functionality. Permissions on the destination file will be set to root/SYSTEM rw. On windows the file is created, then permissions are set. So there is potentially a race condition where the file is readable by other users. If the caller doesn't specify a directory on windows we use the directory we are executing from as a safe default. If lifetime is specified a housekeeping thread is created to delete the file after lifetime seconds. Files won't be deleted by default. Args: filename: The name of the file to use. Note that setting both filename and directory name is not allowed. lifetime: time in seconds before we should delete this tempfile. mode: The mode to open the file. suffix: optional suffix to use for the temp file Returns: Python file object Raises: OSError: on permission denied ErrorBadPath: if path is not absolute ValueError: if Client.tempfile_prefix is undefined in the config. """ directory = GetDefaultGRRTempDirectory() EnsureTempDirIsSane(directory) prefix = config.CONFIG.Get("Client.tempfile_prefix") if filename is None: outfile = tempfile.NamedTemporaryFile( prefix=prefix, suffix=suffix, dir=directory, delete=False) else: if filename.startswith("/") or filename.startswith("\\"): raise ValueError("Filename must be relative") if suffix: filename = "%s.%s" % (filename, suffix) outfile = open(os.path.join(directory, filename), mode) if lifetime > 0: cleanup = threading.Timer(lifetime, DeleteGRRTempFile, (outfile.name,)) cleanup.start() # Fix perms on the file, since this code is used for writing executable blobs # we apply RWX. if sys.platform == "win32": from grr_response_client import client_utils_windows # pylint: disable=g-import-not-at-top client_utils_windows.WinChmod(outfile.name, ["FILE_ALL_ACCESS"]) else: os.chmod(outfile.name, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR) return outfile
def function[CreateGRRTempFile, parameter[filename, lifetime, mode, suffix]]: constant[Open file with GRR prefix in directory to allow easy deletion. Missing parent dirs will be created. If an existing directory is specified its permissions won't be modified to avoid breaking system functionality. Permissions on the destination file will be set to root/SYSTEM rw. On windows the file is created, then permissions are set. So there is potentially a race condition where the file is readable by other users. If the caller doesn't specify a directory on windows we use the directory we are executing from as a safe default. If lifetime is specified a housekeeping thread is created to delete the file after lifetime seconds. Files won't be deleted by default. Args: filename: The name of the file to use. Note that setting both filename and directory name is not allowed. lifetime: time in seconds before we should delete this tempfile. mode: The mode to open the file. suffix: optional suffix to use for the temp file Returns: Python file object Raises: OSError: on permission denied ErrorBadPath: if path is not absolute ValueError: if Client.tempfile_prefix is undefined in the config. ] variable[directory] assign[=] call[name[GetDefaultGRRTempDirectory], parameter[]] call[name[EnsureTempDirIsSane], parameter[name[directory]]] variable[prefix] assign[=] call[name[config].CONFIG.Get, parameter[constant[Client.tempfile_prefix]]] if compare[name[filename] is constant[None]] begin[:] variable[outfile] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]] if compare[name[lifetime] greater[>] constant[0]] begin[:] variable[cleanup] assign[=] call[name[threading].Timer, parameter[name[lifetime], name[DeleteGRRTempFile], tuple[[<ast.Attribute object at 0x7da1b1b85270>]]]] call[name[cleanup].start, parameter[]] if compare[name[sys].platform equal[==] constant[win32]] begin[:] from relative_module[grr_response_client] import module[client_utils_windows] call[name[client_utils_windows].WinChmod, parameter[name[outfile].name, list[[<ast.Constant object at 0x7da1b1c0cc70>]]]] return[name[outfile]]
keyword[def] identifier[CreateGRRTempFile] ( identifier[filename] = keyword[None] , identifier[lifetime] = literal[int] , identifier[mode] = literal[string] , identifier[suffix] = literal[string] ): literal[string] identifier[directory] = identifier[GetDefaultGRRTempDirectory] () identifier[EnsureTempDirIsSane] ( identifier[directory] ) identifier[prefix] = identifier[config] . identifier[CONFIG] . identifier[Get] ( literal[string] ) keyword[if] identifier[filename] keyword[is] keyword[None] : identifier[outfile] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[prefix] = identifier[prefix] , identifier[suffix] = identifier[suffix] , identifier[dir] = identifier[directory] , identifier[delete] = keyword[False] ) keyword[else] : keyword[if] identifier[filename] . identifier[startswith] ( literal[string] ) keyword[or] identifier[filename] . identifier[startswith] ( literal[string] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[suffix] : identifier[filename] = literal[string] %( identifier[filename] , identifier[suffix] ) identifier[outfile] = identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[filename] ), identifier[mode] ) keyword[if] identifier[lifetime] > literal[int] : identifier[cleanup] = identifier[threading] . identifier[Timer] ( identifier[lifetime] , identifier[DeleteGRRTempFile] ,( identifier[outfile] . identifier[name] ,)) identifier[cleanup] . identifier[start] () keyword[if] identifier[sys] . identifier[platform] == literal[string] : keyword[from] identifier[grr_response_client] keyword[import] identifier[client_utils_windows] identifier[client_utils_windows] . identifier[WinChmod] ( identifier[outfile] . identifier[name] ,[ literal[string] ]) keyword[else] : identifier[os] . identifier[chmod] ( identifier[outfile] . identifier[name] , identifier[stat] . identifier[S_IXUSR] | identifier[stat] . identifier[S_IRUSR] | identifier[stat] . identifier[S_IWUSR] ) keyword[return] identifier[outfile]
def CreateGRRTempFile(filename=None, lifetime=0, mode='w+b', suffix=''): """Open file with GRR prefix in directory to allow easy deletion. Missing parent dirs will be created. If an existing directory is specified its permissions won't be modified to avoid breaking system functionality. Permissions on the destination file will be set to root/SYSTEM rw. On windows the file is created, then permissions are set. So there is potentially a race condition where the file is readable by other users. If the caller doesn't specify a directory on windows we use the directory we are executing from as a safe default. If lifetime is specified a housekeeping thread is created to delete the file after lifetime seconds. Files won't be deleted by default. Args: filename: The name of the file to use. Note that setting both filename and directory name is not allowed. lifetime: time in seconds before we should delete this tempfile. mode: The mode to open the file. suffix: optional suffix to use for the temp file Returns: Python file object Raises: OSError: on permission denied ErrorBadPath: if path is not absolute ValueError: if Client.tempfile_prefix is undefined in the config. """ directory = GetDefaultGRRTempDirectory() EnsureTempDirIsSane(directory) prefix = config.CONFIG.Get('Client.tempfile_prefix') if filename is None: outfile = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory, delete=False) # depends on [control=['if'], data=[]] else: if filename.startswith('/') or filename.startswith('\\'): raise ValueError('Filename must be relative') # depends on [control=['if'], data=[]] if suffix: filename = '%s.%s' % (filename, suffix) # depends on [control=['if'], data=[]] outfile = open(os.path.join(directory, filename), mode) if lifetime > 0: cleanup = threading.Timer(lifetime, DeleteGRRTempFile, (outfile.name,)) cleanup.start() # depends on [control=['if'], data=['lifetime']] # Fix perms on the file, since this code is used for writing executable blobs # we apply RWX. if sys.platform == 'win32': from grr_response_client import client_utils_windows # pylint: disable=g-import-not-at-top client_utils_windows.WinChmod(outfile.name, ['FILE_ALL_ACCESS']) # depends on [control=['if'], data=[]] else: os.chmod(outfile.name, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR) return outfile
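The lifetime handling above reduces to tempfile plus threading.Timer; a platform-neutral sketch without the GRR config and permission plumbing, with invented names.

import os
import tempfile
import threading
import time

def create_temp_with_lifetime(lifetime, prefix='tmp_demo_'):
    f = tempfile.NamedTemporaryFile(prefix=prefix, delete=False)
    if lifetime > 0:
        # Best-effort deletion after `lifetime` seconds, like DeleteGRRTempFile
        threading.Timer(lifetime, lambda p: os.path.exists(p) and os.remove(p), (f.name,)).start()
    return f

f = create_temp_with_lifetime(1)
f.write(b'scratch data')
f.close()
time.sleep(1.5)
print(os.path.exists(f.name))  # False: the timer already removed it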
def flatten(*sequence): """Flatten nested sequences into one.""" result = [] for entry in sequence: if isinstance(entry, list): result += Select.flatten(*entry) elif isinstance(entry, tuple): result += Select.flatten(*entry) else: result.append(entry) return result
def function[flatten, parameter[]]: constant[Flatten nested sequences into one.] variable[result] assign[=] list[[]] for taget[name[entry]] in starred[name[sequence]] begin[:] if call[name[isinstance], parameter[name[entry], name[list]]] begin[:] <ast.AugAssign object at 0x7da18f58ddb0> return[name[result]]
keyword[def] identifier[flatten] (* identifier[sequence] ): literal[string] identifier[result] =[] keyword[for] identifier[entry] keyword[in] identifier[sequence] : keyword[if] identifier[isinstance] ( identifier[entry] , identifier[list] ): identifier[result] += identifier[Select] . identifier[flatten] (* identifier[entry] ) keyword[elif] identifier[isinstance] ( identifier[entry] , identifier[tuple] ): identifier[result] += identifier[Select] . identifier[flatten] (* identifier[entry] ) keyword[else] : identifier[result] . identifier[append] ( identifier[entry] ) keyword[return] identifier[result]
def flatten(*sequence): """Flatten nested sequences into one.""" result = [] for entry in sequence: if isinstance(entry, list): result += Select.flatten(*entry) # depends on [control=['if'], data=[]] elif isinstance(entry, tuple): result += Select.flatten(*entry) # depends on [control=['if'], data=[]] else: result.append(entry) # depends on [control=['for'], data=['entry']] return result
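A behaviour check for the staticmethod above, restated on a minimal Select class (as the recursive Select.flatten calls imply), with the duplicated list and tuple branches merged into one isinstance check.

class Select:
    @staticmethod
    def flatten(*sequence):
        result = []
        for entry in sequence:
            if isinstance(entry, (list, tuple)):  # one branch covers both cases
                result += Select.flatten(*entry)
            else:
                result.append(entry)
        return result

print(Select.flatten(1, [2, (3, [4])], 5))  # [1, 2, 3, 4, 5]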
def init_core(app): """Init core objects.""" from rio import models # noqa db.init_app(app) celery.init_app(app) redis.init_app(app) cache.init_app(app) sentry.init_app(app) graph.init_app(app) setup_migrate(app) setup_user_manager(app)
def function[init_core, parameter[app]]: constant[Init core objects.] from relative_module[rio] import module[models] call[name[db].init_app, parameter[name[app]]] call[name[celery].init_app, parameter[name[app]]] call[name[redis].init_app, parameter[name[app]]] call[name[cache].init_app, parameter[name[app]]] call[name[sentry].init_app, parameter[name[app]]] call[name[graph].init_app, parameter[name[app]]] call[name[setup_migrate], parameter[name[app]]] call[name[setup_user_manager], parameter[name[app]]]
keyword[def] identifier[init_core] ( identifier[app] ): literal[string] keyword[from] identifier[rio] keyword[import] identifier[models] identifier[db] . identifier[init_app] ( identifier[app] ) identifier[celery] . identifier[init_app] ( identifier[app] ) identifier[redis] . identifier[init_app] ( identifier[app] ) identifier[cache] . identifier[init_app] ( identifier[app] ) identifier[sentry] . identifier[init_app] ( identifier[app] ) identifier[graph] . identifier[init_app] ( identifier[app] ) identifier[setup_migrate] ( identifier[app] ) identifier[setup_user_manager] ( identifier[app] )
def init_core(app): """Init core objects.""" from rio import models # noqa db.init_app(app) celery.init_app(app) redis.init_app(app) cache.init_app(app) sentry.init_app(app) graph.init_app(app) setup_migrate(app) setup_user_manager(app)
def add_device(self, **kwargs): """Add a new device to catalog. .. code-block:: python device = { "mechanism": "connector", "certificate_fingerprint": "<certificate>", "name": "New device name", "certificate_issuer_id": "<id>" } resp = api.add_device(**device) print(resp.created_at) :param str certificate_fingerprint: Fingerprint of the device certificate :param str certificate_issuer_id: ID of the issuer of the certificate :param str name: The name of the device :param str account_id: The owning Identity and Access Management (IAM) account ID :param obj custom_attributes: Up to 5 custom JSON attributes :param str description: The description of the device :param str device_class: Class of the device :param str id: The ID of the device :param str manifest_url: URL for the current device manifest :param str mechanism: The ID of the channel used to communicate with the device :param str mechanism_url: The address of the connector to use :param str serial_number: The serial number of the device :param str state: The current state of the device :param int trust_class: The device trust class :param str vendor_id: The device vendor ID :param str alias: The alias of the device :param str device_type: The endpoint type of the device - e.g. if the device is a gateway :param str host_gateway: The endpoint_name of the host gateway, if appropriate :param datetime bootstrap_certificate_expiration: :param datetime connector_certificate_expiration: Expiration date of the certificate used to connect to connector server :param int device_execution_mode: The device class :param str firmware_checksum: The SHA256 checksum of the current firmware image :param datetime manifest_timestamp: The timestamp of the current manifest version :return: the newly created device object. :rtype: Device """ api = self._get_api(device_directory.DefaultApi) device = Device._create_request_map(kwargs) device = DeviceData(**device) return Device(api.device_create(device))
def function[add_device, parameter[self]]: constant[Add a new device to catalog. .. code-block:: python device = { "mechanism": "connector", "certificate_fingerprint": "<certificate>", "name": "New device name", "certificate_issuer_id": "<id>" } resp = api.add_device(**device) print(resp.created_at) :param str certificate_fingerprint: Fingerprint of the device certificate :param str certificate_issuer_id: ID of the issuer of the certificate :param str name: The name of the device :param str account_id: The owning Identity and Access Management (IAM) account ID :param obj custom_attributes: Up to 5 custom JSON attributes :param str description: The description of the device :param str device_class: Class of the device :param str id: The ID of the device :param str manifest_url: URL for the current device manifest :param str mechanism: The ID of the channel used to communicate with the device :param str mechanism_url: The address of the connector to use :param str serial_number: The serial number of the device :param str state: The current state of the device :param int trust_class: The device trust class :param str vendor_id: The device vendor ID :param str alias: The alias of the device :param str device_type: The endpoint type of the device - e.g. if the device is a gateway :param str host_gateway: The endpoint_name of the host gateway, if appropriate :param datetime bootstrap_certificate_expiration: :param datetime connector_certificate_expiration: Expiration date of the certificate used to connect to connector server :param int device_execution_mode: The device class :param str firmware_checksum: The SHA256 checksum of the current firmware image :param datetime manifest_timestamp: The timestamp of the current manifest version :return: the newly created device object. :rtype: Device ] variable[api] assign[=] call[name[self]._get_api, parameter[name[device_directory].DefaultApi]] variable[device] assign[=] call[name[Device]._create_request_map, parameter[name[kwargs]]] variable[device] assign[=] call[name[DeviceData], parameter[]] return[call[name[Device], parameter[call[name[api].device_create, parameter[name[device]]]]]]
keyword[def] identifier[add_device] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[api] = identifier[self] . identifier[_get_api] ( identifier[device_directory] . identifier[DefaultApi] ) identifier[device] = identifier[Device] . identifier[_create_request_map] ( identifier[kwargs] ) identifier[device] = identifier[DeviceData] (** identifier[device] ) keyword[return] identifier[Device] ( identifier[api] . identifier[device_create] ( identifier[device] ))
def add_device(self, **kwargs): """Add a new device to catalog. .. code-block:: python device = { "mechanism": "connector", "certificate_fingerprint": "<certificate>", "name": "New device name", "certificate_issuer_id": "<id>" } resp = api.add_device(**device) print(resp.created_at) :param str certificate_fingerprint: Fingerprint of the device certificate :param str certificate_issuer_id: ID of the issuer of the certificate :param str name: The name of the device :param str account_id: The owning Identity and Access Management (IAM) account ID :param obj custom_attributes: Up to 5 custom JSON attributes :param str description: The description of the device :param str device_class: Class of the device :param str id: The ID of the device :param str manifest_url: URL for the current device manifest :param str mechanism: The ID of the channel used to communicate with the device :param str mechanism_url: The address of the connector to use :param str serial_number: The serial number of the device :param str state: The current state of the device :param int trust_class: The device trust class :param str vendor_id: The device vendor ID :param str alias: The alias of the device :param str device_type: The endpoint type of the device - e.g. if the device is a gateway :param str host_gateway: The endpoint_name of the host gateway, if appropriate :param datetime bootstrap_certificate_expiration: :param datetime connector_certificate_expiration: Expiration date of the certificate used to connect to connector server :param int device_execution_mode: The device class :param str firmware_checksum: The SHA256 checksum of the current firmware image :param datetime manifest_timestamp: The timestamp of the current manifest version :return: the newly created device object. :rtype: Device """ api = self._get_api(device_directory.DefaultApi) device = Device._create_request_map(kwargs) device = DeviceData(**device) return Device(api.device_create(device))
def gaussian_prior_model_for_arguments(self, arguments): """ Create a new galaxy prior from a set of arguments, replacing the priors of some of this galaxy prior's prior models with new arguments. Parameters ---------- arguments: dict A dictionary mapping between old priors and their replacements. Returns ------- new_model: GalaxyModel A model with some or all priors replaced. """ new_model = copy.deepcopy(self) for key, value in filter(lambda t: isinstance(t[1], pm.PriorModel), self.__dict__.items()): setattr(new_model, key, value.gaussian_prior_model_for_arguments(arguments)) return new_model
def function[gaussian_prior_model_for_arguments, parameter[self, arguments]]: constant[ Create a new galaxy prior from a set of arguments, replacing the priors of some of this galaxy prior's prior models with new arguments. Parameters ---------- arguments: dict A dictionary mapping between old priors and their replacements. Returns ------- new_model: GalaxyModel A model with some or all priors replaced. ] variable[new_model] assign[=] call[name[copy].deepcopy, parameter[name[self]]] for taget[tuple[[<ast.Name object at 0x7da204621ba0>, <ast.Name object at 0x7da204622410>]]] in starred[call[name[filter], parameter[<ast.Lambda object at 0x7da204620d60>, call[name[self].__dict__.items, parameter[]]]]] begin[:] call[name[setattr], parameter[name[new_model], name[key], call[name[value].gaussian_prior_model_for_arguments, parameter[name[arguments]]]]] return[name[new_model]]
keyword[def] identifier[gaussian_prior_model_for_arguments] ( identifier[self] , identifier[arguments] ): literal[string] identifier[new_model] = identifier[copy] . identifier[deepcopy] ( identifier[self] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[filter] ( keyword[lambda] identifier[t] : identifier[isinstance] ( identifier[t] [ literal[int] ], identifier[pm] . identifier[PriorModel] ), identifier[self] . identifier[__dict__] . identifier[items] ()): identifier[setattr] ( identifier[new_model] , identifier[key] , identifier[value] . identifier[gaussian_prior_model_for_arguments] ( identifier[arguments] )) keyword[return] identifier[new_model]
def gaussian_prior_model_for_arguments(self, arguments): """ Create a new galaxy prior from a set of arguments, replacing the priors of some of this galaxy prior's prior models with new arguments. Parameters ---------- arguments: dict A dictionary mapping between old priors and their replacements. Returns ------- new_model: GalaxyModel A model with some or all priors replaced. """ new_model = copy.deepcopy(self) for (key, value) in filter(lambda t: isinstance(t[1], pm.PriorModel), self.__dict__.items()): setattr(new_model, key, value.gaussian_prior_model_for_arguments(arguments)) # depends on [control=['for'], data=[]] return new_model
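A generic sketch of the deep-copy-and-rebind pattern above with plain stand-in classes; Prior and with_arguments are invented here in place of pm.PriorModel and gaussian_prior_model_for_arguments.

import copy

class Prior:
    def __init__(self, value):
        self.value = value

    def with_arguments(self, arguments):
        # Swap in a replacement when one is mapped, otherwise keep the value
        return Prior(arguments.get(self.value, self.value))

class Model:
    def __init__(self):
        self.name = 'galaxy'        # untouched: not a Prior
        self.redshift = Prior('z')  # rebound via with_arguments

    def for_arguments(self, arguments):
        new_model = copy.deepcopy(self)
        for key, value in self.__dict__.items():
            if isinstance(value, Prior):
                setattr(new_model, key, value.with_arguments(arguments))
        return new_model

m = Model().for_arguments({'z': 0.5})
print(m.name, m.redshift.value)  # galaxy 0.5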
def get_code(self): """Return the code embedded in the JSON error response body, or an empty string if the JSON couldn't be parsed. This should always match the 'http_response'.""" result = '' if self._data_struct is not None: result = self._data_struct[KEY_CODE] return result
def function[get_code, parameter[self]]: constant[Return the code embedded in the JSON error response body, or an empty string if the JSON couldn't be parsed. This should always match the 'http_response'.] variable[result] assign[=] constant[] if compare[name[self]._data_struct is_not constant[None]] begin[:] variable[result] assign[=] call[name[self]._data_struct][name[KEY_CODE]] return[name[result]]
keyword[def] identifier[get_code] ( identifier[self] ): literal[string] identifier[result] = literal[string] keyword[if] identifier[self] . identifier[_data_struct] keyword[is] keyword[not] keyword[None] : identifier[result] = identifier[self] . identifier[_data_struct] [ identifier[KEY_CODE] ] keyword[return] identifier[result]
def get_code(self): """Return the code embedded in the JSON error response body, or an empty string if the JSON couldn't be parsed. This should always match the 'http_response'.""" result = '' if self._data_struct is not None: result = self._data_struct[KEY_CODE] # depends on [control=['if'], data=[]] return result
def unwrap_self_for_multiprocessing(arg): """ You cannot call bound methods with multiprocessing, only free functions. If you want to call inst.method(arg0, arg1), unwrap_self_for_multiprocessing((inst, "method", (arg0, arg1))) does the trick. """ (inst, method_name, args) = arg return getattr(inst, method_name)(*args)
def function[unwrap_self_for_multiprocessing, parameter[arg]]: constant[ You cannot call bound methods with multiprocessing, only free functions. If you want to call inst.method(arg0, arg1), unwrap_self_for_multiprocessing((inst, "method", (arg0, arg1))) does the trick. ] <ast.Tuple object at 0x7da18f721600> assign[=] name[arg] return[call[call[name[getattr], parameter[name[inst], name[method_name]]], parameter[<ast.Starred object at 0x7da18f7233a0>]]]
keyword[def] identifier[unwrap_self_for_multiprocessing] ( identifier[arg] ): literal[string] ( identifier[inst] , identifier[method_name] , identifier[args] )= identifier[arg] keyword[return] identifier[getattr] ( identifier[inst] , identifier[method_name] )(* identifier[args] )
def unwrap_self_for_multiprocessing(arg): """ You cannot call bound methods with multiprocessing, only free functions. If you want to call inst.method(arg0, arg1), unwrap_self_for_multiprocessing((inst, "method", (arg0, arg1))) does the trick. """ (inst, method_name, args) = arg return getattr(inst, method_name)(*args)
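Typical use with multiprocessing.Pool: each work item is a single (instance, method-name, args) tuple matching the one-argument signature. The function is restated so the snippet is self-contained, and Adder is an invented example class.

import multiprocessing

def unwrap_self_for_multiprocessing(arg):
    (inst, method_name, args) = arg
    return getattr(inst, method_name)(*args)

class Adder:
    def add(self, a, b):
        return a + b

if __name__ == '__main__':
    work = [(Adder(), 'add', (i, i)) for i in range(4)]
    with multiprocessing.Pool(2) as pool:
        print(pool.map(unwrap_self_for_multiprocessing, work))  # [0, 2, 4, 6]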
def split_key_string(key): """Splits a key string (of the form, e.g. ``'C# major'``), into a tuple of ``(key, mode)`` where ``key`` is an integer representing the semitone distance from C. Parameters ---------- key : str String representing a key. Returns ------- key : int Number of semitones above C. mode : str String representing the mode. """ key, mode = key.split() return KEY_TO_SEMITONE[key.lower()], mode
def function[split_key_string, parameter[key]]: constant[Splits a key string (of the form, e.g. ``'C# major'``), into a tuple of ``(key, mode)`` where ``key`` is an integer representing the semitone distance from C. Parameters ---------- key : str String representing a key. Returns ------- key : int Number of semitones above C. mode : str String representing the mode. ] <ast.Tuple object at 0x7da1b0ff1990> assign[=] call[name[key].split, parameter[]] return[tuple[[<ast.Subscript object at 0x7da1b0ff16f0>, <ast.Name object at 0x7da1b0ff02b0>]]]
keyword[def] identifier[split_key_string] ( identifier[key] ): literal[string] identifier[key] , identifier[mode] = identifier[key] . identifier[split] () keyword[return] identifier[KEY_TO_SEMITONE] [ identifier[key] . identifier[lower] ()], identifier[mode]
def split_key_string(key): """Splits a key string (of the form, e.g. ``'C# major'``), into a tuple of ``(key, mode)`` where ``key`` is an integer representing the semitone distance from C. Parameters ---------- key : str String representing a key. Returns ------- key : int Number of semitones above C. mode : str String representing the mode. """ (key, mode) = key.split() return (KEY_TO_SEMITONE[key.lower()], mode)
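KEY_TO_SEMITONE is a module-level lookup table; a self-contained run with a reduced mapping containing only the entries this demo needs.

KEY_TO_SEMITONE = {'c': 0, 'c#': 1, 'd': 2, 'a': 9}  # reduced demo mapping

def split_key_string(key):
    key, mode = key.split()
    return KEY_TO_SEMITONE[key.lower()], mode

print(split_key_string('C# major'))  # (1, 'major')
print(split_key_string('A minor'))   # (9, 'minor')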
def _get_cmap(kwargs: dict) -> colors.Colormap: """Get the colour map for plots that support it. Parameters ---------- cmap : str or colors.Colormap or list of colors A colormap name, a Colormap instance, or a list of colors. This can also be a seaborn palette (if seaborn is installed). """ from matplotlib.colors import ListedColormap cmap = kwargs.pop("cmap", default_cmap) if isinstance(cmap, list): return ListedColormap(cmap) if isinstance(cmap, str): try: cmap = plt.get_cmap(cmap) except BaseException as exc: try: # Try to use seaborn palette import seaborn as sns sns_palette = sns.color_palette(cmap, n_colors=256) cmap = ListedColormap(sns_palette, name=cmap) except ImportError: raise exc return cmap
def function[_get_cmap, parameter[kwargs]]: constant[Get the colour map for plots that support it. Parameters ---------- cmap : str or colors.Colormap or list of colors A colormap name, a Colormap instance, or a list of colors. This can also be a seaborn palette (if seaborn is installed). ] from relative_module[matplotlib.colors] import module[ListedColormap] variable[cmap] assign[=] call[name[kwargs].pop, parameter[constant[cmap], name[default_cmap]]] if call[name[isinstance], parameter[name[cmap], name[list]]] begin[:] return[call[name[ListedColormap], parameter[name[cmap]]]] if call[name[isinstance], parameter[name[cmap], name[str]]] begin[:] <ast.Try object at 0x7da207f022f0> return[name[cmap]]
keyword[def] identifier[_get_cmap] ( identifier[kwargs] : identifier[dict] )-> identifier[colors] . identifier[Colormap] : literal[string] keyword[from] identifier[matplotlib] . identifier[colors] keyword[import] identifier[ListedColormap] identifier[cmap] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[default_cmap] ) keyword[if] identifier[isinstance] ( identifier[cmap] , identifier[list] ): keyword[return] identifier[ListedColormap] ( identifier[cmap] ) keyword[if] identifier[isinstance] ( identifier[cmap] , identifier[str] ): keyword[try] : identifier[cmap] = identifier[plt] . identifier[get_cmap] ( identifier[cmap] ) keyword[except] identifier[BaseException] keyword[as] identifier[exc] : keyword[try] : keyword[import] identifier[seaborn] keyword[as] identifier[sns] identifier[sns_palette] = identifier[sns] . identifier[color_palette] ( identifier[cmap] , identifier[n_colors] = literal[int] ) identifier[cmap] = identifier[ListedColormap] ( identifier[sns_palette] , identifier[name] = identifier[cmap] ) keyword[except] identifier[ImportError] : keyword[raise] identifier[exc] keyword[return] identifier[cmap]
def _get_cmap(kwargs: dict) -> colors.Colormap: """Get the colour map for plots that support it. Parameters ---------- cmap : str or colors.Colormap or list of colors A map or an instance of cmap. This can also be a seaborn palette (if seaborn is installed). """ from matplotlib.colors import ListedColormap cmap = kwargs.pop('cmap', default_cmap) if isinstance(cmap, list): return ListedColormap(cmap) # depends on [control=['if'], data=[]] if isinstance(cmap, str): try: cmap = plt.get_cmap(cmap) # depends on [control=['try'], data=[]] except BaseException as exc: try: # Try to use seaborn palette import seaborn as sns sns_palette = sns.color_palette(cmap, n_colors=256) cmap = ListedColormap(sns_palette, name=cmap) # depends on [control=['try'], data=[]] except ImportError: raise exc # depends on [control=['except'], data=[]] # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]] return cmap
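A short usage sketch; it assumes the enclosing module already imports matplotlib.pyplot as plt and defines the module-level default_cmap fallback, since _get_cmap relies on both.

named = _get_cmap({"cmap": "viridis"})                # looked up via plt.get_cmap
listed = _get_cmap({"cmap": ["#ff0000", "#00ff00"]})  # wrapped in ListedColormap
fallback = _get_cmap({})                              # falls back to default_cmap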
def locale_first_weekday():
    """Figure out whether the week starts on Monday or Sunday."""
    first_weekday = 6  # by default settle on monday
    try:
        process = os.popen("locale first_weekday week-1stday")
        week_offset, week_start = process.read().split('\n')[:2]
        process.close()
        week_start = dt.date(*time.strptime(week_start, "%Y%m%d")[:3])
        week_offset = dt.timedelta(int(week_offset) - 1)
        beginning = week_start + week_offset
        first_weekday = int(beginning.strftime("%w"))
    except Exception:
        logger.warn("WARNING - Failed to get first weekday from locale")
    return first_weekday
def function[locale_first_weekday, parameter[]]: constant[figure if week starts on monday or sunday] variable[first_weekday] assign[=] constant[6] <ast.Try object at 0x7da2049617b0> return[name[first_weekday]]
keyword[def] identifier[locale_first_weekday] (): literal[string] identifier[first_weekday] = literal[int] keyword[try] : identifier[process] = identifier[os] . identifier[popen] ( literal[string] ) identifier[week_offset] , identifier[week_start] = identifier[process] . identifier[read] (). identifier[split] ( literal[string] )[: literal[int] ] identifier[process] . identifier[close] () identifier[week_start] = identifier[dt] . identifier[date] (* identifier[time] . identifier[strptime] ( identifier[week_start] , literal[string] )[: literal[int] ]) identifier[week_offset] = identifier[dt] . identifier[timedelta] ( identifier[int] ( identifier[week_offset] )- literal[int] ) identifier[beginning] = identifier[week_start] + identifier[week_offset] identifier[first_weekday] = identifier[int] ( identifier[beginning] . identifier[strftime] ( literal[string] )) keyword[except] : identifier[logger] . identifier[warn] ( literal[string] ) keyword[return] identifier[first_weekday]
def locale_first_weekday():
    """Figure out whether the week starts on Monday or Sunday.""" #by default settle on monday
    first_weekday = 6
    try:
        process = os.popen('locale first_weekday week-1stday')
        (week_offset, week_start) = process.read().split('\n')[:2]
        process.close()
        week_start = dt.date(*time.strptime(week_start, '%Y%m%d')[:3])
        week_offset = dt.timedelta(int(week_offset) - 1)
        beginning = week_start + week_offset
        first_weekday = int(beginning.strftime('%w')) # depends on [control=['try'], data=[]]
    except Exception:
        logger.warn('WARNING - Failed to get first weekday from locale') # depends on [control=['except'], data=[]]
    return first_weekday
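For reference, `locale first_weekday week-1stday` prints a week offset and an anchor date (YYYYMMDD) on two lines; the parsing above can be sanity-checked with canned values typical of a glibc locale (the two strings below are illustrative):

import datetime as dt
import time

week_offset, week_start = "2", "19971130"       # mimics the command's two output lines
anchor = dt.date(*time.strptime(week_start, "%Y%m%d")[:3])
beginning = anchor + dt.timedelta(int(week_offset) - 1)
print(int(beginning.strftime("%w")))            # 1 -> the week starts on Monday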
def clean(): """remove build artifacts""" shutil.rmtree('{PROJECT_NAME}.egg-info'.format(PROJECT_NAME=PROJECT_NAME), ignore_errors=True) shutil.rmtree('build', ignore_errors=True) shutil.rmtree('dist', ignore_errors=True) shutil.rmtree('htmlcov', ignore_errors=True) shutil.rmtree('__pycache__', ignore_errors=True)
def function[clean, parameter[]]: constant[remove build artifacts] call[name[shutil].rmtree, parameter[call[constant[{PROJECT_NAME}.egg-info].format, parameter[]]]] call[name[shutil].rmtree, parameter[constant[build]]] call[name[shutil].rmtree, parameter[constant[dist]]] call[name[shutil].rmtree, parameter[constant[htmlcov]]] call[name[shutil].rmtree, parameter[constant[__pycache__]]]
keyword[def] identifier[clean] (): literal[string] identifier[shutil] . identifier[rmtree] ( literal[string] . identifier[format] ( identifier[PROJECT_NAME] = identifier[PROJECT_NAME] ), identifier[ignore_errors] = keyword[True] ) identifier[shutil] . identifier[rmtree] ( literal[string] , identifier[ignore_errors] = keyword[True] ) identifier[shutil] . identifier[rmtree] ( literal[string] , identifier[ignore_errors] = keyword[True] ) identifier[shutil] . identifier[rmtree] ( literal[string] , identifier[ignore_errors] = keyword[True] ) identifier[shutil] . identifier[rmtree] ( literal[string] , identifier[ignore_errors] = keyword[True] )
def clean(): """remove build artifacts""" shutil.rmtree('{PROJECT_NAME}.egg-info'.format(PROJECT_NAME=PROJECT_NAME), ignore_errors=True) shutil.rmtree('build', ignore_errors=True) shutil.rmtree('dist', ignore_errors=True) shutil.rmtree('htmlcov', ignore_errors=True) shutil.rmtree('__pycache__', ignore_errors=True)
def safe(self, parentheses=True): """ Returns a string representation with special characters replaced by safer characters for use in file names. """ if not self: return "" string = str(self) string = string.replace("**", "_pow_") string = string.replace("*", "_mul_") string = string.replace("/", "_div_") string = string.replace("==", "_eq_") string = string.replace("<=", "_leq_") string = string.replace(">=", "_geq_") string = string.replace("<", "_lt_") string = string.replace(">", "_gt_") string = string.replace("&&", "_and_") string = string.replace("||", "_or_") string = string.replace("!", "not_") if parentheses: string = string.replace("(", "L") string = string.replace(")", "R") else: string = string.replace("(", "") string = string.replace(")", "") string = string.replace(" ", "") return string
def function[safe, parameter[self, parentheses]]: constant[ Returns a string representation with special characters replaced by safer characters for use in file names. ] if <ast.UnaryOp object at 0x7da1b11bf220> begin[:] return[constant[]] variable[string] assign[=] call[name[str], parameter[name[self]]] variable[string] assign[=] call[name[string].replace, parameter[constant[**], constant[_pow_]]] variable[string] assign[=] call[name[string].replace, parameter[constant[*], constant[_mul_]]] variable[string] assign[=] call[name[string].replace, parameter[constant[/], constant[_div_]]] variable[string] assign[=] call[name[string].replace, parameter[constant[==], constant[_eq_]]] variable[string] assign[=] call[name[string].replace, parameter[constant[<=], constant[_leq_]]] variable[string] assign[=] call[name[string].replace, parameter[constant[>=], constant[_geq_]]] variable[string] assign[=] call[name[string].replace, parameter[constant[<], constant[_lt_]]] variable[string] assign[=] call[name[string].replace, parameter[constant[>], constant[_gt_]]] variable[string] assign[=] call[name[string].replace, parameter[constant[&&], constant[_and_]]] variable[string] assign[=] call[name[string].replace, parameter[constant[||], constant[_or_]]] variable[string] assign[=] call[name[string].replace, parameter[constant[!], constant[not_]]] if name[parentheses] begin[:] variable[string] assign[=] call[name[string].replace, parameter[constant[(], constant[L]]] variable[string] assign[=] call[name[string].replace, parameter[constant[)], constant[R]]] variable[string] assign[=] call[name[string].replace, parameter[constant[ ], constant[]]] return[name[string]]
keyword[def] identifier[safe] ( identifier[self] , identifier[parentheses] = keyword[True] ): literal[string] keyword[if] keyword[not] identifier[self] : keyword[return] literal[string] identifier[string] = identifier[str] ( identifier[self] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[parentheses] : identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) keyword[else] : identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) identifier[string] = identifier[string] . identifier[replace] ( literal[string] , literal[string] ) keyword[return] identifier[string]
def safe(self, parentheses=True): """ Returns a string representation with special characters replaced by safer characters for use in file names. """ if not self: return '' # depends on [control=['if'], data=[]] string = str(self) string = string.replace('**', '_pow_') string = string.replace('*', '_mul_') string = string.replace('/', '_div_') string = string.replace('==', '_eq_') string = string.replace('<=', '_leq_') string = string.replace('>=', '_geq_') string = string.replace('<', '_lt_') string = string.replace('>', '_gt_') string = string.replace('&&', '_and_') string = string.replace('||', '_or_') string = string.replace('!', 'not_') if parentheses: string = string.replace('(', 'L') string = string.replace(')', 'R') # depends on [control=['if'], data=[]] else: string = string.replace('(', '') string = string.replace(')', '') string = string.replace(' ', '') return string
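A worked example of the replacement chain, assuming str(self) yields the expression text:

# If str(self) == "(a*b)/c <= d":
#   safe(parentheses=True)  -> "La_mul_bR_div_c_leq_d"
#   safe(parentheses=False) -> "a_mul_b_div_c_leq_d"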
def maybe_rate_limit(client, headers, atexit=False): """Optionally pause the process based on suggested rate interval.""" # pylint: disable=fixme # pylint: disable=global-statement # FIXME: Yes, I know this is not great. We'll fix it later. :-) global LAST_CLIENT, LAST_HEADERS if LAST_CLIENT and LAST_HEADERS: # Wait based on previous client/headers rate_limit(LAST_CLIENT, LAST_HEADERS, atexit=atexit) LAST_CLIENT = copy.copy(client) LAST_HEADERS = copy.copy(headers)
def function[maybe_rate_limit, parameter[client, headers, atexit]]: constant[Optionally pause the process based on suggested rate interval.] <ast.Global object at 0x7da1b19c0d30> if <ast.BoolOp object at 0x7da1b19c0490> begin[:] call[name[rate_limit], parameter[name[LAST_CLIENT], name[LAST_HEADERS]]] variable[LAST_CLIENT] assign[=] call[name[copy].copy, parameter[name[client]]] variable[LAST_HEADERS] assign[=] call[name[copy].copy, parameter[name[headers]]]
keyword[def] identifier[maybe_rate_limit] ( identifier[client] , identifier[headers] , identifier[atexit] = keyword[False] ): literal[string] keyword[global] identifier[LAST_CLIENT] , identifier[LAST_HEADERS] keyword[if] identifier[LAST_CLIENT] keyword[and] identifier[LAST_HEADERS] : identifier[rate_limit] ( identifier[LAST_CLIENT] , identifier[LAST_HEADERS] , identifier[atexit] = identifier[atexit] ) identifier[LAST_CLIENT] = identifier[copy] . identifier[copy] ( identifier[client] ) identifier[LAST_HEADERS] = identifier[copy] . identifier[copy] ( identifier[headers] )
def maybe_rate_limit(client, headers, atexit=False): """Optionally pause the process based on suggested rate interval.""" # pylint: disable=fixme # pylint: disable=global-statement # FIXME: Yes, I know this is not great. We'll fix it later. :-) global LAST_CLIENT, LAST_HEADERS if LAST_CLIENT and LAST_HEADERS: # Wait based on previous client/headers rate_limit(LAST_CLIENT, LAST_HEADERS, atexit=atexit) # depends on [control=['if'], data=[]] LAST_CLIENT = copy.copy(client) LAST_HEADERS = copy.copy(headers)
async def rename(self, name): """Rename this conversation. Hangouts only officially supports renaming group conversations, so custom names for one-to-one conversations may or may not appear in all first party clients. Args: name (str): New name. Raises: .NetworkError: If conversation cannot be renamed. """ await self._client.rename_conversation( hangouts_pb2.RenameConversationRequest( request_header=self._client.get_request_header(), new_name=name, event_request_header=self._get_event_request_header(), ) )
<ast.AsyncFunctionDef object at 0x7da20c6c5210>
keyword[async] keyword[def] identifier[rename] ( identifier[self] , identifier[name] ): literal[string] keyword[await] identifier[self] . identifier[_client] . identifier[rename_conversation] ( identifier[hangouts_pb2] . identifier[RenameConversationRequest] ( identifier[request_header] = identifier[self] . identifier[_client] . identifier[get_request_header] (), identifier[new_name] = identifier[name] , identifier[event_request_header] = identifier[self] . identifier[_get_event_request_header] (), ) )
async def rename(self, name): """Rename this conversation. Hangouts only officially supports renaming group conversations, so custom names for one-to-one conversations may or may not appear in all first party clients. Args: name (str): New name. Raises: .NetworkError: If conversation cannot be renamed. """ await self._client.rename_conversation(hangouts_pb2.RenameConversationRequest(request_header=self._client.get_request_header(), new_name=name, event_request_header=self._get_event_request_header()))
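A hedged usage sketch; `conversation` stands for an instance of the class this method belongs to, and the except clause assumes the surrounding library is hangups, where NetworkError is exported at the top level.

import hangups

async def label_conversation(conversation):
    try:
        await conversation.rename('Weekend plans')
    except hangups.NetworkError:
        print('Failed to rename conversation')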
def assign_complex_to_samples(items): """Assign complex inputs like variants and align outputs to samples. Handles list inputs to record conversion where we have inputs from multiple locations and need to ensure they are properly assigned to samples in many environments. The unpleasant approach here is to use standard file naming to match with samples so this can work in environments where we don't download/stream the input files (for space/time savings). """ extract_fns = {("variants", "samples"): _get_vcf_samples, ("align_bam",): _get_bam_samples} complex = {k: {} for k in extract_fns.keys()} for data in items: for k in complex: v = tz.get_in(k, data) if v is not None: for s in extract_fns[k](v, items): if s: complex[k][s] = v out = [] for data in items: for k in complex: newv = tz.get_in([k, dd.get_sample_name(data)], complex) if newv: data = tz.update_in(data, k, lambda x: newv) out.append(data) return out
def function[assign_complex_to_samples, parameter[items]]: constant[Assign complex inputs like variants and align outputs to samples. Handles list inputs to record conversion where we have inputs from multiple locations and need to ensure they are properly assigned to samples in many environments. The unpleasant approach here is to use standard file naming to match with samples so this can work in environments where we don't download/stream the input files (for space/time savings). ] variable[extract_fns] assign[=] dictionary[[<ast.Tuple object at 0x7da1b2345db0>, <ast.Tuple object at 0x7da1b23450f0>], [<ast.Name object at 0x7da1b2347280>, <ast.Name object at 0x7da1b2346ec0>]] variable[complex] assign[=] <ast.DictComp object at 0x7da1b2346680> for taget[name[data]] in starred[name[items]] begin[:] for taget[name[k]] in starred[name[complex]] begin[:] variable[v] assign[=] call[name[tz].get_in, parameter[name[k], name[data]]] if compare[name[v] is_not constant[None]] begin[:] for taget[name[s]] in starred[call[call[name[extract_fns]][name[k]], parameter[name[v], name[items]]]] begin[:] if name[s] begin[:] call[call[name[complex]][name[k]]][name[s]] assign[=] name[v] variable[out] assign[=] list[[]] for taget[name[data]] in starred[name[items]] begin[:] for taget[name[k]] in starred[name[complex]] begin[:] variable[newv] assign[=] call[name[tz].get_in, parameter[list[[<ast.Name object at 0x7da1b18941c0>, <ast.Call object at 0x7da1b18940a0>]], name[complex]]] if name[newv] begin[:] variable[data] assign[=] call[name[tz].update_in, parameter[name[data], name[k], <ast.Lambda object at 0x7da1b1894b80>]] call[name[out].append, parameter[name[data]]] return[name[out]]
keyword[def] identifier[assign_complex_to_samples] ( identifier[items] ): literal[string] identifier[extract_fns] ={( literal[string] , literal[string] ): identifier[_get_vcf_samples] , ( literal[string] ,): identifier[_get_bam_samples] } identifier[complex] ={ identifier[k] :{} keyword[for] identifier[k] keyword[in] identifier[extract_fns] . identifier[keys] ()} keyword[for] identifier[data] keyword[in] identifier[items] : keyword[for] identifier[k] keyword[in] identifier[complex] : identifier[v] = identifier[tz] . identifier[get_in] ( identifier[k] , identifier[data] ) keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[s] keyword[in] identifier[extract_fns] [ identifier[k] ]( identifier[v] , identifier[items] ): keyword[if] identifier[s] : identifier[complex] [ identifier[k] ][ identifier[s] ]= identifier[v] identifier[out] =[] keyword[for] identifier[data] keyword[in] identifier[items] : keyword[for] identifier[k] keyword[in] identifier[complex] : identifier[newv] = identifier[tz] . identifier[get_in] ([ identifier[k] , identifier[dd] . identifier[get_sample_name] ( identifier[data] )], identifier[complex] ) keyword[if] identifier[newv] : identifier[data] = identifier[tz] . identifier[update_in] ( identifier[data] , identifier[k] , keyword[lambda] identifier[x] : identifier[newv] ) identifier[out] . identifier[append] ( identifier[data] ) keyword[return] identifier[out]
def assign_complex_to_samples(items): """Assign complex inputs like variants and align outputs to samples. Handles list inputs to record conversion where we have inputs from multiple locations and need to ensure they are properly assigned to samples in many environments. The unpleasant approach here is to use standard file naming to match with samples so this can work in environments where we don't download/stream the input files (for space/time savings). """ extract_fns = {('variants', 'samples'): _get_vcf_samples, ('align_bam',): _get_bam_samples} complex = {k: {} for k in extract_fns.keys()} for data in items: for k in complex: v = tz.get_in(k, data) if v is not None: for s in extract_fns[k](v, items): if s: complex[k][s] = v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=['v']] # depends on [control=['for'], data=['k']] # depends on [control=['for'], data=['data']] out = [] for data in items: for k in complex: newv = tz.get_in([k, dd.get_sample_name(data)], complex) if newv: data = tz.update_in(data, k, lambda x: newv) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']] out.append(data) # depends on [control=['for'], data=['data']] return out
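The record juggling above is built on toolz's nested-dict helpers; here is a tiny demonstration of get_in/update_in over the same key paths (the values are illustrative, not real pipeline records):

import toolz as tz

data = {"variants": {"samples": "/path/batch.vcf.gz"}, "align_bam": None}
print(tz.get_in(("variants", "samples"), data))  # /path/batch.vcf.gz
print(tz.get_in(("align_bam",), data))           # None
# update_in applies the function to the value at the key path
data = tz.update_in(data, ("align_bam",), lambda _: "/path/sample1.bam")
print(data["align_bam"])                         # /path/sample1.bam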
def auto_cleaned_path_uuid4(instance, filename: str) -> str:
    """
    Gets upload path in this format: {MODEL_NAME}/{SAFE_UPLOADED_FILENAME}{SEPARATOR}{UUID4}{SUFFIX}.
    Use this function to prevent any collisions with existing files in the same folder.

    :param instance: Instance of model or model class.
    :param filename: Uploaded file name.
    :return: Target upload path.
    """
    stem, suffix = parse_filename(filename)
    base_dir = get_base_dir_from_object(instance)
    target_filename = get_safe_path_name(stem)
    rand_uuid = uuid.uuid4()
    return os.path.join(base_dir, "{target_filename}{SEPARATOR}{rand_uuid}{suffix}".format(target_filename=target_filename, SEPARATOR=SEPARATOR, rand_uuid=rand_uuid, suffix=suffix))
def function[auto_cleaned_path_uuid4, parameter[instance, filename]]: constant[ Gets upload path in this format: {MODEL_NAME}/{SAFE_UPLOADED_FILENAME}{SEPARATOR}{UUID4}{SUFFIX}. Use this function to prevent any collisions with existing files in same folder. :param instance: Instance of model or model class. :param filename: Uploaded file name. :return: Target upload path. ] <ast.Tuple object at 0x7da1b0aba920> assign[=] call[name[parse_filename], parameter[name[filename]]] variable[base_dir] assign[=] call[name[get_base_dir_from_object], parameter[name[instance]]] variable[target_filename] assign[=] call[name[get_safe_path_name], parameter[name[stem]]] variable[rand_uuid] assign[=] call[name[uuid].uuid4, parameter[]] return[call[name[os].path.join, parameter[name[base_dir], call[constant[{target_filename}{SEPARATOR}{rand_uuid}{suffix}].format, parameter[]]]]]
keyword[def] identifier[auto_cleaned_path_uuid4] ( identifier[instance] , identifier[filename] : identifier[str] )-> identifier[str] : literal[string] identifier[stem] , identifier[suffix] = identifier[parse_filename] ( identifier[filename] ) identifier[base_dir] = identifier[get_base_dir_from_object] ( identifier[instance] ) identifier[target_filename] = identifier[get_safe_path_name] ( identifier[stem] ) identifier[rand_uuid] = identifier[uuid] . identifier[uuid4] () keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , literal[string] . identifier[format] ( identifier[target_filename] = identifier[target_filename] , identifier[SEPARATOR] = identifier[SEPARATOR] , identifier[rand_uuid] = identifier[rand_uuid] , identifier[suffix] = identifier[suffix] ))
def auto_cleaned_path_uuid4(instance, filename: str) -> str:
    """
    Gets upload path in this format: {MODEL_NAME}/{SAFE_UPLOADED_FILENAME}{SEPARATOR}{UUID4}{SUFFIX}.
    Use this function to prevent any collisions with existing files in the same folder.

    :param instance: Instance of model or model class.
    :param filename: Uploaded file name.
    :return: Target upload path.
    """
    (stem, suffix) = parse_filename(filename)
    base_dir = get_base_dir_from_object(instance)
    target_filename = get_safe_path_name(stem)
    rand_uuid = uuid.uuid4()
    return os.path.join(base_dir, '{target_filename}{SEPARATOR}{rand_uuid}{suffix}'.format(target_filename=target_filename, SEPARATOR=SEPARATOR, rand_uuid=rand_uuid, suffix=suffix))
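The result shape, with invented inputs (the exact safe-name transform and SEPARATOR value come from helpers not shown, and every call yields a fresh uuid4):

# auto_cleaned_path_uuid4(report_instance, 'Q3 summary.pdf') might return
# 'report/q3_summary_8f14e45f-ceea-467f-9f4d-0cbb1a402f1c.pdf'
# i.e. {model dir}/{safe stem}{SEPARATOR}{uuid4}{suffix}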
def _byte_pad(data, bound=4):
    """
    GLTF wants chunks aligned with 4-byte boundaries,
    so this function will add padding to the end of a
    chunk of bytes so that it aligns with a specified
    boundary size

    Parameters
    --------------
    data : bytes
      Data to be padded
    bound : int
      Length of desired boundary

    Returns
    --------------
    padded : bytes
      Result where: (len(padded) % bound) == 0
    """
    bound = int(bound)
    if len(data) % bound != 0:
        pad = bytes(bound - (len(data) % bound))
        result = bytes().join([data, pad])
        assert (len(result) % bound) == 0
        return result
    return data
def function[_byte_pad, parameter[data, bound]]: constant[ GLTF wants chunks aligned with 4- byte boundaries so this function will add padding to the end of a chunk of bytes so that it aligns with a specified boundary size Parameters -------------- data : bytes Data to be padded bound : int Length of desired boundary Returns -------------- padded : bytes Result where: (len(padded) % bound) == 0 ] variable[bound] assign[=] call[name[int], parameter[name[bound]]] if compare[binary_operation[call[name[len], parameter[name[data]]] <ast.Mod object at 0x7da2590d6920> name[bound]] not_equal[!=] constant[0]] begin[:] variable[pad] assign[=] call[name[bytes], parameter[binary_operation[name[bound] - binary_operation[call[name[len], parameter[name[data]]] <ast.Mod object at 0x7da2590d6920> name[bound]]]]] variable[result] assign[=] call[call[name[bytes], parameter[]].join, parameter[list[[<ast.Name object at 0x7da1b22d6fb0>, <ast.Name object at 0x7da1b22d67a0>]]]] assert[compare[binary_operation[call[name[len], parameter[name[result]]] <ast.Mod object at 0x7da2590d6920> name[bound]] equal[==] constant[0]]] return[name[result]] return[name[data]]
keyword[def] identifier[_byte_pad] ( identifier[data] , identifier[bound] = literal[int] ): literal[string] identifier[bound] = identifier[int] ( identifier[bound] ) keyword[if] identifier[len] ( identifier[data] )% identifier[bound] != literal[int] : identifier[pad] = identifier[bytes] ( identifier[bound] -( identifier[len] ( identifier[data] )% identifier[bound] )) identifier[result] = identifier[bytes] (). identifier[join] ([ identifier[data] , identifier[pad] ]) keyword[assert] ( identifier[len] ( identifier[result] )% identifier[bound] )== literal[int] keyword[return] identifier[result] keyword[return] identifier[data]
def _byte_pad(data, bound=4):
    """
    GLTF wants chunks aligned with 4-byte boundaries,
    so this function will add padding to the end of a
    chunk of bytes so that it aligns with a specified
    boundary size

    Parameters
    --------------
    data : bytes
      Data to be padded
    bound : int
      Length of desired boundary

    Returns
    --------------
    padded : bytes
      Result where: (len(padded) % bound) == 0
    """
    bound = int(bound)
    if len(data) % bound != 0:
        pad = bytes(bound - len(data) % bound)
        result = bytes().join([data, pad])
        assert len(result) % bound == 0
        return result # depends on [control=['if'], data=[]]
    return data
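A quick check of the padding behaviour: five bytes are padded with zero bytes up to the next 4-byte boundary, while already-aligned input is returned unchanged.

print(_byte_pad(b'hello', bound=4))  # b'hello\x00\x00\x00' (length 8)
print(_byte_pad(b'data', bound=4))   # b'data' (already aligned, returned as-is)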
def run_experiment(self): """Sign up, run the ``participate`` method, then sign off and close the driver.""" try: self.sign_up() self.participate() if self.sign_off(): self.complete_experiment("worker_complete") else: self.complete_experiment("worker_failed") finally: self.driver.quit()
def function[run_experiment, parameter[self]]: constant[Sign up, run the ``participate`` method, then sign off and close the driver.] <ast.Try object at 0x7da1b0380910>
keyword[def] identifier[run_experiment] ( identifier[self] ): literal[string] keyword[try] : identifier[self] . identifier[sign_up] () identifier[self] . identifier[participate] () keyword[if] identifier[self] . identifier[sign_off] (): identifier[self] . identifier[complete_experiment] ( literal[string] ) keyword[else] : identifier[self] . identifier[complete_experiment] ( literal[string] ) keyword[finally] : identifier[self] . identifier[driver] . identifier[quit] ()
def run_experiment(self): """Sign up, run the ``participate`` method, then sign off and close the driver.""" try: self.sign_up() self.participate() if self.sign_off(): self.complete_experiment('worker_complete') # depends on [control=['if'], data=[]] else: self.complete_experiment('worker_failed') # depends on [control=['try'], data=[]] finally: self.driver.quit()
def get_likes(self, offset=0, limit=50): """ Get user's likes. """ response = self.client.get( self.client.USER_LIKES % (self.name, offset, limit)) return self._parse_response(response, strack)
def function[get_likes, parameter[self, offset, limit]]: constant[ Get user's likes. ] variable[response] assign[=] call[name[self].client.get, parameter[binary_operation[name[self].client.USER_LIKES <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0a84cd0>, <ast.Name object at 0x7da1b0a87b80>, <ast.Name object at 0x7da1b0a852a0>]]]]] return[call[name[self]._parse_response, parameter[name[response], name[strack]]]]
keyword[def] identifier[get_likes] ( identifier[self] , identifier[offset] = literal[int] , identifier[limit] = literal[int] ): literal[string] identifier[response] = identifier[self] . identifier[client] . identifier[get] ( identifier[self] . identifier[client] . identifier[USER_LIKES] %( identifier[self] . identifier[name] , identifier[offset] , identifier[limit] )) keyword[return] identifier[self] . identifier[_parse_response] ( identifier[response] , identifier[strack] )
def get_likes(self, offset=0, limit=50): """ Get user's likes. """ response = self.client.get(self.client.USER_LIKES % (self.name, offset, limit)) return self._parse_response(response, strack)
def __update_paths(self, settings): """ Set custom paths if necessary """ if not isinstance(settings, dict): return if 'custom_base_path' in settings: base_path = settings['custom_base_path'] base_path = join(dirname(__file__), base_path) self.__load_paths(base_path)
def function[__update_paths, parameter[self, settings]]: constant[ Set custom paths if necessary ] if <ast.UnaryOp object at 0x7da1b19509a0> begin[:] return[None] if compare[constant[custom_base_path] in name[settings]] begin[:] variable[base_path] assign[=] call[name[settings]][constant[custom_base_path]] variable[base_path] assign[=] call[name[join], parameter[call[name[dirname], parameter[name[__file__]]], name[base_path]]] call[name[self].__load_paths, parameter[name[base_path]]]
keyword[def] identifier[__update_paths] ( identifier[self] , identifier[settings] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[settings] , identifier[dict] ): keyword[return] keyword[if] literal[string] keyword[in] identifier[settings] : identifier[base_path] = identifier[settings] [ literal[string] ] identifier[base_path] = identifier[join] ( identifier[dirname] ( identifier[__file__] ), identifier[base_path] ) identifier[self] . identifier[__load_paths] ( identifier[base_path] )
def __update_paths(self, settings): """ Set custom paths if necessary """ if not isinstance(settings, dict): return # depends on [control=['if'], data=[]] if 'custom_base_path' in settings: base_path = settings['custom_base_path'] base_path = join(dirname(__file__), base_path) self.__load_paths(base_path) # depends on [control=['if'], data=['settings']]
def vbreak(image, mask=None, iterations=1):
    '''Remove horizontal breaks

    1 1 1    1 1 1
    0 1 0 -> 0 0 0 (this case only)
    1 1 1    1 1 1
    '''
    global vbreak_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, vbreak_table, False)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
def function[vbreak, parameter[image, mask, iterations]]: constant[Remove horizontal breaks 1 1 1 1 1 1 0 1 0 -> 0 0 0 (this case only) 1 1 1 1 1 1 ] <ast.Global object at 0x7da20c6e7e80> if compare[name[mask] is constant[None]] begin[:] variable[masked_image] assign[=] name[image] variable[result] assign[=] call[name[table_lookup], parameter[name[masked_image], name[vbreak_table], constant[False]]] if <ast.UnaryOp object at 0x7da20c6e4ca0> begin[:] call[name[result]][<ast.UnaryOp object at 0x7da20e963be0>] assign[=] call[name[image]][<ast.UnaryOp object at 0x7da20e961f60>] return[name[result]]
keyword[def] identifier[vbreak] ( identifier[image] , identifier[mask] = keyword[None] , identifier[iterations] = literal[int] ): literal[string] keyword[global] identifier[vbreak_table] keyword[if] identifier[mask] keyword[is] keyword[None] : identifier[masked_image] = identifier[image] keyword[else] : identifier[masked_image] = identifier[image] . identifier[astype] ( identifier[bool] ). identifier[copy] () identifier[masked_image] [~ identifier[mask] ]= keyword[False] identifier[result] = identifier[table_lookup] ( identifier[masked_image] , identifier[vbreak_table] , keyword[False] ) keyword[if] keyword[not] identifier[mask] keyword[is] keyword[None] : identifier[result] [~ identifier[mask] ]= identifier[image] [~ identifier[mask] ] keyword[return] identifier[result]
def vbreak(image, mask=None, iterations=1):
    """Remove horizontal breaks

    1 1 1    1 1 1
    0 1 0 -> 0 0 0 (this case only)
    1 1 1    1 1 1
    """
    if mask is None:
        masked_image = image # depends on [control=['if'], data=[]]
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, vbreak_table, False)
    if mask is not None:
        result[~mask] = image[~mask] # depends on [control=['if'], data=[]]
    return result
def status(self, external_id, **params): """ Retrieves the verification result for an App Verify transaction by external_id. To ensure a secure verification flow you must check the status using TeleSign's servers on your backend. Do not rely on the SDK alone to indicate a successful verification. See https://developer.telesign.com/docs/app-verify-android-sdk-self#section-get-status-service or https://developer.telesign.com/docs/app-verify-ios-sdk-self#section-get-status-service for detailed API documentation. """ return self.get(APPVERIFY_STATUS_RESOURCE.format(external_id=external_id), **params)
def function[status, parameter[self, external_id]]: constant[ Retrieves the verification result for an App Verify transaction by external_id. To ensure a secure verification flow you must check the status using TeleSign's servers on your backend. Do not rely on the SDK alone to indicate a successful verification. See https://developer.telesign.com/docs/app-verify-android-sdk-self#section-get-status-service or https://developer.telesign.com/docs/app-verify-ios-sdk-self#section-get-status-service for detailed API documentation. ] return[call[name[self].get, parameter[call[name[APPVERIFY_STATUS_RESOURCE].format, parameter[]]]]]
keyword[def] identifier[status] ( identifier[self] , identifier[external_id] ,** identifier[params] ): literal[string] keyword[return] identifier[self] . identifier[get] ( identifier[APPVERIFY_STATUS_RESOURCE] . identifier[format] ( identifier[external_id] = identifier[external_id] ), ** identifier[params] )
def status(self, external_id, **params): """ Retrieves the verification result for an App Verify transaction by external_id. To ensure a secure verification flow you must check the status using TeleSign's servers on your backend. Do not rely on the SDK alone to indicate a successful verification. See https://developer.telesign.com/docs/app-verify-android-sdk-self#section-get-status-service or https://developer.telesign.com/docs/app-verify-ios-sdk-self#section-get-status-service for detailed API documentation. """ return self.get(APPVERIFY_STATUS_RESOURCE.format(external_id=external_id), **params)
def read_config(): """ Read the configuration file and parse the different environments. Returns: ConfigParser object """ if not os.path.isfile(CONFIG): with open(CONFIG, "w"): pass parser = ConfigParser() parser.read(CONFIG) return parser
def function[read_config, parameter[]]: constant[ Read the configuration file and parse the different environments. Returns: ConfigParser object ] if <ast.UnaryOp object at 0x7da20e954a30> begin[:] with call[name[open], parameter[name[CONFIG], constant[w]]] begin[:] pass variable[parser] assign[=] call[name[ConfigParser], parameter[]] call[name[parser].read, parameter[name[CONFIG]]] return[name[parser]]
keyword[def] identifier[read_config] (): literal[string] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[CONFIG] ): keyword[with] identifier[open] ( identifier[CONFIG] , literal[string] ): keyword[pass] identifier[parser] = identifier[ConfigParser] () identifier[parser] . identifier[read] ( identifier[CONFIG] ) keyword[return] identifier[parser]
def read_config(): """ Read the configuration file and parse the different environments. Returns: ConfigParser object """ if not os.path.isfile(CONFIG): with open(CONFIG, 'w'): pass # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] parser = ConfigParser() parser.read(CONFIG) return parser
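A usage sketch built only on the standard-library ConfigParser API; CONFIG is the module-level path read_config creates and reads, and the 'staging' section is invented:

parser = read_config()
if not parser.has_section('staging'):
    parser.add_section('staging')
parser.set('staging', 'url', 'https://staging.example.com')
with open(CONFIG, 'w') as handle:
    parser.write(handle)   # persist the new environment back to disk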
def set_end_date(self, date): """Sets the end date. arg: date (osid.calendaring.DateTime): the new date raise: InvalidArgument - ``date`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``date`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ if self.get_end_date_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_date_time(date, self.get_end_date_metadata()): raise errors.InvalidArgument() # self._my_map['endDate'] = self._get_date_map(date) self._my_map['endDate'] = date
def function[set_end_date, parameter[self, date]]: constant[Sets the end date. arg: date (osid.calendaring.DateTime): the new date raise: InvalidArgument - ``date`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``date`` is ``null`` *compliance: mandatory -- This method must be implemented.* ] if call[call[name[self].get_end_date_metadata, parameter[]].is_read_only, parameter[]] begin[:] <ast.Raise object at 0x7da207f02a70> if <ast.UnaryOp object at 0x7da207f031f0> begin[:] <ast.Raise object at 0x7da20c6a8280> call[name[self]._my_map][constant[endDate]] assign[=] name[date]
keyword[def] identifier[set_end_date] ( identifier[self] , identifier[date] ): literal[string] keyword[if] identifier[self] . identifier[get_end_date_metadata] (). identifier[is_read_only] (): keyword[raise] identifier[errors] . identifier[NoAccess] () keyword[if] keyword[not] identifier[self] . identifier[_is_valid_date_time] ( identifier[date] , identifier[self] . identifier[get_end_date_metadata] ()): keyword[raise] identifier[errors] . identifier[InvalidArgument] () identifier[self] . identifier[_my_map] [ literal[string] ]= identifier[date]
def set_end_date(self, date): """Sets the end date. arg: date (osid.calendaring.DateTime): the new date raise: InvalidArgument - ``date`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``date`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ if self.get_end_date_metadata().is_read_only(): raise errors.NoAccess() # depends on [control=['if'], data=[]] if not self._is_valid_date_time(date, self.get_end_date_metadata()): raise errors.InvalidArgument() # depends on [control=['if'], data=[]] # self._my_map['endDate'] = self._get_date_map(date) self._my_map['endDate'] = date
def get_completions(self, document, complete_event): # Get word/text before cursor. if self.sentence: word_before_cursor = document.text_before_cursor else: word_before_cursor = document.get_word_before_cursor(WORD=self.WORD) if self.ignore_case: word_before_cursor = word_before_cursor.lower() def word_matches(word): """ True when the word before the cursor matches. """ if self.ignore_case: word = word.lower() if self.match_middle: return word_before_cursor in word else: return word.startswith(word_before_cursor) ''' log.debug("------------------------------------------------------") log.debug(f"** WORD {self.WORD}") log.debug(f"** words {self.words}") log.debug(f"** word_before_cursor {word_before_cursor}") ''' words = self._words_callable() if self._words_callable else self.words for a in words: if word_matches(a): display_meta = self.meta_dict.get(a, '') log.debug(f"MATCH: {a}, {-len(word_before_cursor)}," f" meta: {display_meta}") yield Completion(self.quote(a), -len(word_before_cursor), display_meta=display_meta)
def function[get_completions, parameter[self, document, complete_event]]: if name[self].sentence begin[:] variable[word_before_cursor] assign[=] name[document].text_before_cursor if name[self].ignore_case begin[:] variable[word_before_cursor] assign[=] call[name[word_before_cursor].lower, parameter[]] def function[word_matches, parameter[word]]: constant[ True when the word before the cursor matches. ] if name[self].ignore_case begin[:] variable[word] assign[=] call[name[word].lower, parameter[]] if name[self].match_middle begin[:] return[compare[name[word_before_cursor] in name[word]]] constant[ log.debug("------------------------------------------------------") log.debug(f"** WORD {self.WORD}") log.debug(f"** words {self.words}") log.debug(f"** word_before_cursor {word_before_cursor}") ] variable[words] assign[=] <ast.IfExp object at 0x7da204960f40> for taget[name[a]] in starred[name[words]] begin[:] if call[name[word_matches], parameter[name[a]]] begin[:] variable[display_meta] assign[=] call[name[self].meta_dict.get, parameter[name[a], constant[]]] call[name[log].debug, parameter[<ast.JoinedStr object at 0x7da204963550>]] <ast.Yield object at 0x7da204961b70>
keyword[def] identifier[get_completions] ( identifier[self] , identifier[document] , identifier[complete_event] ): keyword[if] identifier[self] . identifier[sentence] : identifier[word_before_cursor] = identifier[document] . identifier[text_before_cursor] keyword[else] : identifier[word_before_cursor] = identifier[document] . identifier[get_word_before_cursor] ( identifier[WORD] = identifier[self] . identifier[WORD] ) keyword[if] identifier[self] . identifier[ignore_case] : identifier[word_before_cursor] = identifier[word_before_cursor] . identifier[lower] () keyword[def] identifier[word_matches] ( identifier[word] ): literal[string] keyword[if] identifier[self] . identifier[ignore_case] : identifier[word] = identifier[word] . identifier[lower] () keyword[if] identifier[self] . identifier[match_middle] : keyword[return] identifier[word_before_cursor] keyword[in] identifier[word] keyword[else] : keyword[return] identifier[word] . identifier[startswith] ( identifier[word_before_cursor] ) literal[string] identifier[words] = identifier[self] . identifier[_words_callable] () keyword[if] identifier[self] . identifier[_words_callable] keyword[else] identifier[self] . identifier[words] keyword[for] identifier[a] keyword[in] identifier[words] : keyword[if] identifier[word_matches] ( identifier[a] ): identifier[display_meta] = identifier[self] . identifier[meta_dict] . identifier[get] ( identifier[a] , literal[string] ) identifier[log] . identifier[debug] ( literal[string] literal[string] ) keyword[yield] identifier[Completion] ( identifier[self] . identifier[quote] ( identifier[a] ),- identifier[len] ( identifier[word_before_cursor] ), identifier[display_meta] = identifier[display_meta] )
def get_completions(self, document, complete_event): # Get word/text before cursor. if self.sentence: word_before_cursor = document.text_before_cursor # depends on [control=['if'], data=[]] else: word_before_cursor = document.get_word_before_cursor(WORD=self.WORD) if self.ignore_case: word_before_cursor = word_before_cursor.lower() # depends on [control=['if'], data=[]] def word_matches(word): """ True when the word before the cursor matches. """ if self.ignore_case: word = word.lower() # depends on [control=['if'], data=[]] if self.match_middle: return word_before_cursor in word # depends on [control=['if'], data=[]] else: return word.startswith(word_before_cursor) '\n log.debug("------------------------------------------------------")\n log.debug(f"** WORD {self.WORD}")\n log.debug(f"** words {self.words}")\n log.debug(f"** word_before_cursor {word_before_cursor}")\n ' words = self._words_callable() if self._words_callable else self.words for a in words: if word_matches(a): display_meta = self.meta_dict.get(a, '') log.debug(f'MATCH: {a}, {-len(word_before_cursor)}, meta: {display_meta}') yield Completion(self.quote(a), -len(word_before_cursor), display_meta=display_meta) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
def list(self, **params):
    """
    Retrieve text messages

    Returns Text Messages, according to the parameters provided

    :calls: ``get /text_messages``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access, which
    represent a collection of TextMessages.
    :rtype: list
    """

    _, _, text_messages = self.http_client.get("/text_messages", params=params)
    return text_messages
def function[list, parameter[self]]: constant[ Retrieve text messages Returns Text Messages, according to the parameters provided :calls: ``get /text_messages`` :param dict params: (optional) Search options. :return: List of dictionaries that support attriubte-style access, which represent collection of TextMessages. :rtype: list ] <ast.Tuple object at 0x7da18bc71990> assign[=] call[name[self].http_client.get, parameter[constant[/text_messages]]] return[name[text_messages]]
keyword[def] identifier[list] ( identifier[self] ,** identifier[params] ): literal[string] identifier[_] , identifier[_] , identifier[text_messages] = identifier[self] . identifier[http_client] . identifier[get] ( literal[string] , identifier[params] = identifier[params] ) keyword[return] identifier[text_messages]
def list(self, **params):
    """
    Retrieve text messages

    Returns Text Messages, according to the parameters provided

    :calls: ``get /text_messages``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access, which
    represent a collection of TextMessages.
    :rtype: list
    """
    (_, _, text_messages) = self.http_client.get('/text_messages', params=params)
    return text_messages
def load_log(args): """Load a `logging.Logger` object. Arguments --------- args : `argparse.Namespace` object Namespace containing required settings: {`args.debug`, `args.verbose`, and `args.log_filename`}. Returns ------- log : `logging.Logger` object """ from astrocats.catalog.utils import logger # Determine verbosity ('None' means use default) log_stream_level = None if args.debug: log_stream_level = logger.DEBUG elif args.verbose: log_stream_level = logger.INFO # Create log log = logger.get_logger( stream_level=log_stream_level, tofile=args.log_filename) log._verbose = args.verbose log._debug = args.debug return log
def function[load_log, parameter[args]]: constant[Load a `logging.Logger` object. Arguments --------- args : `argparse.Namespace` object Namespace containing required settings: {`args.debug`, `args.verbose`, and `args.log_filename`}. Returns ------- log : `logging.Logger` object ] from relative_module[astrocats.catalog.utils] import module[logger] variable[log_stream_level] assign[=] constant[None] if name[args].debug begin[:] variable[log_stream_level] assign[=] name[logger].DEBUG variable[log] assign[=] call[name[logger].get_logger, parameter[]] name[log]._verbose assign[=] name[args].verbose name[log]._debug assign[=] name[args].debug return[name[log]]
keyword[def] identifier[load_log] ( identifier[args] ): literal[string] keyword[from] identifier[astrocats] . identifier[catalog] . identifier[utils] keyword[import] identifier[logger] identifier[log_stream_level] = keyword[None] keyword[if] identifier[args] . identifier[debug] : identifier[log_stream_level] = identifier[logger] . identifier[DEBUG] keyword[elif] identifier[args] . identifier[verbose] : identifier[log_stream_level] = identifier[logger] . identifier[INFO] identifier[log] = identifier[logger] . identifier[get_logger] ( identifier[stream_level] = identifier[log_stream_level] , identifier[tofile] = identifier[args] . identifier[log_filename] ) identifier[log] . identifier[_verbose] = identifier[args] . identifier[verbose] identifier[log] . identifier[_debug] = identifier[args] . identifier[debug] keyword[return] identifier[log]
def load_log(args): """Load a `logging.Logger` object. Arguments --------- args : `argparse.Namespace` object Namespace containing required settings: {`args.debug`, `args.verbose`, and `args.log_filename`}. Returns ------- log : `logging.Logger` object """ from astrocats.catalog.utils import logger # Determine verbosity ('None' means use default) log_stream_level = None if args.debug: log_stream_level = logger.DEBUG # depends on [control=['if'], data=[]] elif args.verbose: log_stream_level = logger.INFO # depends on [control=['if'], data=[]] # Create log log = logger.get_logger(stream_level=log_stream_level, tofile=args.log_filename) log._verbose = args.verbose log._debug = args.debug return log
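A usage sketch with a hand-built namespace whose fields match what the docstring requires; 'import.log' is an invented filename:

import argparse

args = argparse.Namespace(debug=False, verbose=True, log_filename='import.log')
log = load_log(args)   # stream level resolves to INFO because verbose=True
log.info('catalog import started')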
def timer(self, stat, tags=None): """Contextmanager for easily computing timings. :arg string stat: A period delimited alphanumeric key. :arg list-of-strings tags: Each string in the tag consists of a key and a value separated by a colon. Tags can make it easier to break down metrics for analysis. For example ``['env:stage', 'compressed:yes']``. For example: >>> mymetrics = get_metrics(__name__) >>> def long_function(): ... with mymetrics.timer('long_function'): ... # perform some thing we want to keep metrics on ... pass .. Note:: All timings generated with this are in milliseconds. """ if six.PY3: start_time = time.perf_counter() else: start_time = time.time() yield if six.PY3: end_time = time.perf_counter() else: end_time = time.time() delta = end_time - start_time self.timing(stat, value=delta * 1000.0, tags=tags)
def function[timer, parameter[self, stat, tags]]: constant[Contextmanager for easily computing timings. :arg string stat: A period delimited alphanumeric key. :arg list-of-strings tags: Each string in the tag consists of a key and a value separated by a colon. Tags can make it easier to break down metrics for analysis. For example ``['env:stage', 'compressed:yes']``. For example: >>> mymetrics = get_metrics(__name__) >>> def long_function(): ... with mymetrics.timer('long_function'): ... # perform some thing we want to keep metrics on ... pass .. Note:: All timings generated with this are in milliseconds. ] if name[six].PY3 begin[:] variable[start_time] assign[=] call[name[time].perf_counter, parameter[]] <ast.Yield object at 0x7da1b02c6f80> if name[six].PY3 begin[:] variable[end_time] assign[=] call[name[time].perf_counter, parameter[]] variable[delta] assign[=] binary_operation[name[end_time] - name[start_time]] call[name[self].timing, parameter[name[stat]]]
keyword[def] identifier[timer] ( identifier[self] , identifier[stat] , identifier[tags] = keyword[None] ): literal[string] keyword[if] identifier[six] . identifier[PY3] : identifier[start_time] = identifier[time] . identifier[perf_counter] () keyword[else] : identifier[start_time] = identifier[time] . identifier[time] () keyword[yield] keyword[if] identifier[six] . identifier[PY3] : identifier[end_time] = identifier[time] . identifier[perf_counter] () keyword[else] : identifier[end_time] = identifier[time] . identifier[time] () identifier[delta] = identifier[end_time] - identifier[start_time] identifier[self] . identifier[timing] ( identifier[stat] , identifier[value] = identifier[delta] * literal[int] , identifier[tags] = identifier[tags] )
def timer(self, stat, tags=None): """Contextmanager for easily computing timings. :arg string stat: A period delimited alphanumeric key. :arg list-of-strings tags: Each string in the tag consists of a key and a value separated by a colon. Tags can make it easier to break down metrics for analysis. For example ``['env:stage', 'compressed:yes']``. For example: >>> mymetrics = get_metrics(__name__) >>> def long_function(): ... with mymetrics.timer('long_function'): ... # perform some thing we want to keep metrics on ... pass .. Note:: All timings generated with this are in milliseconds. """ if six.PY3: start_time = time.perf_counter() # depends on [control=['if'], data=[]] else: start_time = time.time() yield if six.PY3: end_time = time.perf_counter() # depends on [control=['if'], data=[]] else: end_time = time.time() delta = end_time - start_time self.timing(stat, value=delta * 1000.0, tags=tags)
def param_load(self): """ Queries the server for the parameter information and other metadata associated with this task """ escaped_uri = urllib.parse.quote(self.uri) request = urllib.request.Request(self.server_data.url + '/rest/v1/tasks/' + escaped_uri) if self.server_data.authorization_header() is not None: request.add_header('Authorization', self.server_data.authorization_header()) request.add_header('User-Agent', 'GenePatternRest') response = urllib.request.urlopen(request) self.json = response.read().decode('utf-8') self.dto = json.loads(self.json) self.description = self.dto['description'] if 'description' in self.dto else "" self.name = self.dto['name'] self.documentation = self.dto['documentation'] if 'documentation' in self.dto else "" self.lsid = self.dto['lsid'] self.version = self.dto['version'] if 'version' in self.dto else "" self.params = [] for param in self.dto['params']: self.params.append(GPTaskParam(self, param)) self._params_loaded = True
def function[param_load, parameter[self]]: constant[ Queries the server for the parameter information and other metadata associated with this task ] variable[escaped_uri] assign[=] call[name[urllib].parse.quote, parameter[name[self].uri]] variable[request] assign[=] call[name[urllib].request.Request, parameter[binary_operation[binary_operation[name[self].server_data.url + constant[/rest/v1/tasks/]] + name[escaped_uri]]]] if compare[call[name[self].server_data.authorization_header, parameter[]] is_not constant[None]] begin[:] call[name[request].add_header, parameter[constant[Authorization], call[name[self].server_data.authorization_header, parameter[]]]] call[name[request].add_header, parameter[constant[User-Agent], constant[GenePatternRest]]] variable[response] assign[=] call[name[urllib].request.urlopen, parameter[name[request]]] name[self].json assign[=] call[call[name[response].read, parameter[]].decode, parameter[constant[utf-8]]] name[self].dto assign[=] call[name[json].loads, parameter[name[self].json]] name[self].description assign[=] <ast.IfExp object at 0x7da2041dbeb0> name[self].name assign[=] call[name[self].dto][constant[name]] name[self].documentation assign[=] <ast.IfExp object at 0x7da2041d9d50> name[self].lsid assign[=] call[name[self].dto][constant[lsid]] name[self].version assign[=] <ast.IfExp object at 0x7da2041db820> name[self].params assign[=] list[[]] for taget[name[param]] in starred[call[name[self].dto][constant[params]]] begin[:] call[name[self].params.append, parameter[call[name[GPTaskParam], parameter[name[self], name[param]]]]] name[self]._params_loaded assign[=] constant[True]
keyword[def] identifier[param_load] ( identifier[self] ): literal[string] identifier[escaped_uri] = identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[self] . identifier[uri] ) identifier[request] = identifier[urllib] . identifier[request] . identifier[Request] ( identifier[self] . identifier[server_data] . identifier[url] + literal[string] + identifier[escaped_uri] ) keyword[if] identifier[self] . identifier[server_data] . identifier[authorization_header] () keyword[is] keyword[not] keyword[None] : identifier[request] . identifier[add_header] ( literal[string] , identifier[self] . identifier[server_data] . identifier[authorization_header] ()) identifier[request] . identifier[add_header] ( literal[string] , literal[string] ) identifier[response] = identifier[urllib] . identifier[request] . identifier[urlopen] ( identifier[request] ) identifier[self] . identifier[json] = identifier[response] . identifier[read] (). identifier[decode] ( literal[string] ) identifier[self] . identifier[dto] = identifier[json] . identifier[loads] ( identifier[self] . identifier[json] ) identifier[self] . identifier[description] = identifier[self] . identifier[dto] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[self] . identifier[dto] keyword[else] literal[string] identifier[self] . identifier[name] = identifier[self] . identifier[dto] [ literal[string] ] identifier[self] . identifier[documentation] = identifier[self] . identifier[dto] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[self] . identifier[dto] keyword[else] literal[string] identifier[self] . identifier[lsid] = identifier[self] . identifier[dto] [ literal[string] ] identifier[self] . identifier[version] = identifier[self] . identifier[dto] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[self] . identifier[dto] keyword[else] literal[string] identifier[self] . identifier[params] =[] keyword[for] identifier[param] keyword[in] identifier[self] . identifier[dto] [ literal[string] ]: identifier[self] . identifier[params] . identifier[append] ( identifier[GPTaskParam] ( identifier[self] , identifier[param] )) identifier[self] . identifier[_params_loaded] = keyword[True]
def param_load(self): """ Queries the server for the parameter information and other metadata associated with this task """ escaped_uri = urllib.parse.quote(self.uri) request = urllib.request.Request(self.server_data.url + '/rest/v1/tasks/' + escaped_uri) if self.server_data.authorization_header() is not None: request.add_header('Authorization', self.server_data.authorization_header()) # depends on [control=['if'], data=[]] request.add_header('User-Agent', 'GenePatternRest') response = urllib.request.urlopen(request) self.json = response.read().decode('utf-8') self.dto = json.loads(self.json) self.description = self.dto['description'] if 'description' in self.dto else '' self.name = self.dto['name'] self.documentation = self.dto['documentation'] if 'documentation' in self.dto else '' self.lsid = self.dto['lsid'] self.version = self.dto['version'] if 'version' in self.dto else '' self.params = [] for param in self.dto['params']: self.params.append(GPTaskParam(self, param)) # depends on [control=['for'], data=['param']] self._params_loaded = True
def configure(self, *args, **kwargs): """Configures a SWAG manager. Overrides existing configuration.""" self.version = kwargs['schema_version'] self.namespace = kwargs['namespace'] self.backend = get(kwargs['type'])(*args, **kwargs) self.context = kwargs.pop('schema_context', {})
def function[configure, parameter[self]]: constant[Configures a SWAG manager. Overrides existing configuration.] name[self].version assign[=] call[name[kwargs]][constant[schema_version]] name[self].namespace assign[=] call[name[kwargs]][constant[namespace]] name[self].backend assign[=] call[call[name[get], parameter[call[name[kwargs]][constant[type]]]], parameter[<ast.Starred object at 0x7da1b084fd90>]] name[self].context assign[=] call[name[kwargs].pop, parameter[constant[schema_context], dictionary[[], []]]]
keyword[def] identifier[configure] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[self] . identifier[version] = identifier[kwargs] [ literal[string] ] identifier[self] . identifier[namespace] = identifier[kwargs] [ literal[string] ] identifier[self] . identifier[backend] = identifier[get] ( identifier[kwargs] [ literal[string] ])(* identifier[args] ,** identifier[kwargs] ) identifier[self] . identifier[context] = identifier[kwargs] . identifier[pop] ( literal[string] ,{})
def configure(self, *args, **kwargs): """Configures a SWAG manager. Overrides existing configuration.""" self.version = kwargs['schema_version'] self.namespace = kwargs['namespace'] self.backend = get(kwargs['type'])(*args, **kwargs) self.context = kwargs.pop('schema_context', {})
def splitDataset(dataset, groupby): """ Split the given dataset into multiple datasets grouped by the given groupby function. For example:: # Split mnist dataset into 10 datasets, one dataset for each label splitDataset(mnist, groupby=lambda x: x[1]) # Split mnist dataset into 5 datasets, one dataset for each label pair: [0,1], [2,3],... splitDataset(mnist, groupby=lambda x: x[1] // 2) :param dataset: Source dataset to split :param groupby: Group by function. See :func:`itertools.groupby` :return: List of datasets """ # Split dataset based on the group by function and keep track of indices indicesByGroup = collections.defaultdict(list) for k, g in itertools.groupby(enumerate(dataset), key=lambda x: groupby(x[1])): indicesByGroup[k].extend([i[0] for i in g]) # Sort by group and create a Subset dataset for each of the group indices _, indices = zip(*(sorted(indicesByGroup.items(), key=lambda x: x[0]))) return [Subset(dataset, indices=i) for i in indices]
def function[splitDataset, parameter[dataset, groupby]]: constant[ Split the given dataset into multiple datasets grouped by the given groupby function. For example:: # Split mnist dataset into 10 datasets, one dataset for each label splitDataset(mnist, groupby=lambda x: x[1]) # Split mnist dataset into 5 datasets, one dataset for each label pair: [0,1], [2,3],... splitDataset(mnist, groupby=lambda x: x[1] // 2) :param dataset: Source dataset to split :param groupby: Group by function. See :func:`itertools.groupby` :return: List of datasets ] variable[indicesByGroup] assign[=] call[name[collections].defaultdict, parameter[name[list]]] for taget[tuple[[<ast.Name object at 0x7da1b0823dc0>, <ast.Name object at 0x7da1b0823d90>]]] in starred[call[name[itertools].groupby, parameter[call[name[enumerate], parameter[name[dataset]]]]]] begin[:] call[call[name[indicesByGroup]][name[k]].extend, parameter[<ast.ListComp object at 0x7da1b0821990>]] <ast.Tuple object at 0x7da1b0823b50> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1b0821480>]] return[<ast.ListComp object at 0x7da1b0820a60>]
keyword[def] identifier[splitDataset] ( identifier[dataset] , identifier[groupby] ): literal[string] identifier[indicesByGroup] = identifier[collections] . identifier[defaultdict] ( identifier[list] ) keyword[for] identifier[k] , identifier[g] keyword[in] identifier[itertools] . identifier[groupby] ( identifier[enumerate] ( identifier[dataset] ), identifier[key] = keyword[lambda] identifier[x] : identifier[groupby] ( identifier[x] [ literal[int] ])): identifier[indicesByGroup] [ identifier[k] ]. identifier[extend] ([ identifier[i] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[g] ]) identifier[_] , identifier[indices] = identifier[zip] (*( identifier[sorted] ( identifier[indicesByGroup] . identifier[items] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]))) keyword[return] [ identifier[Subset] ( identifier[dataset] , identifier[indices] = identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[indices] ]
def splitDataset(dataset, groupby): """ Split the given dataset into multiple datasets grouped by the given groupby function. For example:: # Split mnist dataset into 10 datasets, one dataset for each label splitDataset(mnist, groupby=lambda x: x[1]) # Split mnist dataset into 5 datasets, one dataset for each label pair: [0,1], [2,3],... splitDataset(mnist, groupby=lambda x: x[1] // 2) :param dataset: Source dataset to split :param groupby: Group by function. See :func:`itertools.groupby` :return: List of datasets """ # Split dataset based on the group by function and keep track of indices indicesByGroup = collections.defaultdict(list) for (k, g) in itertools.groupby(enumerate(dataset), key=lambda x: groupby(x[1])): indicesByGroup[k].extend([i[0] for i in g]) # depends on [control=['for'], data=[]] # Sort by group and create a Subset dataset for each of the group indices (_, indices) = zip(*sorted(indicesByGroup.items(), key=lambda x: x[0])) return [Subset(dataset, indices=i) for i in indices]
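A standalone sketch of the index-grouping core of splitDataset, with plain (sample, label) tuples standing in for a torch dataset; it shows why the defaultdict is needed, since itertools.groupby only groups consecutive items:

import collections
import itertools

dataset = [("a", 0), ("b", 1), ("c", 0), ("d", 1)]
indicesByGroup = collections.defaultdict(list)
# groupby yields a new run each time the key changes, so extend()
# accumulates the non-consecutive runs belonging to each label.
for k, g in itertools.groupby(enumerate(dataset), key=lambda x: x[1][1]):
    indicesByGroup[k].extend(i for i, _ in g)
print(dict(indicesByGroup))  # {0: [0, 2], 1: [1, 3]}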
def matrix_from_basis_coefficients(expansion: value.LinearDict[str], basis: Dict[str, np.ndarray]) -> np.ndarray: """Computes linear combination of basis vectors with given coefficients.""" some_element = next(iter(basis.values())) result = np.zeros_like(some_element, dtype=np.complex128) for name, coefficient in expansion.items(): result += coefficient * basis[name] return result
def function[matrix_from_basis_coefficients, parameter[expansion, basis]]: constant[Computes linear combination of basis vectors with given coefficients.] variable[some_element] assign[=] call[name[next], parameter[call[name[iter], parameter[call[name[basis].values, parameter[]]]]]] variable[result] assign[=] call[name[np].zeros_like, parameter[name[some_element]]] for taget[tuple[[<ast.Name object at 0x7da1b1ce8cd0>, <ast.Name object at 0x7da1b1ce98d0>]]] in starred[call[name[expansion].items, parameter[]]] begin[:] <ast.AugAssign object at 0x7da1b1ce99f0> return[name[result]]
keyword[def] identifier[matrix_from_basis_coefficients] ( identifier[expansion] : identifier[value] . identifier[LinearDict] [ identifier[str] ], identifier[basis] : identifier[Dict] [ identifier[str] , identifier[np] . identifier[ndarray] ])-> identifier[np] . identifier[ndarray] : literal[string] identifier[some_element] = identifier[next] ( identifier[iter] ( identifier[basis] . identifier[values] ())) identifier[result] = identifier[np] . identifier[zeros_like] ( identifier[some_element] , identifier[dtype] = identifier[np] . identifier[complex128] ) keyword[for] identifier[name] , identifier[coefficient] keyword[in] identifier[expansion] . identifier[items] (): identifier[result] += identifier[coefficient] * identifier[basis] [ identifier[name] ] keyword[return] identifier[result]
def matrix_from_basis_coefficients(expansion: value.LinearDict[str], basis: Dict[str, np.ndarray]) -> np.ndarray: """Computes linear combination of basis vectors with given coefficients.""" some_element = next(iter(basis.values())) result = np.zeros_like(some_element, dtype=np.complex128) for (name, coefficient) in expansion.items(): result += coefficient * basis[name] # depends on [control=['for'], data=[]] return result
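A hedged usage sketch, with a plain dict standing in for cirq's value.LinearDict; it reproduces the accumulation loop for a two-element Pauli basis:

import numpy as np

basis = {"I": np.eye(2), "X": np.array([[0.0, 1.0], [1.0, 0.0]])}
expansion = {"I": 0.5, "X": 0.5j}

# Same accumulation as matrix_from_basis_coefficients: 0.5*I + 0.5j*X.
result = np.zeros_like(next(iter(basis.values())), dtype=np.complex128)
for name, coefficient in expansion.items():
    result += coefficient * basis[name]
print(result)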
def rename_channels(self, *, verbose=True, **kwargs): """Rename a set of channels. Parameters ---------- kwargs Keyword arguments of the form current:'new'. verbose : boolean (optional) Toggle talkback. Default is True """ # ensure that items will remain unique changed = kwargs.keys() for k, v in kwargs.items(): if v not in changed and v in self.keys(): raise wt_exceptions.NameNotUniqueError(v) # compile references to items that are changing new = {} for k, v in kwargs.items(): obj = self[k] index = self.channel_names.index(k) # rename new[v] = obj, index Group._instances.pop(obj.fullpath, None) obj.natural_name = str(v) # remove old references del self[k] # apply new references names = list(self.channel_names) for v, value in new.items(): obj, index = value self[v] = obj names[index] = v self.channel_names = names # finish if verbose: print("{0} channel(s) renamed:".format(len(kwargs))) for k, v in kwargs.items(): print(" {0} --> {1}".format(k, v))
def function[rename_channels, parameter[self]]: constant[Rename a set of channels. Parameters ---------- kwargs Keyword arguments of the form current:'new'. verbose : boolean (optional) Toggle talkback. Default is True ] variable[changed] assign[=] call[name[kwargs].keys, parameter[]] for taget[tuple[[<ast.Name object at 0x7da20c6ab400>, <ast.Name object at 0x7da20c6a8580>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da20c6a8040> begin[:] <ast.Raise object at 0x7da20c6ab550> variable[new] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da20c6a8220>, <ast.Name object at 0x7da20c6a84f0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:] variable[obj] assign[=] call[name[self]][name[k]] variable[index] assign[=] call[name[self].channel_names.index, parameter[name[k]]] call[name[new]][name[v]] assign[=] tuple[[<ast.Name object at 0x7da20c6ab5b0>, <ast.Name object at 0x7da20c6a89d0>]] call[name[Group]._instances.pop, parameter[name[obj].fullpath, constant[None]]] name[obj].natural_name assign[=] call[name[str], parameter[name[v]]] <ast.Delete object at 0x7da20c6a8a30> variable[names] assign[=] call[name[list], parameter[name[self].channel_names]] for taget[tuple[[<ast.Name object at 0x7da20c6ab160>, <ast.Name object at 0x7da20c6ab8b0>]]] in starred[call[name[new].items, parameter[]]] begin[:] <ast.Tuple object at 0x7da20c6a8400> assign[=] name[value] call[name[self]][name[v]] assign[=] name[obj] call[name[names]][name[index]] assign[=] name[v] name[self].channel_names assign[=] name[names] if name[verbose] begin[:] call[name[print], parameter[call[constant[{0} channel(s) renamed:].format, parameter[call[name[len], parameter[name[kwargs]]]]]]] for taget[tuple[[<ast.Name object at 0x7da20c6a8d00>, <ast.Name object at 0x7da20c6a9ab0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:] call[name[print], parameter[call[constant[ {0} --> {1}].format, parameter[name[k], name[v]]]]]
keyword[def] identifier[rename_channels] ( identifier[self] ,*, identifier[verbose] = keyword[True] ,** identifier[kwargs] ): literal[string] identifier[changed] = identifier[kwargs] . identifier[keys] () keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] (): keyword[if] identifier[v] keyword[not] keyword[in] identifier[changed] keyword[and] identifier[v] keyword[in] identifier[self] . identifier[keys] (): keyword[raise] identifier[wt_exceptions] . identifier[NameNotUniqueError] ( identifier[v] ) identifier[new] ={} keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] (): identifier[obj] = identifier[self] [ identifier[k] ] identifier[index] = identifier[self] . identifier[channel_names] . identifier[index] ( identifier[k] ) identifier[new] [ identifier[v] ]= identifier[obj] , identifier[index] identifier[Group] . identifier[_instances] . identifier[pop] ( identifier[obj] . identifier[fullpath] , keyword[None] ) identifier[obj] . identifier[natural_name] = identifier[str] ( identifier[v] ) keyword[del] identifier[self] [ identifier[k] ] identifier[names] = identifier[list] ( identifier[self] . identifier[channel_names] ) keyword[for] identifier[v] , identifier[value] keyword[in] identifier[new] . identifier[items] (): identifier[obj] , identifier[index] = identifier[value] identifier[self] [ identifier[v] ]= identifier[obj] identifier[names] [ identifier[index] ]= identifier[v] identifier[self] . identifier[channel_names] = identifier[names] keyword[if] identifier[verbose] : identifier[print] ( literal[string] . identifier[format] ( identifier[len] ( identifier[kwargs] ))) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] (): identifier[print] ( literal[string] . identifier[format] ( identifier[k] , identifier[v] ))
def rename_channels(self, *, verbose=True, **kwargs): """Rename a set of channels. Parameters ---------- kwargs Keyword arguments of the form current:'new'. verbose : boolean (optional) Toggle talkback. Default is True """ # ensure that items will remain unique changed = kwargs.keys() for (k, v) in kwargs.items(): if v not in changed and v in self.keys(): raise wt_exceptions.NameNotUniqueError(v) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # compile references to items that are changing new = {} for (k, v) in kwargs.items(): obj = self[k] index = self.channel_names.index(k) # rename new[v] = (obj, index) Group._instances.pop(obj.fullpath, None) obj.natural_name = str(v) # remove old references del self[k] # depends on [control=['for'], data=[]] # apply new references names = list(self.channel_names) for (v, value) in new.items(): (obj, index) = value self[v] = obj names[index] = v # depends on [control=['for'], data=[]] self.channel_names = names # finish if verbose: print('{0} channel(s) renamed:'.format(len(kwargs))) for (k, v) in kwargs.items(): print(' {0} --> {1}'.format(k, v)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
def parse_blocks(lines): # type: (List[str]) -> List[List[str]] """Parse and return all possible blocks, popping off the start of `lines`. :param lines: list of lines :return: list of blocks, where each block is a list of lines """ blocks = [] while lines: if lines[0] == '': lines.pop(0) else: blocks.append(parse_block(lines)) return blocks
def function[parse_blocks, parameter[lines]]: constant[Parse and return all possible blocks, popping off the start of `lines`. :param lines: list of lines :return: list of blocks, where each block is a list of lines ] variable[blocks] assign[=] list[[]] while name[lines] begin[:] if compare[call[name[lines]][constant[0]] equal[==] constant[]] begin[:] call[name[lines].pop, parameter[constant[0]]] return[name[blocks]]
keyword[def] identifier[parse_blocks] ( identifier[lines] ): literal[string] identifier[blocks] =[] keyword[while] identifier[lines] : keyword[if] identifier[lines] [ literal[int] ]== literal[string] : identifier[lines] . identifier[pop] ( literal[int] ) keyword[else] : identifier[blocks] . identifier[append] ( identifier[parse_block] ( identifier[lines] )) keyword[return] identifier[blocks]
def parse_blocks(lines): # type: (List[str]) -> List[List[str]] 'Parse and return all possible blocks, popping off the start of `lines`.\n\n :param lines: list of lines\n :return: list of blocks, where each block is a list of lines\n ' blocks = [] while lines: if lines[0] == '': lines.pop(0) # depends on [control=['if'], data=[]] else: blocks.append(parse_block(lines)) # depends on [control=['while'], data=[]] return blocks
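A small demo of parse_blocks as defined in the record above, using a minimal stand-in for parse_block (its real implementation is not part of this record) that consumes lines up to the next blank line:

def parse_block(lines):
    # Hypothetical stand-in: pop lines until a blank separator is reached.
    block = []
    while lines and lines[0] != '':
        block.append(lines.pop(0))
    return block

lines = ["a", "b", "", "c"]
print(parse_blocks(lines))  # [['a', 'b'], ['c']]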
def getEvents(self, min_nr=1, nr=None, timeout=None):
        """
        Returns a list of event data from submitted IO blocks.

        min_nr (int, None)
            When timeout is None, minimum number of events to collect before
            returning.
            If None, waits for all submitted events.
        nr (int, None)
            Maximum number of events to return.
            If None, set to maxevents given at construction or to the number of
            currently submitted events, whichever is larger.
        timeout (float, None):
            Time to wait for events.
            If None, the call blocks until events are available.

        Returns a list of 3-tuples, containing:
        - completed AIOBlock instance
        - res, file-object-type-dependent value
        - res2, another file-object-type-dependent value
        """
        if min_nr is None:
            min_nr = len(self._submitted)
        if nr is None:
            nr = max(len(self._submitted), self._maxevents)
        if timeout is None:
            timeoutp = None
        else:
            sec = int(timeout)
            timeout = libaio.timespec(sec, int((timeout - sec) * 1e9))
            timeoutp = byref(timeout)
        event_buffer = (libaio.io_event * nr)()
        actual_nr = libaio.io_getevents(
            self._ctx,
            min_nr,
            nr,
            event_buffer,
            timeoutp,
        )
        return [
            self._eventToPython(event_buffer[x])
            for x in xrange(actual_nr)
        ]
def function[getEvents, parameter[self, min_nr, nr, timeout]]: constant[ Returns a list of event data from submitted IO blocks. min_nr (int, None) When timeout is None, minimum number of events to collect before returning. If None, waits for all submitted events. nr (int, None) Maximum number of events to return. If None, set to maxevents given at construction or to the number of currently submitted events, whichever is larger. timeout (float, None): Time to wait for events. If None, the call blocks until events are available. Returns a list of 3-tuples, containing: - completed AIOBlock instance - res, file-object-type-dependent value - res2, another file-object-type-dependent value ] if compare[name[min_nr] is constant[None]] begin[:] variable[min_nr] assign[=] call[name[len], parameter[name[self]._submitted]] if compare[name[nr] is constant[None]] begin[:] variable[nr] assign[=] call[name[max], parameter[call[name[len], parameter[name[self]._submitted]], name[self]._maxevents]] if compare[name[timeout] is constant[None]] begin[:] variable[timeoutp] assign[=] constant[None] variable[event_buffer] assign[=] call[binary_operation[name[libaio].io_event * name[nr]], parameter[]] variable[actual_nr] assign[=] call[name[libaio].io_getevents, parameter[name[self]._ctx, name[min_nr], name[nr], name[event_buffer], name[timeoutp]]] return[<ast.ListComp object at 0x7da1b02adc90>]
keyword[def] identifier[getEvents] ( identifier[self] , identifier[min_nr] = literal[int] , identifier[nr] = keyword[None] , identifier[timeout] = keyword[None] ): literal[string] keyword[if] identifier[min_nr] keyword[is] keyword[None] : identifier[min_nr] = identifier[len] ( identifier[self] . identifier[_submitted] ) keyword[if] identifier[nr] keyword[is] keyword[None] : identifier[nr] = identifier[max] ( identifier[len] ( identifier[self] . identifier[_submitted] ), identifier[self] . identifier[_maxevents] ) keyword[if] identifier[timeout] keyword[is] keyword[None] : identifier[timeoutp] = keyword[None] keyword[else] : identifier[sec] = identifier[int] ( identifier[timeout] ) identifier[timeout] = identifier[libaio] . identifier[timespec] ( identifier[sec] , identifier[int] (( identifier[timeout] - identifier[sec] )* literal[int] )) identifier[timeoutp] = identifier[byref] ( identifier[timeout] ) identifier[event_buffer] =( identifier[libaio] . identifier[io_event] * identifier[nr] )() identifier[actual_nr] = identifier[libaio] . identifier[io_getevents] ( identifier[self] . identifier[_ctx] , identifier[min_nr] , identifier[nr] , identifier[event_buffer] , identifier[timeoutp] , ) keyword[return] [ identifier[self] . identifier[_eventToPython] ( identifier[event_buffer] [ identifier[x] ]) keyword[for] identifier[x] keyword[in] identifier[xrange] ( identifier[actual_nr] ) ]
def getEvents(self, min_nr=1, nr=None, timeout=None):
    """
    Returns a list of event data from submitted IO blocks.

    min_nr (int, None)
        When timeout is None, minimum number of events to collect before
        returning.
        If None, waits for all submitted events.
    nr (int, None)
        Maximum number of events to return.
        If None, set to maxevents given at construction or to the number of
        currently submitted events, whichever is larger.
    timeout (float, None):
        Time to wait for events.
        If None, the call blocks until events are available.

    Returns a list of 3-tuples, containing:
    - completed AIOBlock instance
    - res, file-object-type-dependent value
    - res2, another file-object-type-dependent value
    """
    if min_nr is None:
        min_nr = len(self._submitted) # depends on [control=['if'], data=['min_nr']]
    if nr is None:
        nr = max(len(self._submitted), self._maxevents) # depends on [control=['if'], data=['nr']]
    if timeout is None:
        timeoutp = None # depends on [control=['if'], data=[]]
    else:
        sec = int(timeout)
        timeout = libaio.timespec(sec, int((timeout - sec) * 1000000000.0))
        timeoutp = byref(timeout)
    event_buffer = (libaio.io_event * nr)()
    actual_nr = libaio.io_getevents(self._ctx, min_nr, nr, event_buffer, timeoutp)
    return [self._eventToPython(event_buffer[x]) for x in xrange(actual_nr)]
def play_Track(self, track, channel=1, bpm=120): """Play a Track object.""" self.notify_listeners(self.MSG_PLAY_TRACK, {'track': track, 'channel' : channel, 'bpm': bpm}) for bar in track: res = self.play_Bar(bar, channel, bpm) if res != {}: bpm = res['bpm'] else: return {} return {'bpm': bpm}
def function[play_Track, parameter[self, track, channel, bpm]]: constant[Play a Track object.] call[name[self].notify_listeners, parameter[name[self].MSG_PLAY_TRACK, dictionary[[<ast.Constant object at 0x7da1b1304700>, <ast.Constant object at 0x7da1b1306fb0>, <ast.Constant object at 0x7da1b1307a30>], [<ast.Name object at 0x7da1b13075e0>, <ast.Name object at 0x7da1b1306380>, <ast.Name object at 0x7da1b1305180>]]]] for taget[name[bar]] in starred[name[track]] begin[:] variable[res] assign[=] call[name[self].play_Bar, parameter[name[bar], name[channel], name[bpm]]] if compare[name[res] not_equal[!=] dictionary[[], []]] begin[:] variable[bpm] assign[=] call[name[res]][constant[bpm]] return[dictionary[[<ast.Constant object at 0x7da1b1306890>], [<ast.Name object at 0x7da1b1305960>]]]
keyword[def] identifier[play_Track] ( identifier[self] , identifier[track] , identifier[channel] = literal[int] , identifier[bpm] = literal[int] ): literal[string] identifier[self] . identifier[notify_listeners] ( identifier[self] . identifier[MSG_PLAY_TRACK] ,{ literal[string] : identifier[track] , literal[string] : identifier[channel] , literal[string] : identifier[bpm] }) keyword[for] identifier[bar] keyword[in] identifier[track] : identifier[res] = identifier[self] . identifier[play_Bar] ( identifier[bar] , identifier[channel] , identifier[bpm] ) keyword[if] identifier[res] !={}: identifier[bpm] = identifier[res] [ literal[string] ] keyword[else] : keyword[return] {} keyword[return] { literal[string] : identifier[bpm] }
def play_Track(self, track, channel=1, bpm=120): """Play a Track object.""" self.notify_listeners(self.MSG_PLAY_TRACK, {'track': track, 'channel': channel, 'bpm': bpm}) for bar in track: res = self.play_Bar(bar, channel, bpm) if res != {}: bpm = res['bpm'] # depends on [control=['if'], data=['res']] else: return {} # depends on [control=['for'], data=['bar']] return {'bpm': bpm}
def GetMemMappedMB(self): '''Retrieves the amount of memory that is allocated to the virtual machine. Memory that is ballooned, swapped, or has never been accessed is excluded.''' counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemMappedMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value
def function[GetMemMappedMB, parameter[self]]: constant[Retrieves the amount of memory that is allocated to the virtual machine. Memory that is ballooned, swapped, or has never been accessed is excluded.] variable[counter] assign[=] call[name[c_uint], parameter[]] variable[ret] assign[=] call[name[vmGuestLib].VMGuestLib_GetMemMappedMB, parameter[name[self].handle.value, call[name[byref], parameter[name[counter]]]]] if compare[name[ret] not_equal[!=] name[VMGUESTLIB_ERROR_SUCCESS]] begin[:] <ast.Raise object at 0x7da1b0eee8f0> return[name[counter].value]
keyword[def] identifier[GetMemMappedMB] ( identifier[self] ): literal[string] identifier[counter] = identifier[c_uint] () identifier[ret] = identifier[vmGuestLib] . identifier[VMGuestLib_GetMemMappedMB] ( identifier[self] . identifier[handle] . identifier[value] , identifier[byref] ( identifier[counter] )) keyword[if] identifier[ret] != identifier[VMGUESTLIB_ERROR_SUCCESS] : keyword[raise] identifier[VMGuestLibException] ( identifier[ret] ) keyword[return] identifier[counter] . identifier[value]
def GetMemMappedMB(self): """Retrieves the amount of memory that is allocated to the virtual machine. Memory that is ballooned, swapped, or has never been accessed is excluded.""" counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemMappedMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) # depends on [control=['if'], data=['ret']] return counter.value
def clamp(color, min_v, max_v): """ Clamps a color such that the value is between min_v and max_v. """ h, s, v = rgb_to_hsv(*map(down_scale, color)) min_v, max_v = map(down_scale, (min_v, max_v)) v = min(max(min_v, v), max_v) return tuple(map(up_scale, hsv_to_rgb(h, s, v)))
def function[clamp, parameter[color, min_v, max_v]]: constant[ Clamps a color such that the value is between min_v and max_v. ] <ast.Tuple object at 0x7da18dc99de0> assign[=] call[name[rgb_to_hsv], parameter[<ast.Starred object at 0x7da18dc9b400>]] <ast.Tuple object at 0x7da18dc997e0> assign[=] call[name[map], parameter[name[down_scale], tuple[[<ast.Name object at 0x7da18dc98c10>, <ast.Name object at 0x7da18dc9a680>]]]] variable[v] assign[=] call[name[min], parameter[call[name[max], parameter[name[min_v], name[v]]], name[max_v]]] return[call[name[tuple], parameter[call[name[map], parameter[name[up_scale], call[name[hsv_to_rgb], parameter[name[h], name[s], name[v]]]]]]]]
keyword[def] identifier[clamp] ( identifier[color] , identifier[min_v] , identifier[max_v] ): literal[string] identifier[h] , identifier[s] , identifier[v] = identifier[rgb_to_hsv] (* identifier[map] ( identifier[down_scale] , identifier[color] )) identifier[min_v] , identifier[max_v] = identifier[map] ( identifier[down_scale] ,( identifier[min_v] , identifier[max_v] )) identifier[v] = identifier[min] ( identifier[max] ( identifier[min_v] , identifier[v] ), identifier[max_v] ) keyword[return] identifier[tuple] ( identifier[map] ( identifier[up_scale] , identifier[hsv_to_rgb] ( identifier[h] , identifier[s] , identifier[v] )))
def clamp(color, min_v, max_v): """ Clamps a color such that the value is between min_v and max_v. """ (h, s, v) = rgb_to_hsv(*map(down_scale, color)) (min_v, max_v) = map(down_scale, (min_v, max_v)) v = min(max(min_v, v), max_v) return tuple(map(up_scale, hsv_to_rgb(h, s, v)))
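A hedged usage sketch for clamp as defined in the record above: colorsys provides the HSV conversions it references, and up_scale/down_scale are assumed helpers mapping between 0-255 channel ints and 0.0-1.0 floats:

from colorsys import hsv_to_rgb, rgb_to_hsv

def down_scale(x):
    # Assumed helper: 0-255 channel int -> 0.0-1.0 float.
    return x / 255.0

def up_scale(x):
    # Assumed helper: 0.0-1.0 float -> 0-255 channel int.
    return int(round(x * 255))

print(clamp((255, 0, 0), 100, 200))  # (200, 0, 0): the value channel is capped at 200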
def show(cls, msg=None): """ Show the log interface on the page. """ if msg: cls.add(msg) cls.overlay.show() cls.overlay.el.bind("click", lambda x: cls.hide()) cls.el.style.display = "block" cls.bind()
def function[show, parameter[cls, msg]]: constant[ Show the log interface on the page. ] if name[msg] begin[:] call[name[cls].add, parameter[name[msg]]] call[name[cls].overlay.show, parameter[]] call[name[cls].overlay.el.bind, parameter[constant[click], <ast.Lambda object at 0x7da1b09ccb80>]] name[cls].el.style.display assign[=] constant[block] call[name[cls].bind, parameter[]]
keyword[def] identifier[show] ( identifier[cls] , identifier[msg] = keyword[None] ): literal[string] keyword[if] identifier[msg] : identifier[cls] . identifier[add] ( identifier[msg] ) identifier[cls] . identifier[overlay] . identifier[show] () identifier[cls] . identifier[overlay] . identifier[el] . identifier[bind] ( literal[string] , keyword[lambda] identifier[x] : identifier[cls] . identifier[hide] ()) identifier[cls] . identifier[el] . identifier[style] . identifier[display] = literal[string] identifier[cls] . identifier[bind] ()
def show(cls, msg=None): """ Show the log interface on the page. """ if msg: cls.add(msg) # depends on [control=['if'], data=[]] cls.overlay.show() cls.overlay.el.bind('click', lambda x: cls.hide()) cls.el.style.display = 'block' cls.bind()
def delete(ctx, opts, owner_repo_identifier, yes): """ Delete an entitlement from a repository. - OWNER/REPO/IDENTIFIER: Specify the OWNER namespace (i.e. user or org), and the REPO name that has an entitlement identified by IDENTIFIER. All separated by a slash. Example: 'your-org/your-repo/abcdef123456' Full CLI example: $ cloudsmith ents delete your-org/your-repo/abcdef123456 """ owner, repo, identifier = owner_repo_identifier delete_args = { "identifier": click.style(identifier, bold=True), "repository": click.style(repo, bold=True), } prompt = ( "delete the %(identifier)s entitlement from the %(repository)s " "repository" % delete_args ) if not utils.confirm_operation(prompt, assume_yes=yes): return click.secho( "Deleting %(identifier)s entitlement from the %(repository)s " "repository ... " % delete_args, nl=False, ) context_msg = "Failed to delete the entitlement!" with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg): with maybe_spinner(opts): api.delete_entitlement(owner=owner, repo=repo, identifier=identifier) click.secho("OK", fg="green")
def function[delete, parameter[ctx, opts, owner_repo_identifier, yes]]: constant[ Delete an entitlement from a repository. - OWNER/REPO/IDENTIFIER: Specify the OWNER namespace (i.e. user or org), and the REPO name that has an entitlement identified by IDENTIFIER. All separated by a slash. Example: 'your-org/your-repo/abcdef123456' Full CLI example: $ cloudsmith ents delete your-org/your-repo/abcdef123456 ] <ast.Tuple object at 0x7da1b19d0f10> assign[=] name[owner_repo_identifier] variable[delete_args] assign[=] dictionary[[<ast.Constant object at 0x7da1b19d33d0>, <ast.Constant object at 0x7da1b19d1540>], [<ast.Call object at 0x7da1b19d3e80>, <ast.Call object at 0x7da1b19d35b0>]] variable[prompt] assign[=] binary_operation[constant[delete the %(identifier)s entitlement from the %(repository)s repository] <ast.Mod object at 0x7da2590d6920> name[delete_args]] if <ast.UnaryOp object at 0x7da1b19d3b80> begin[:] return[None] call[name[click].secho, parameter[binary_operation[constant[Deleting %(identifier)s entitlement from the %(repository)s repository ... ] <ast.Mod object at 0x7da2590d6920> name[delete_args]]]] variable[context_msg] assign[=] constant[Failed to delete the entitlement!] with call[name[handle_api_exceptions], parameter[name[ctx]]] begin[:] with call[name[maybe_spinner], parameter[name[opts]]] begin[:] call[name[api].delete_entitlement, parameter[]] call[name[click].secho, parameter[constant[OK]]]
keyword[def] identifier[delete] ( identifier[ctx] , identifier[opts] , identifier[owner_repo_identifier] , identifier[yes] ): literal[string] identifier[owner] , identifier[repo] , identifier[identifier] = identifier[owner_repo_identifier] identifier[delete_args] ={ literal[string] : identifier[click] . identifier[style] ( identifier[identifier] , identifier[bold] = keyword[True] ), literal[string] : identifier[click] . identifier[style] ( identifier[repo] , identifier[bold] = keyword[True] ), } identifier[prompt] =( literal[string] literal[string] % identifier[delete_args] ) keyword[if] keyword[not] identifier[utils] . identifier[confirm_operation] ( identifier[prompt] , identifier[assume_yes] = identifier[yes] ): keyword[return] identifier[click] . identifier[secho] ( literal[string] literal[string] % identifier[delete_args] , identifier[nl] = keyword[False] , ) identifier[context_msg] = literal[string] keyword[with] identifier[handle_api_exceptions] ( identifier[ctx] , identifier[opts] = identifier[opts] , identifier[context_msg] = identifier[context_msg] ): keyword[with] identifier[maybe_spinner] ( identifier[opts] ): identifier[api] . identifier[delete_entitlement] ( identifier[owner] = identifier[owner] , identifier[repo] = identifier[repo] , identifier[identifier] = identifier[identifier] ) identifier[click] . identifier[secho] ( literal[string] , identifier[fg] = literal[string] )
def delete(ctx, opts, owner_repo_identifier, yes): """ Delete an entitlement from a repository. - OWNER/REPO/IDENTIFIER: Specify the OWNER namespace (i.e. user or org), and the REPO name that has an entitlement identified by IDENTIFIER. All separated by a slash. Example: 'your-org/your-repo/abcdef123456' Full CLI example: $ cloudsmith ents delete your-org/your-repo/abcdef123456 """ (owner, repo, identifier) = owner_repo_identifier delete_args = {'identifier': click.style(identifier, bold=True), 'repository': click.style(repo, bold=True)} prompt = 'delete the %(identifier)s entitlement from the %(repository)s repository' % delete_args if not utils.confirm_operation(prompt, assume_yes=yes): return # depends on [control=['if'], data=[]] click.secho('Deleting %(identifier)s entitlement from the %(repository)s repository ... ' % delete_args, nl=False) context_msg = 'Failed to delete the entitlement!' with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg): with maybe_spinner(opts): api.delete_entitlement(owner=owner, repo=repo, identifier=identifier) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] click.secho('OK', fg='green')
def make_movie(structures, output_filename="movie.mp4", zoom=1.0, fps=20, bitrate="10000k", quality=1, **kwargs): """ Generate a movie from a sequence of structures using vtk and ffmpeg. Args: structures ([Structure]): sequence of structures output_filename (str): filename for structure output. defaults to movie.mp4 zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0. fps (int): Frames per second for the movie. Defaults to 20. bitrate (str): Video bitate. Defaults to "10000k" (fairly high quality). quality (int): A quality scale. Defaults to 1. \\*\\*kwargs: Any kwargs supported by StructureVis to modify the images generated. """ vis = StructureVis(**kwargs) vis.show_help = False vis.redraw() vis.zoom(zoom) sigfig = int(math.floor(math.log10(len(structures))) + 1) filename = "image{0:0" + str(sigfig) + "d}.png" for i, s in enumerate(structures): vis.set_structure(s) vis.write_image(filename.format(i), 3) filename = "image%0" + str(sigfig) + "d.png" args = ["ffmpeg", "-y", "-i", filename, "-q:v", str(quality), "-r", str(fps), "-b:v", str(bitrate), output_filename] subprocess.Popen(args)
def function[make_movie, parameter[structures, output_filename, zoom, fps, bitrate, quality]]: constant[ Generate a movie from a sequence of structures using vtk and ffmpeg. Args: structures ([Structure]): sequence of structures output_filename (str): filename for structure output. defaults to movie.mp4 zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0. fps (int): Frames per second for the movie. Defaults to 20. bitrate (str): Video bitrate. Defaults to "10000k" (fairly high quality). quality (int): A quality scale. Defaults to 1. \*\*kwargs: Any kwargs supported by StructureVis to modify the images generated. ] variable[vis] assign[=] call[name[StructureVis], parameter[]] name[vis].show_help assign[=] constant[False] call[name[vis].redraw, parameter[]] call[name[vis].zoom, parameter[name[zoom]]] variable[sigfig] assign[=] call[name[int], parameter[binary_operation[call[name[math].floor, parameter[call[name[math].log10, parameter[call[name[len], parameter[name[structures]]]]]]] + constant[1]]]] variable[filename] assign[=] binary_operation[binary_operation[constant[image{0:0] + call[name[str], parameter[name[sigfig]]]] + constant[d}.png]] for taget[tuple[[<ast.Name object at 0x7da204344880>, <ast.Name object at 0x7da204344a00>]]] in starred[call[name[enumerate], parameter[name[structures]]]] begin[:] call[name[vis].set_structure, parameter[name[s]]] call[name[vis].write_image, parameter[call[name[filename].format, parameter[name[i]]], constant[3]]] variable[filename] assign[=] binary_operation[binary_operation[constant[image%0] + call[name[str], parameter[name[sigfig]]]] + constant[d.png]] variable[args] assign[=] list[[<ast.Constant object at 0x7da18c4ceef0>, <ast.Constant object at 0x7da18c4ccfd0>, <ast.Constant object at 0x7da18c4cf6d0>, <ast.Name object at 0x7da1b2344340>, <ast.Constant object at 0x7da1b2344970>, <ast.Call object at 0x7da1b2344c70>, <ast.Constant object at 0x7da1b2345fc0>, <ast.Call object at 0x7da1b2346590>, <ast.Constant object at 0x7da1b2345e70>, <ast.Call object at 0x7da1b2344d90>, <ast.Name object at 0x7da1b2345ae0>]] call[name[subprocess].Popen, parameter[name[args]]]
keyword[def] identifier[make_movie] ( identifier[structures] , identifier[output_filename] = literal[string] , identifier[zoom] = literal[int] , identifier[fps] = literal[int] , identifier[bitrate] = literal[string] , identifier[quality] = literal[int] ,** identifier[kwargs] ): literal[string] identifier[vis] = identifier[StructureVis] (** identifier[kwargs] ) identifier[vis] . identifier[show_help] = keyword[False] identifier[vis] . identifier[redraw] () identifier[vis] . identifier[zoom] ( identifier[zoom] ) identifier[sigfig] = identifier[int] ( identifier[math] . identifier[floor] ( identifier[math] . identifier[log10] ( identifier[len] ( identifier[structures] )))+ literal[int] ) identifier[filename] = literal[string] + identifier[str] ( identifier[sigfig] )+ literal[string] keyword[for] identifier[i] , identifier[s] keyword[in] identifier[enumerate] ( identifier[structures] ): identifier[vis] . identifier[set_structure] ( identifier[s] ) identifier[vis] . identifier[write_image] ( identifier[filename] . identifier[format] ( identifier[i] ), literal[int] ) identifier[filename] = literal[string] + identifier[str] ( identifier[sigfig] )+ literal[string] identifier[args] =[ literal[string] , literal[string] , literal[string] , identifier[filename] , literal[string] , identifier[str] ( identifier[quality] ), literal[string] , identifier[str] ( identifier[fps] ), literal[string] , identifier[str] ( identifier[bitrate] ), identifier[output_filename] ] identifier[subprocess] . identifier[Popen] ( identifier[args] )
def make_movie(structures, output_filename='movie.mp4', zoom=1.0, fps=20, bitrate='10000k', quality=1, **kwargs):
    """
    Generate a movie from a sequence of structures using vtk and ffmpeg.

    Args:
        structures ([Structure]): sequence of structures
        output_filename (str): filename for structure output. defaults to
            movie.mp4
        zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0.
        fps (int): Frames per second for the movie. Defaults to 20.
        bitrate (str): Video bitrate. Defaults to "10000k" (fairly high
            quality).
        quality (int): A quality scale. Defaults to 1.
        \\*\\*kwargs: Any kwargs supported by StructureVis to modify the images
            generated.
    """
    vis = StructureVis(**kwargs)
    vis.show_help = False
    vis.redraw()
    vis.zoom(zoom)
    sigfig = int(math.floor(math.log10(len(structures))) + 1)
    filename = 'image{0:0' + str(sigfig) + 'd}.png'
    for (i, s) in enumerate(structures):
        vis.set_structure(s)
        vis.write_image(filename.format(i), 3) # depends on [control=['for'], data=[]]
    filename = 'image%0' + str(sigfig) + 'd.png'
    args = ['ffmpeg', '-y', '-i', filename, '-q:v', str(quality), '-r', str(fps), '-b:v', str(bitrate), output_filename]
    subprocess.Popen(args)
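For reference, a quick check of the frame-name arithmetic and the ffmpeg argument list make_movie would build for, say, 12 structures (sigfig comes out to 2, so frames run image00.png through image11.png):

import math

n_structures = 12
sigfig = int(math.floor(math.log10(n_structures)) + 1)  # 2
pattern = "image%0" + str(sigfig) + "d.png"             # image%02d.png
args = ["ffmpeg", "-y", "-i", pattern, "-q:v", "1", "-r", "20",
        "-b:v", "10000k", "movie.mp4"]
print(sigfig, pattern, args)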
def query_alternative_short_name():
    """
    Returns a list of alternative short names matching the query

    query parameters
    ---
    tags:
      - Query functions
    parameters:
      - name: name
        in: query
        type: string
        required: false
        description: Alternative short name
        default: CVAP
      - name: entry_name
        in: query
        type: string
        required: false
        description: UniProt entry name
        default: A4_HUMAN
      - name: limit
        in: query
        type: integer
        required: false
        description: maximum number of results to return
        default: 10
    """
    args = get_args(
        request_args=request.args,
        allowed_str_args=['name', 'entry_name'],
        allowed_int_args=['limit']
    )

    return jsonify(query.alternative_short_name(**args))
def function[query_alternative_short_name, parameter[]]: constant[ Returns a list of alternative short names matching the query query parameters --- tags: - Query functions parameters: - name: name in: query type: string required: false description: Alternative short name default: CVAP - name: entry_name in: query type: string required: false description: UniProt entry name default: A4_HUMAN - name: limit in: query type: integer required: false description: maximum number of results to return default: 10 ] variable[args] assign[=] call[name[get_args], parameter[]] return[call[name[jsonify], parameter[call[name[query].alternative_short_name, parameter[]]]]]
keyword[def] identifier[query_alternative_short_name] (): literal[string] identifier[args] = identifier[get_args] ( identifier[request_args] = identifier[request] . identifier[args] , identifier[allowed_str_args] =[ literal[string] , literal[string] ], identifier[allowed_int_args] =[ literal[string] ] ) keyword[return] identifier[jsonify] ( identifier[query] . identifier[alternative_short_name] (** identifier[args] ))
def query_alternative_short_name():
    """
    Returns a list of alternative short names matching the query

    query parameters
    ---
    tags:
      - Query functions
    parameters:
      - name: name
        in: query
        type: string
        required: false
        description: Alternative short name
        default: CVAP
      - name: entry_name
        in: query
        type: string
        required: false
        description: UniProt entry name
        default: A4_HUMAN
      - name: limit
        in: query
        type: integer
        required: false
        description: maximum number of results to return
        default: 10
    """
    args = get_args(request_args=request.args, allowed_str_args=['name', 'entry_name'], allowed_int_args=['limit'])
    return jsonify(query.alternative_short_name(**args))
def rpoplpush(self, source, destination): """Emulate rpoplpush""" transfer_item = self.rpop(source) if transfer_item is not None: self.lpush(destination, transfer_item) return transfer_item
def function[rpoplpush, parameter[self, source, destination]]: constant[Emulate rpoplpush] variable[transfer_item] assign[=] call[name[self].rpop, parameter[name[source]]] if compare[name[transfer_item] is_not constant[None]] begin[:] call[name[self].lpush, parameter[name[destination], name[transfer_item]]] return[name[transfer_item]]
keyword[def] identifier[rpoplpush] ( identifier[self] , identifier[source] , identifier[destination] ): literal[string] identifier[transfer_item] = identifier[self] . identifier[rpop] ( identifier[source] ) keyword[if] identifier[transfer_item] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[lpush] ( identifier[destination] , identifier[transfer_item] ) keyword[return] identifier[transfer_item]
def rpoplpush(self, source, destination): """Emulate rpoplpush""" transfer_item = self.rpop(source) if transfer_item is not None: self.lpush(destination, transfer_item) # depends on [control=['if'], data=['transfer_item']] return transfer_item
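A tiny in-memory stand-in showing the semantics rpoplpush emulates: pop from the right end of source, push onto the left end of destination:

store = {"src": ["a", "b", "c"], "dst": []}

def rpop(key):
    return store[key].pop() if store.get(key) else None

def lpush(key, value):
    store.setdefault(key, []).insert(0, value)

item = rpop("src")
if item is not None:
    lpush("dst", item)
print(item, store)  # c {'src': ['a', 'b'], 'dst': ['c']}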
def list(self, device=values.unset, sim=values.unset, status=values.unset, direction=values.unset, limit=None, page_size=None): """ Lists CommandInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode device: The device :param unicode sim: The sim :param unicode status: The status :param unicode direction: The direction :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.wireless.command.CommandInstance] """ return list(self.stream( device=device, sim=sim, status=status, direction=direction, limit=limit, page_size=page_size, ))
def function[list, parameter[self, device, sim, status, direction, limit, page_size]]: constant[ Lists CommandInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode device: The device :param unicode sim: The sim :param unicode status: The status :param unicode direction: The direction :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.wireless.command.CommandInstance] ] return[call[name[list], parameter[call[name[self].stream, parameter[]]]]]
keyword[def] identifier[list] ( identifier[self] , identifier[device] = identifier[values] . identifier[unset] , identifier[sim] = identifier[values] . identifier[unset] , identifier[status] = identifier[values] . identifier[unset] , identifier[direction] = identifier[values] . identifier[unset] , identifier[limit] = keyword[None] , identifier[page_size] = keyword[None] ): literal[string] keyword[return] identifier[list] ( identifier[self] . identifier[stream] ( identifier[device] = identifier[device] , identifier[sim] = identifier[sim] , identifier[status] = identifier[status] , identifier[direction] = identifier[direction] , identifier[limit] = identifier[limit] , identifier[page_size] = identifier[page_size] , ))
def list(self, device=values.unset, sim=values.unset, status=values.unset, direction=values.unset, limit=None, page_size=None): """ Lists CommandInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode device: The device :param unicode sim: The sim :param unicode status: The status :param unicode direction: The direction :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.wireless.command.CommandInstance] """ return list(self.stream(device=device, sim=sim, status=status, direction=direction, limit=limit, page_size=page_size))
def copy(self, source_path, dest_path, account=None, group_name=None):
        """Copy a file from one path to another. The Azure URL format is
        https://myaccount.blob.core.windows.net/mycontainer/myblob.

         Args:
             source_path(str): The path of the file to be copied.
             dest_path(str): The destination path where the file will be placed.
         Raises:
             :exc:`~..OsmosisError`: if the file is not uploaded correctly.

        """
        if 'core.windows.net' not in source_path and 'core.windows.net' not in dest_path:
            self.logger.error("Source or destination must be an Azure storage URL (format "
                              "https://myaccount.blob.core.windows.net/mycontainer/myblob")
            raise OsmosisError
        # Check if source exists and can read
        if 'core.windows.net' in source_path:
            parse_url = _parse_url(source_path)
            key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[
                0].value
            if parse_url.file_type == 'blob':
                bs = BlockBlobService(account_name=parse_url.account, account_key=key)
                return bs.get_blob_to_path(parse_url.container_or_share_name, parse_url.file, dest_path)
            elif parse_url.file_type == 'file':
                fs = FileService(account_name=parse_url.account, account_key=key)
                return fs.get_file_to_path(parse_url.container_or_share_name, parse_url.path, parse_url.file,
                                           dest_path)
            else:
                raise ValueError("This Azure storage type is not valid. It should be blob or file.")
        else:
            parse_url = _parse_url(dest_path)
            key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[
                0].value
            if parse_url.file_type == 'blob':
                bs = BlockBlobService(account_name=parse_url.account, account_key=key)
                return bs.create_blob_from_path(parse_url.container_or_share_name, parse_url.file, source_path)
            elif parse_url.file_type == 'file':
                fs = FileService(account_name=parse_url.account, account_key=key)
                return fs.create_file_from_path(parse_url.container_or_share_name, parse_url.path, parse_url.file,
                                                source_path)
            else:
                raise ValueError("This Azure storage type is not valid. It should be blob or file.")
def function[copy, parameter[self, source_path, dest_path, account, group_name]]: constant[Copy a file from one path to another. The Azure URL format is https://myaccount.blob.core.windows.net/mycontainer/myblob. Args: source_path(str): The path of the file to be copied. dest_path(str): The destination path where the file will be placed. Raises: :exc:`~..OsmosisError`: if the file is not uploaded correctly. ] if <ast.BoolOp object at 0x7da1b1712cb0> begin[:] call[name[self].logger.error, parameter[constant[Source or destination must be an Azure storage URL (format https://myaccount.blob.core.windows.net/mycontainer/myblob]]] <ast.Raise object at 0x7da1b1712f20> if compare[constant[core.windows.net] in name[source_path]] begin[:] variable[parse_url] assign[=] call[name[_parse_url], parameter[name[source_path]]] variable[key] assign[=] call[call[name[self].storage_client.storage_accounts.list_keys, parameter[name[self].resource_group_name, name[parse_url].account]].keys][constant[0]].value if compare[name[parse_url].file_type equal[==] constant[blob]] begin[:] variable[bs] assign[=] call[name[BlockBlobService], parameter[]] return[call[name[bs].get_blob_to_path, parameter[name[parse_url].container_or_share_name, name[parse_url].file, name[dest_path]]]]
keyword[def] identifier[copy] ( identifier[self] , identifier[source_path] , identifier[dest_path] , identifier[account] = keyword[None] , identifier[group_name] = keyword[None] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[source_path] keyword[and] literal[string] keyword[not] keyword[in] identifier[dest_path] : identifier[self] . identifier[logger] . identifier[error] ( literal[string] literal[string] ) keyword[raise] identifier[OsmosisError] keyword[if] literal[string] keyword[in] identifier[source_path] : identifier[parse_url] = identifier[_parse_url] ( identifier[source_path] ) identifier[key] = identifier[self] . identifier[storage_client] . identifier[storage_accounts] . identifier[list_keys] ( identifier[self] . identifier[resource_group_name] , identifier[parse_url] . identifier[account] ). identifier[keys] [ literal[int] ]. identifier[value] keyword[if] identifier[parse_url] . identifier[file_type] == literal[string] : identifier[bs] = identifier[BlockBlobService] ( identifier[account_name] = identifier[parse_url] . identifier[account] , identifier[account_key] = identifier[key] ) keyword[return] identifier[bs] . identifier[get_blob_to_path] ( identifier[parse_url] . identifier[container_or_share_name] , identifier[parse_url] . identifier[file] , identifier[dest_path] ) keyword[elif] identifier[parse_url] . identifier[file_type] == literal[string] : identifier[fs] = identifier[FileService] ( identifier[account_name] = identifier[parse_url] . identifier[account] , identifier[account_key] = identifier[key] ) keyword[return] identifier[fs] . identifier[get_file_to_path] ( identifier[parse_url] . identifier[container_or_share_name] , identifier[parse_url] . identifier[path] , identifier[parse_url] . identifier[file] , identifier[dest_path] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : identifier[parse_url] = identifier[_parse_url] ( identifier[dest_path] ) identifier[key] = identifier[self] . identifier[storage_client] . identifier[storage_accounts] . identifier[list_keys] ( identifier[self] . identifier[resource_group_name] , identifier[parse_url] . identifier[account] ). identifier[keys] [ literal[int] ]. identifier[value] keyword[if] identifier[parse_url] . identifier[file_type] == literal[string] : identifier[bs] = identifier[BlockBlobService] ( identifier[account_name] = identifier[parse_url] . identifier[account] , identifier[account_key] = identifier[key] ) keyword[return] identifier[bs] . identifier[create_blob_from_path] ( identifier[parse_url] . identifier[container_or_share_name] , identifier[parse_url] . identifier[file] , identifier[source_path] ) keyword[elif] identifier[parse_url] . identifier[file_type] == literal[string] : identifier[fs] = identifier[FileService] ( identifier[account_name] = identifier[parse_url] . identifier[account] , identifier[account_key] = identifier[key] ) keyword[return] identifier[fs] . identifier[create_file_from_path] ( identifier[parse_url] . identifier[container_or_share_name] , identifier[parse_url] . identifier[path] , identifier[parse_url] . identifier[file] , identifier[source_path] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] )
def copy(self, source_path, dest_path, account=None, group_name=None):
    """Copy a file from one path to another. The Azure URL format is https://myaccount.blob.core.windows.net/mycontainer/myblob.

         Args:
             source_path(str): The path of the file to be copied.
             dest_path(str): The destination path where the file will be placed.
         Raises:
             :exc:`~..OsmosisError`: if the file is not uploaded correctly.

        """
    if 'core.windows.net' not in source_path and 'core.windows.net' not in dest_path:
        self.logger.error('Source or destination must be an Azure storage URL (format https://myaccount.blob.core.windows.net/mycontainer/myblob')
        raise OsmosisError # depends on [control=['if'], data=[]]
    # Check if source exists and can read
    if 'core.windows.net' in source_path:
        parse_url = _parse_url(source_path)
        key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[0].value
        if parse_url.file_type == 'blob':
            bs = BlockBlobService(account_name=parse_url.account, account_key=key)
            return bs.get_blob_to_path(parse_url.container_or_share_name, parse_url.file, dest_path) # depends on [control=['if'], data=[]]
        elif parse_url.file_type == 'file':
            fs = FileService(account_name=parse_url.account, account_key=key)
            return fs.get_file_to_path(parse_url.container_or_share_name, parse_url.path, parse_url.file, dest_path) # depends on [control=['if'], data=[]]
        else:
            raise ValueError('This Azure storage type is not valid. It should be blob or file.') # depends on [control=['if'], data=['source_path']]
    else:
        parse_url = _parse_url(dest_path)
        key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, parse_url.account).keys[0].value
        if parse_url.file_type == 'blob':
            bs = BlockBlobService(account_name=parse_url.account, account_key=key)
            return bs.create_blob_from_path(parse_url.container_or_share_name, parse_url.file, source_path) # depends on [control=['if'], data=[]]
        elif parse_url.file_type == 'file':
            fs = FileService(account_name=parse_url.account, account_key=key)
            return fs.create_file_from_path(parse_url.container_or_share_name, parse_url.path, parse_url.file, source_path) # depends on [control=['if'], data=[]]
        else:
            raise ValueError('This Azure storage type is not valid. It should be blob or file.')
def show_management_certificate(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0

    Return information about a management_certificate

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_management_certificate my-azure name=my_management_certificate \\
            thumbalgorithm=sha1 thumbprint=0123456789ABCDEF
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_management_certificate function must be called with -f or --function.'
        )

    if not conn:
        conn = get_conn()

    if kwargs is None:
        kwargs = {}

    if 'thumbprint' not in kwargs:
        raise SaltCloudSystemExit('A thumbprint must be specified as "thumbprint"')

    data = conn.get_management_certificate(kwargs['thumbprint'])
    return object_to_dict(data)
def function[show_management_certificate, parameter[kwargs, conn, call]]: constant[ .. versionadded:: 2015.8.0 Return information about a management_certificate CLI Example: .. code-block:: bash salt-cloud -f show_management_certificate my-azure name=my_management_certificate \ thumbalgorithm=sha1 thumbprint=0123456789ABCDEF ] if compare[name[call] not_equal[!=] constant[function]] begin[:] <ast.Raise object at 0x7da18f721d80> if <ast.UnaryOp object at 0x7da18f721f30> begin[:] variable[conn] assign[=] call[name[get_conn], parameter[]] if compare[name[kwargs] is constant[None]] begin[:] variable[kwargs] assign[=] dictionary[[], []] if compare[constant[thumbprint] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] <ast.Raise object at 0x7da18f723df0> variable[data] assign[=] call[name[conn].get_management_certificate, parameter[call[name[kwargs]][constant[thumbprint]]]] return[call[name[object_to_dict], parameter[name[data]]]]
keyword[def] identifier[show_management_certificate] ( identifier[kwargs] = keyword[None] , identifier[conn] = keyword[None] , identifier[call] = keyword[None] ): literal[string] keyword[if] identifier[call] != literal[string] : keyword[raise] identifier[SaltCloudSystemExit] ( literal[string] ) keyword[if] keyword[not] identifier[conn] : identifier[conn] = identifier[get_conn] () keyword[if] identifier[kwargs] keyword[is] keyword[None] : identifier[kwargs] ={} keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : keyword[raise] identifier[SaltCloudSystemExit] ( literal[string] ) identifier[data] = identifier[conn] . identifier[get_management_certificate] ( identifier[kwargs] [ literal[string] ]) keyword[return] identifier[object_to_dict] ( identifier[data] )
def show_management_certificate(kwargs=None, conn=None, call=None): """ .. versionadded:: 2015.8.0 Return information about a management_certificate CLI Example: .. code-block:: bash salt-cloud -f get_management_certificate my-azure name=my_management_certificate \\ thumbalgorithm=sha1 thumbprint=0123456789ABCDEF """ if call != 'function': raise SaltCloudSystemExit('The get_management_certificate function must be called with -f or --function.') # depends on [control=['if'], data=[]] if not conn: conn = get_conn() # depends on [control=['if'], data=[]] if kwargs is None: kwargs = {} # depends on [control=['if'], data=['kwargs']] if 'thumbprint' not in kwargs: raise SaltCloudSystemExit('A thumbprint must be specified as "thumbprint"') # depends on [control=['if'], data=[]] data = conn.get_management_certificate(kwargs['thumbprint']) return object_to_dict(data)
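A minimal programmatic sketch of calling the salt-cloud function above the way its -f dispatcher would, assuming cloud credentials are already configured so get_conn() succeeds:

# call='function' is mandatory and 'thumbprint' must be present in kwargs.
cert_info = show_management_certificate(
    kwargs={'thumbprint': '0123456789ABCDEF'},
    call='function',
)
print(cert_info)  # a plain dict produced by object_to_dict()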
def from_edge_pairs(cls, vertices, edge_pairs): """ Create a DirectedGraph from a collection of vertices and a collection of pairs giving links between the vertices. """ vertices = set(vertices) edges = set() heads = {} tails = {} # Number the edges arbitrarily. edge_identifier = itertools.count() for tail, head in edge_pairs: edge = next(edge_identifier) edges.add(edge) heads[edge] = head tails[edge] = tail return cls._raw( vertices=vertices, edges=edges, heads=heads, tails=tails, )
def function[from_edge_pairs, parameter[cls, vertices, edge_pairs]]: constant[ Create a DirectedGraph from a collection of vertices and a collection of pairs giving links between the vertices. ] variable[vertices] assign[=] call[name[set], parameter[name[vertices]]] variable[edges] assign[=] call[name[set], parameter[]] variable[heads] assign[=] dictionary[[], []] variable[tails] assign[=] dictionary[[], []] variable[edge_identifier] assign[=] call[name[itertools].count, parameter[]] for taget[tuple[[<ast.Name object at 0x7da18ede7fd0>, <ast.Name object at 0x7da18ede7d60>]]] in starred[name[edge_pairs]] begin[:] variable[edge] assign[=] call[name[next], parameter[name[edge_identifier]]] call[name[edges].add, parameter[name[edge]]] call[name[heads]][name[edge]] assign[=] name[head] call[name[tails]][name[edge]] assign[=] name[tail] return[call[name[cls]._raw, parameter[]]]
keyword[def] identifier[from_edge_pairs] ( identifier[cls] , identifier[vertices] , identifier[edge_pairs] ): literal[string] identifier[vertices] = identifier[set] ( identifier[vertices] ) identifier[edges] = identifier[set] () identifier[heads] ={} identifier[tails] ={} identifier[edge_identifier] = identifier[itertools] . identifier[count] () keyword[for] identifier[tail] , identifier[head] keyword[in] identifier[edge_pairs] : identifier[edge] = identifier[next] ( identifier[edge_identifier] ) identifier[edges] . identifier[add] ( identifier[edge] ) identifier[heads] [ identifier[edge] ]= identifier[head] identifier[tails] [ identifier[edge] ]= identifier[tail] keyword[return] identifier[cls] . identifier[_raw] ( identifier[vertices] = identifier[vertices] , identifier[edges] = identifier[edges] , identifier[heads] = identifier[heads] , identifier[tails] = identifier[tails] , )
def from_edge_pairs(cls, vertices, edge_pairs): """ Create a DirectedGraph from a collection of vertices and a collection of pairs giving links between the vertices. """ vertices = set(vertices) edges = set() heads = {} tails = {} # Number the edges arbitrarily. edge_identifier = itertools.count() for (tail, head) in edge_pairs: edge = next(edge_identifier) edges.add(edge) heads[edge] = head tails[edge] = tail # depends on [control=['for'], data=[]] return cls._raw(vertices=vertices, edges=edges, heads=heads, tails=tails)
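A minimal sketch of the classmethod above; DirectedGraph is assumed to be the owning class:

# Each pair is read as (tail, head); edge ids are assigned internally
# by itertools.count(), so callers never see them.
g = DirectedGraph.from_edge_pairs(
    vertices=['a', 'b', 'c'],
    edge_pairs=[('a', 'b'), ('b', 'c'), ('c', 'a')],
)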
def set_message_last_post(cr, uid, pool, models): """ Given a list of models, set their 'message_last_post' fields to an estimated last post datetime. To be called in post-migration scripts :param cr: database cursor :param uid: user id, assumed to be openerp.SUPERUSER_ID :param pool: orm pool, assumed to be openerp.pooler.get_pool(cr.dbname) :param models: a list of model names for which 'message_last_post' needs \ to be filled :return: """ if type(models) is not list: models = [models] for model in models: model_pool = pool[model] cr.execute( "UPDATE {table} " "SET message_last_post=(SELECT max(mm.date) " "FROM mail_message mm " "WHERE mm.model=%s " "AND mm.date IS NOT NULL " "AND mm.res_id={table}.id)".format( table=model_pool._table), (model,) )
def function[set_message_last_post, parameter[cr, uid, pool, models]]: constant[ Given a list of models, set their 'message_last_post' fields to an estimated last post datetime. To be called in post-migration scripts :param cr: database cursor :param uid: user id, assumed to be openerp.SUPERUSER_ID :param pool: orm pool, assumed to be openerp.pooler.get_pool(cr.dbname) :param models: a list of model names for which 'message_last_post' needs to be filled :return: ] if compare[call[name[type], parameter[name[models]]] is_not name[list]] begin[:] variable[models] assign[=] list[[<ast.Name object at 0x7da20e9b19c0>]] for taget[name[model]] in starred[name[models]] begin[:] variable[model_pool] assign[=] call[name[pool]][name[model]] call[name[cr].execute, parameter[call[constant[UPDATE {table} SET message_last_post=(SELECT max(mm.date) FROM mail_message mm WHERE mm.model=%s AND mm.date IS NOT NULL AND mm.res_id={table}.id)].format, parameter[]], tuple[[<ast.Name object at 0x7da20e9b37c0>]]]]
keyword[def] identifier[set_message_last_post] ( identifier[cr] , identifier[uid] , identifier[pool] , identifier[models] ): literal[string] keyword[if] identifier[type] ( identifier[models] ) keyword[is] keyword[not] identifier[list] : identifier[models] =[ identifier[models] ] keyword[for] identifier[model] keyword[in] identifier[models] : identifier[model_pool] = identifier[pool] [ identifier[model] ] identifier[cr] . identifier[execute] ( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[table] = identifier[model_pool] . identifier[_table] ),( identifier[model] ,) )
def set_message_last_post(cr, uid, pool, models): """ Given a list of models, set their 'message_last_post' fields to an estimated last post datetime. To be called in post-migration scripts :param cr: database cursor :param uid: user id, assumed to be openerp.SUPERUSER_ID :param pool: orm pool, assumed to be openerp.pooler.get_pool(cr.dbname) :param models: a list of model names for which 'message_last_post' needs to be filled :return: """ if type(models) is not list: models = [models] # depends on [control=['if'], data=[]] for model in models: model_pool = pool[model] cr.execute('UPDATE {table} SET message_last_post=(SELECT max(mm.date) FROM mail_message mm WHERE mm.model=%s AND mm.date IS NOT NULL AND mm.res_id={table}.id)'.format(table=model_pool._table), (model,)) # depends on [control=['for'], data=['model']]
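A hedged sketch of a post-migration script using the helper above, following the docstring's own assumptions about cr, uid, and pool; the model names are illustrative:

from openerp import pooler, SUPERUSER_ID

def migrate(cr, version):
    pool = pooler.get_pool(cr.dbname)
    # A single model name or a list of names is accepted.
    set_message_last_post(cr, SUPERUSER_ID, pool, ['res.partner', 'crm.lead'])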
def write(self, path=None): """ Write all of the HostsEntry instances back to the hosts file :param path: override the write path :return: Dictionary containing counts """ written_count = 0 comments_written = 0 blanks_written = 0 ipv4_entries_written = 0 ipv6_entries_written = 0 if path: output_file_path = path else: output_file_path = self.hosts_path try: with open(output_file_path, 'w') as hosts_file: for written_count, line in enumerate(self.entries): if line.entry_type == 'comment': hosts_file.write(line.comment + "\n") comments_written += 1 if line.entry_type == 'blank': hosts_file.write("\n") blanks_written += 1 if line.entry_type == 'ipv4': hosts_file.write( "{0}\t{1}\n".format( line.address, ' '.join(line.names), ) ) ipv4_entries_written += 1 if line.entry_type == 'ipv6': hosts_file.write( "{0}\t{1}\n".format( line.address, ' '.join(line.names), )) ipv6_entries_written += 1 except: raise UnableToWriteHosts() return {'total_written': written_count + 1, 'comments_written': comments_written, 'blanks_written': blanks_written, 'ipv4_entries_written': ipv4_entries_written, 'ipv6_entries_written': ipv6_entries_written}
def function[write, parameter[self, path]]: constant[ Write all of the HostsEntry instances back to the hosts file :param path: override the write path :return: Dictionary containing counts ] variable[written_count] assign[=] constant[0] variable[comments_written] assign[=] constant[0] variable[blanks_written] assign[=] constant[0] variable[ipv4_entries_written] assign[=] constant[0] variable[ipv6_entries_written] assign[=] constant[0] if name[path] begin[:] variable[output_file_path] assign[=] name[path] <ast.Try object at 0x7da1b0783940> return[dictionary[[<ast.Constant object at 0x7da1b0780400>, <ast.Constant object at 0x7da1b0781810>, <ast.Constant object at 0x7da1b0782ad0>, <ast.Constant object at 0x7da1b07818a0>, <ast.Constant object at 0x7da1b0780e20>], [<ast.BinOp object at 0x7da1b0783250>, <ast.Name object at 0x7da1b07813c0>, <ast.Name object at 0x7da1b0782cb0>, <ast.Name object at 0x7da1b07810c0>, <ast.Name object at 0x7da1b0781900>]]]
keyword[def] identifier[write] ( identifier[self] , identifier[path] = keyword[None] ): literal[string] identifier[written_count] = literal[int] identifier[comments_written] = literal[int] identifier[blanks_written] = literal[int] identifier[ipv4_entries_written] = literal[int] identifier[ipv6_entries_written] = literal[int] keyword[if] identifier[path] : identifier[output_file_path] = identifier[path] keyword[else] : identifier[output_file_path] = identifier[self] . identifier[hosts_path] keyword[try] : keyword[with] identifier[open] ( identifier[output_file_path] , literal[string] ) keyword[as] identifier[hosts_file] : keyword[for] identifier[written_count] , identifier[line] keyword[in] identifier[enumerate] ( identifier[self] . identifier[entries] ): keyword[if] identifier[line] . identifier[entry_type] == literal[string] : identifier[hosts_file] . identifier[write] ( identifier[line] . identifier[comment] + literal[string] ) identifier[comments_written] += literal[int] keyword[if] identifier[line] . identifier[entry_type] == literal[string] : identifier[hosts_file] . identifier[write] ( literal[string] ) identifier[blanks_written] += literal[int] keyword[if] identifier[line] . identifier[entry_type] == literal[string] : identifier[hosts_file] . identifier[write] ( literal[string] . identifier[format] ( identifier[line] . identifier[address] , literal[string] . identifier[join] ( identifier[line] . identifier[names] ), ) ) identifier[ipv4_entries_written] += literal[int] keyword[if] identifier[line] . identifier[entry_type] == literal[string] : identifier[hosts_file] . identifier[write] ( literal[string] . identifier[format] ( identifier[line] . identifier[address] , literal[string] . identifier[join] ( identifier[line] . identifier[names] ),)) identifier[ipv6_entries_written] += literal[int] keyword[except] : keyword[raise] identifier[UnableToWriteHosts] () keyword[return] { literal[string] : identifier[written_count] + literal[int] , literal[string] : identifier[comments_written] , literal[string] : identifier[blanks_written] , literal[string] : identifier[ipv4_entries_written] , literal[string] : identifier[ipv6_entries_written] }
def write(self, path=None): """ Write all of the HostsEntry instances back to the hosts file :param path: override the write path :return: Dictionary containing counts """ written_count = 0 comments_written = 0 blanks_written = 0 ipv4_entries_written = 0 ipv6_entries_written = 0 if path: output_file_path = path # depends on [control=['if'], data=[]] else: output_file_path = self.hosts_path try: with open(output_file_path, 'w') as hosts_file: for (written_count, line) in enumerate(self.entries): if line.entry_type == 'comment': hosts_file.write(line.comment + '\n') comments_written += 1 # depends on [control=['if'], data=[]] if line.entry_type == 'blank': hosts_file.write('\n') blanks_written += 1 # depends on [control=['if'], data=[]] if line.entry_type == 'ipv4': hosts_file.write('{0}\t{1}\n'.format(line.address, ' '.join(line.names))) ipv4_entries_written += 1 # depends on [control=['if'], data=[]] if line.entry_type == 'ipv6': hosts_file.write('{0}\t{1}\n'.format(line.address, ' '.join(line.names))) ipv6_entries_written += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['hosts_file']] # depends on [control=['try'], data=[]] except: raise UnableToWriteHosts() # depends on [control=['except'], data=[]] return {'total_written': written_count + 1, 'comments_written': comments_written, 'blanks_written': blanks_written, 'ipv4_entries_written': ipv4_entries_written, 'ipv6_entries_written': ipv6_entries_written}
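A hedged usage sketch for the write method above, assuming it belongs to the python-hosts Hosts class; the import path and the constructor argument name are assumptions:

from python_hosts import Hosts, HostsEntry  # assumed package layout

hosts = Hosts(path='/etc/hosts')  # constructor argument name is an assumption
hosts.add([HostsEntry(entry_type='ipv4', address='10.0.0.5',
                      names=['example.local'])])
counts = hosts.write(path='/tmp/hosts.out')  # override the write path
print(counts['total_written'], counts['ipv4_entries_written'])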
def decode_string(data, encoding='hex'): ''' Decode string :param data: string to decode :param encoding: encoding to use (default: 'hex') :return: decoded string ''' if six.PY2: return data.decode(encoding) else: return codecs.decode(data.encode('ascii'), encoding)
def function[decode_string, parameter[data, encoding]]: constant[ Decode string :param data: string to decode :param encoding: encoding to use (default: 'hex') :return: decoded string ] if name[six].PY2 begin[:] return[call[name[data].decode, parameter[name[encoding]]]]
keyword[def] identifier[decode_string] ( identifier[data] , identifier[encoding] = literal[string] ): literal[string] keyword[if] identifier[six] . identifier[PY2] : keyword[return] identifier[data] . identifier[decode] ( identifier[encoding] ) keyword[else] : keyword[return] identifier[codecs] . identifier[decode] ( identifier[data] . identifier[encode] ( literal[string] ), identifier[encoding] )
def decode_string(data, encoding='hex'): """ Decode string :param data: string to decode :param encoding: encoding to use (default: 'hex') :return: decoded string """ if six.PY2: return data.decode(encoding) # depends on [control=['if'], data=[]] else: return codecs.decode(data.encode('ascii'), encoding)
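Two minimal calls to the helper above; 'hex' is the default, and any bytes-to-bytes codec name accepted by codecs.decode works on Python 3:

assert decode_string('68656c6c6f') == b'hello'                  # hex (default)
assert decode_string('aGVsbG8=', encoding='base64') == b'hello'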
def _get_all_group_items(network_id):
    """
        Get all the resource group items in the network, across all scenarios;
        returns a dictionary of lists of ResourceGroupItems, keyed on scenario_id
    """
    base_qry = db.DBSession.query(ResourceGroupItem)

    item_qry = base_qry.join(Scenario).filter(Scenario.network_id==network_id)

    x = time.time()
    logging.info("Getting all items")
    all_items = db.DBSession.execute(item_qry.statement).fetchall()
    log.info("%s groups jointly retrieved in %s", len(all_items), time.time()-x)

    logging.info("items retrieved. Processing results...")
    x = time.time()

    item_dict = dict()
    for item in all_items:
        items = item_dict.get(item.scenario_id, [])
        items.append(item)
        item_dict[item.scenario_id] = items

    logging.info("items processed in %s", time.time()-x)

    return item_dict
def function[_get_all_group_items, parameter[network_id]]: constant[ Get all the resource group items in the network, across all scenarios returns a dictionary of dict objects, keyed on scenario_id ] variable[base_qry] assign[=] call[name[db].DBSession.query, parameter[name[ResourceGroupItem]]] variable[item_qry] assign[=] call[call[name[base_qry].join, parameter[name[Scenario]]].filter, parameter[compare[name[Scenario].network_id equal[==] name[network_id]]]] variable[x] assign[=] call[name[time].time, parameter[]] call[name[logging].info, parameter[constant[Getting all items]]] variable[all_items] assign[=] call[call[name[db].DBSession.execute, parameter[name[item_qry].statement]].fetchall, parameter[]] call[name[log].info, parameter[constant[%s groups jointly retrieved in %s], call[name[len], parameter[name[all_items]]], binary_operation[call[name[time].time, parameter[]] - name[x]]]] call[name[logging].info, parameter[constant[items retrieved. Processing results...]]] variable[x] assign[=] call[name[time].time, parameter[]] variable[item_dict] assign[=] call[name[dict], parameter[]] for taget[name[item]] in starred[name[all_items]] begin[:] variable[items] assign[=] call[name[item_dict].get, parameter[name[item].scenario_id, list[[]]]] call[name[items].append, parameter[name[item]]] call[name[item_dict]][name[item].scenario_id] assign[=] name[items] call[name[logging].info, parameter[constant[items processed in %s], binary_operation[call[name[time].time, parameter[]] - name[x]]]] return[name[item_dict]]
keyword[def] identifier[_get_all_group_items] ( identifier[network_id] ): literal[string] identifier[base_qry] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[ResourceGroupItem] ) identifier[item_qry] = identifier[base_qry] . identifier[join] ( identifier[Scenario] ). identifier[filter] ( identifier[Scenario] . identifier[network_id] == identifier[network_id] ) identifier[x] = identifier[time] . identifier[time] () identifier[logging] . identifier[info] ( literal[string] ) identifier[all_items] = identifier[db] . identifier[DBSession] . identifier[execute] ( identifier[item_qry] . identifier[statement] ). identifier[fetchall] () identifier[log] . identifier[info] ( literal[string] , identifier[len] ( identifier[all_items] ), identifier[time] . identifier[time] ()- identifier[x] ) identifier[logging] . identifier[info] ( literal[string] ) identifier[x] = identifier[time] . identifier[time] () identifier[item_dict] = identifier[dict] () keyword[for] identifier[item] keyword[in] identifier[all_items] : identifier[items] = identifier[item_dict] . identifier[get] ( identifier[item] . identifier[scenario_id] ,[]) identifier[items] . identifier[append] ( identifier[item] ) identifier[item_dict] [ identifier[item] . identifier[scenario_id] ]= identifier[items] identifier[logging] . identifier[info] ( literal[string] , identifier[time] . identifier[time] ()- identifier[x] ) keyword[return] identifier[item_dict]
def _get_all_group_items(network_id):
    """
    Get all the resource group items in the network, across all scenarios;
    returns a dictionary of lists of ResourceGroupItems, keyed on scenario_id
    """
    base_qry = db.DBSession.query(ResourceGroupItem)
    item_qry = base_qry.join(Scenario).filter(Scenario.network_id == network_id)
    x = time.time()
    logging.info('Getting all items')
    all_items = db.DBSession.execute(item_qry.statement).fetchall()
    log.info('%s groups jointly retrieved in %s', len(all_items), time.time() - x)
    logging.info('items retrieved. Processing results...')
    x = time.time()
    item_dict = dict()
    for item in all_items:
        items = item_dict.get(item.scenario_id, [])
        items.append(item)
        item_dict[item.scenario_id] = items # depends on [control=['for'], data=['item']]
    logging.info('items processed in %s', time.time() - x)
    return item_dict
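A hedged sketch of consuming the result, assuming an active Hydra DB session and a valid network id (42 is illustrative):

items_by_scenario = _get_all_group_items(network_id=42)
for scenario_id, items in items_by_scenario.items():
    print(scenario_id, len(items))  # one list of group items per scenario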
async def destroy_tournament(self, t: Tournament): """ completely removes a tournament from Challonge |methcoro| Note: |from_api| Deletes a tournament along with all its associated records. There is no undo, so use with care! Raises: APIException """ await self.connection('DELETE', 'tournaments/{}'.format(t.id)) if t in self.tournaments: self.tournaments.remove(t)
<ast.AsyncFunctionDef object at 0x7da2054a7e20>
keyword[async] keyword[def] identifier[destroy_tournament] ( identifier[self] , identifier[t] : identifier[Tournament] ): literal[string] keyword[await] identifier[self] . identifier[connection] ( literal[string] , literal[string] . identifier[format] ( identifier[t] . identifier[id] )) keyword[if] identifier[t] keyword[in] identifier[self] . identifier[tournaments] : identifier[self] . identifier[tournaments] . identifier[remove] ( identifier[t] )
async def destroy_tournament(self, t: Tournament): """ completely removes a tournament from Challonge |methcoro| Note: |from_api| Deletes a tournament along with all its associated records. There is no undo, so use with care! Raises: APIException """ await self.connection('DELETE', 'tournaments/{}'.format(t.id)) if t in self.tournaments: self.tournaments.remove(t) # depends on [control=['if'], data=['t']]
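A hedged async sketch; `user` is assumed to be the client object this coroutine is bound to and `t` a previously fetched Tournament:

async def remove_tournament(user, t):
    # Irreversible on the Challonge side, per the docstring above.
    await user.destroy_tournament(t)
    assert t not in user.tournaments  # the local cache is pruned as well

# Driven from an event loop, e.g. asyncio.run(remove_tournament(user, t))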
def _setup(self):
    """
    Prepare the system for using ``ansible-galaxy`` and return None.

    :return: None
    """
    role_directory = os.path.join(self._config.scenario.directory,
                                  self.options['roles-path'])
    if not os.path.isdir(role_directory):
        os.makedirs(role_directory)
def function[_setup, parameter[self]]: constant[ Prepare the system for using ``ansible-galaxy`` and returns None. :return: None ] variable[role_directory] assign[=] call[name[os].path.join, parameter[name[self]._config.scenario.directory, call[name[self].options][constant[roles-path]]]] if <ast.UnaryOp object at 0x7da1b1c8ac20> begin[:] call[name[os].makedirs, parameter[name[role_directory]]]
keyword[def] identifier[_setup] ( identifier[self] ): literal[string] identifier[role_directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_config] . identifier[scenario] . identifier[directory] , identifier[self] . identifier[options] [ literal[string] ]) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[role_directory] ): identifier[os] . identifier[makedirs] ( identifier[role_directory] )
def _setup(self):
    """
    Prepare the system for using ``ansible-galaxy`` and return None.

    :return: None
    """
    role_directory = os.path.join(self._config.scenario.directory, self.options['roles-path'])
    if not os.path.isdir(role_directory):
        os.makedirs(role_directory) # depends on [control=['if'], data=[]]
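The method above only guarantees that the configured roles directory exists; on Python 3 the same effect is a one-liner, shown here as a standalone sketch with an illustrative path:

import os

# Idempotent equivalent of the isdir check plus makedirs above.
os.makedirs(os.path.join('/tmp/scenario', 'roles'), exist_ok=True)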