Columns (with min and max string lengths from the dataset viewer):
code: strings of 75 to 104k characters
code_sememe: strings of 47 to 309k characters
token_type: strings of 215 to 214k characters
code_dependency: strings of 75 to 155k characters
Each row below gives the four fields in this order.
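A dataset row pairs raw Python source (code) with three derived views: code_sememe linearizes the abstract syntax tree (function[...], variable[...] assign[=] call[...]), token_type replaces each token with its lexical class (keyword[...], identifier[...], literal[string]), and code_dependency re-prints the source with control-flow dependency comments. The dataset's actual generation pipeline is not shown here; what follows is a minimal, hypothetical sketch of how a sememe-style string could be derived with Python's standard ast module. The function name sememe and the small subset of node types it handles are illustrative assumptions, not the dataset's tooling.

import ast

def sememe(node):
    # Hypothetical linearizer covering only a few node types; the real
    # pipeline behind code_sememe is unknown and certainly broader.
    if isinstance(node, ast.FunctionDef):
        params = ', '.join(a.arg for a in node.args.args)
        body = ' '.join(sememe(stmt) for stmt in node.body)
        return 'function[{}, parameter[{}]]: {}'.format(node.name, params, body)
    if isinstance(node, ast.Assign):
        target = node.targets[0]
        left = 'variable[{}]'.format(target.id) if isinstance(target, ast.Name) else sememe(target)
        return '{} assign[=] {}'.format(left, sememe(node.value))
    if isinstance(node, ast.Expr):
        return sememe(node.value)
    if isinstance(node, ast.Call):
        args = ', '.join(sememe(a) for a in node.args)
        return 'call[{}, parameter[{}]]'.format(sememe(node.func), args)
    if isinstance(node, ast.Attribute):
        return '{}.{}'.format(sememe(node.value), node.attr)
    if isinstance(node, ast.Name):
        return 'name[{}]'.format(node.id)
    if isinstance(node, ast.Constant):
        return 'constant[{}]'.format(node.value)
    return '<{}>'.format(type(node).__name__)

tree = ast.parse("def links(self): x = f('//a')")
print(sememe(tree.body[0]))
# -> function[links, parameter[self]]: variable[x] assign[=] call[name[f], parameter[constant[//a]]]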
def links(self):
    """ Parse all links on the page and return a generator """
    ass = self.xpath('//a')
    for a in ass:
        text = a.xpath('./text()').extract_first()
        url = a.xpath('./@href').extract_first()
        link = Link(text, url)
        yield link
def function[links, parameter[self]]: constant[ Parse all links on the page and return a generator ] variable[ass] assign[=] call[name[self].xpath, parameter[constant[//a]]] for taget[name[a]] in starred[name[ass]] begin[:] variable[text] assign[=] call[call[name[a].xpath, parameter[constant[./text()]]].extract_first, parameter[]] variable[url] assign[=] call[call[name[a].xpath, parameter[constant[./@href]]].extract_first, parameter[]] variable[link] assign[=] call[name[Link], parameter[name[text], name[url]]] <ast.Yield object at 0x7da1b168e0e0>
keyword[def] identifier[links] ( identifier[self] ): literal[string] identifier[ass] = identifier[self] . identifier[xpath] ( literal[string] ) keyword[for] identifier[a] keyword[in] identifier[ass] : identifier[text] = identifier[a] . identifier[xpath] ( literal[string] ). identifier[extract_first] () identifier[url] = identifier[a] . identifier[xpath] ( literal[string] ). identifier[extract_first] () identifier[link] = identifier[Link] ( identifier[text] , identifier[url] ) keyword[yield] identifier[link]
def links(self):
    """ Parse all links on the page and return a generator """
    ass = self.xpath('//a')
    for a in ass:
        text = a.xpath('./text()').extract_first()
        url = a.xpath('./@href').extract_first()
        link = Link(text, url)
        yield link # depends on [control=['for'], data=['a']]
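The token_type view in the row above maps every name to identifier[...] or keyword[...] and collapses string and numeric literals to literal[string] / literal[int]. As a rough sketch, under the assumption that a tokenizer pass of this shape produced it (the function token_types is illustrative, not the dataset's actual tooling), Python's standard tokenize module can generate a similar stream:

import io
import keyword
import token
import tokenize

def token_types(source):
    # Map each source token to a coarse lexical class, token_type-style.
    out = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == token.NAME:
            kind = 'keyword' if keyword.iskeyword(tok.string) else 'identifier'
            out.append('{}[{}]'.format(kind, tok.string))
        elif tok.type == token.STRING:
            out.append('literal[string]')
        elif tok.type == token.NUMBER:
            out.append('literal[int]')
        elif tok.type == token.OP:
            out.append(tok.string)
    return ' '.join(out)

print(token_types("def links(self): return self.xpath('//a')"))
# -> keyword[def] identifier[links] ( identifier[self] ) : keyword[return] identifier[self] . identifier[xpath] ( literal[string] )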
def data_validation(self, in_nc):
    """Check the necessary dimensions and variables in the input netcdf data"""
    data_nc = Dataset(in_nc)
    dims = list(data_nc.dimensions)
    if dims not in self.dims_oi:
        data_nc.close()
        raise Exception("{0} {1}".format(self.error_messages[1], dims))
    nc_vars = list(data_nc.variables)
    if nc_vars == self.vars_oi[0]:
        self.runoff_vars = [self.vars_oi[0][-1]]
    elif nc_vars == self.vars_oi[1]:
        self.runoff_vars = [self.vars_oi[1][-1]]
    else:
        data_nc.close()
        raise Exception("{0} {1}".format(self.error_messages[2], nc_vars))
    data_nc.close()
def function[data_validation, parameter[self, in_nc]]: constant[Check the necessary dimensions and variables in the input netcdf data] variable[data_nc] assign[=] call[name[Dataset], parameter[name[in_nc]]] variable[dims] assign[=] call[name[list], parameter[name[data_nc].dimensions]] if compare[name[dims] <ast.NotIn object at 0x7da2590d7190> name[self].dims_oi] begin[:] call[name[data_nc].close, parameter[]] <ast.Raise object at 0x7da20c992c20> variable[nc_vars] assign[=] call[name[list], parameter[name[data_nc].variables]] if compare[name[nc_vars] equal[==] call[name[self].vars_oi][constant[0]]] begin[:] name[self].runoff_vars assign[=] list[[<ast.Subscript object at 0x7da20c9937c0>]] call[name[data_nc].close, parameter[]]
keyword[def] identifier[data_validation] ( identifier[self] , identifier[in_nc] ): literal[string] identifier[data_nc] = identifier[Dataset] ( identifier[in_nc] ) identifier[dims] = identifier[list] ( identifier[data_nc] . identifier[dimensions] ) keyword[if] identifier[dims] keyword[not] keyword[in] identifier[self] . identifier[dims_oi] : identifier[data_nc] . identifier[close] () keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[self] . identifier[error_messages] [ literal[int] ], identifier[dims] )) identifier[nc_vars] = identifier[list] ( identifier[data_nc] . identifier[variables] ) keyword[if] identifier[nc_vars] == identifier[self] . identifier[vars_oi] [ literal[int] ]: identifier[self] . identifier[runoff_vars] =[ identifier[self] . identifier[vars_oi] [ literal[int] ][- literal[int] ]] keyword[elif] identifier[nc_vars] == identifier[self] . identifier[vars_oi] [ literal[int] ]: identifier[self] . identifier[runoff_vars] =[ identifier[self] . identifier[vars_oi] [ literal[int] ][- literal[int] ]] keyword[else] : identifier[data_nc] . identifier[close] () keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[self] . identifier[error_messages] [ literal[int] ], identifier[nc_vars] )) identifier[data_nc] . identifier[close] ()
def data_validation(self, in_nc):
    """Check the necessary dimensions and variables in the input netcdf data"""
    data_nc = Dataset(in_nc)
    dims = list(data_nc.dimensions)
    if dims not in self.dims_oi:
        data_nc.close()
        raise Exception('{0} {1}'.format(self.error_messages[1], dims)) # depends on [control=['if'], data=['dims']]
    nc_vars = list(data_nc.variables)
    if nc_vars == self.vars_oi[0]:
        self.runoff_vars = [self.vars_oi[0][-1]] # depends on [control=['if'], data=[]]
    elif nc_vars == self.vars_oi[1]:
        self.runoff_vars = [self.vars_oi[1][-1]] # depends on [control=['if'], data=[]]
    else:
        data_nc.close()
        raise Exception('{0} {1}'.format(self.error_messages[2], nc_vars))
    data_nc.close()
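The code_dependency view stacks a "# depends on [control=[...], data=[...]]" comment onto the last statement of each controlled block, recording which construct (if/for/while/with/try) the statement executes under and, for loops, the variable the loop binds. A hedged sketch of that annotation step, assuming an AST walk keyed on end line numbers; the function annotate and its data handling are simplifications (the dataset's real data field also records names tested by conditions, which this sketch omits):

import ast

def annotate(source):
    # Append a dependency tag to the last line of each controlled block.
    tree = ast.parse(source)
    comments = {}  # line number -> list of dependency tags
    kinds = {ast.If: 'if', ast.For: 'for', ast.While: 'while', ast.With: 'with'}
    for node in ast.walk(tree):
        kind = kinds.get(type(node))
        if kind is None:
            continue
        data = []
        if isinstance(node, ast.For) and isinstance(node.target, ast.Name):
            data = [node.target.id]  # loop variable the body depends on
        tag = "# depends on [control=['{}'], data={}]".format(kind, data)
        comments.setdefault(node.body[-1].end_lineno, []).append(tag)
    out = []
    for i, line in enumerate(source.splitlines(), start=1):
        out.append(line + (' ' + ' '.join(comments[i]) if i in comments else ''))
    return '\n'.join(out)

print(annotate("for a in items:\n    use(a)"))
# -> for a in items:
#        use(a) # depends on [control=['for'], data=['a']]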
def hist(self, dimension=None, num_bins=20, bin_range=None,
         adjoin=True, **kwargs):
    """Computes and adjoins histogram along specified dimension(s).

    Defaults to first value dimension if present otherwise falls
    back to first key dimension.

    Args:
        dimension: Dimension(s) to compute histogram on
        num_bins (int, optional): Number of bins
        bin_range (tuple optional): Lower and upper bounds of bins
        adjoin (bool, optional): Whether to adjoin histogram

    Returns:
        AdjointLayout of element and histogram or just the histogram
    """
    from ..operation import histogram
    if not isinstance(dimension, list):
        dimension = [dimension]
    hists = []
    for d in dimension[::-1]:
        hist = histogram(self, num_bins=num_bins, bin_range=bin_range,
                         dimension=d, **kwargs)
        hists.append(hist)
    if adjoin:
        layout = self
        for didx in range(len(dimension)):
            layout = layout << hists[didx]
    elif len(dimension) > 1:
        layout = Layout(hists)
    else:
        layout = hists[0]
    return layout
def function[hist, parameter[self, dimension, num_bins, bin_range, adjoin]]: constant[Computes and adjoins histogram along specified dimension(s). Defaults to first value dimension if present otherwise falls back to first key dimension. Args: dimension: Dimension(s) to compute histogram on num_bins (int, optional): Number of bins bin_range (tuple optional): Lower and upper bounds of bins adjoin (bool, optional): Whether to adjoin histogram Returns: AdjointLayout of element and histogram or just the histogram ] from relative_module[operation] import module[histogram] if <ast.UnaryOp object at 0x7da20c7cb9a0> begin[:] variable[dimension] assign[=] list[[<ast.Name object at 0x7da20c7c9780>]] variable[hists] assign[=] list[[]] for taget[name[d]] in starred[call[name[dimension]][<ast.Slice object at 0x7da20c7c8f40>]] begin[:] variable[hist] assign[=] call[name[histogram], parameter[name[self]]] call[name[hists].append, parameter[name[hist]]] if name[adjoin] begin[:] variable[layout] assign[=] name[self] for taget[name[didx]] in starred[call[name[range], parameter[call[name[len], parameter[name[dimension]]]]]] begin[:] variable[layout] assign[=] binary_operation[name[layout] <ast.LShift object at 0x7da2590d69e0> call[name[hists]][name[didx]]] return[name[layout]]
keyword[def] identifier[hist] ( identifier[self] , identifier[dimension] = keyword[None] , identifier[num_bins] = literal[int] , identifier[bin_range] = keyword[None] , identifier[adjoin] = keyword[True] ,** identifier[kwargs] ): literal[string] keyword[from] .. identifier[operation] keyword[import] identifier[histogram] keyword[if] keyword[not] identifier[isinstance] ( identifier[dimension] , identifier[list] ): identifier[dimension] =[ identifier[dimension] ] identifier[hists] =[] keyword[for] identifier[d] keyword[in] identifier[dimension] [::- literal[int] ]: identifier[hist] = identifier[histogram] ( identifier[self] , identifier[num_bins] = identifier[num_bins] , identifier[bin_range] = identifier[bin_range] , identifier[dimension] = identifier[d] ,** identifier[kwargs] ) identifier[hists] . identifier[append] ( identifier[hist] ) keyword[if] identifier[adjoin] : identifier[layout] = identifier[self] keyword[for] identifier[didx] keyword[in] identifier[range] ( identifier[len] ( identifier[dimension] )): identifier[layout] = identifier[layout] << identifier[hists] [ identifier[didx] ] keyword[elif] identifier[len] ( identifier[dimension] )> literal[int] : identifier[layout] = identifier[Layout] ( identifier[hists] ) keyword[else] : identifier[layout] = identifier[hists] [ literal[int] ] keyword[return] identifier[layout]
def hist(self, dimension=None, num_bins=20, bin_range=None, adjoin=True, **kwargs):
    """Computes and adjoins histogram along specified dimension(s).

    Defaults to first value dimension if present otherwise falls
    back to first key dimension.

    Args:
        dimension: Dimension(s) to compute histogram on
        num_bins (int, optional): Number of bins
        bin_range (tuple optional): Lower and upper bounds of bins
        adjoin (bool, optional): Whether to adjoin histogram

    Returns:
        AdjointLayout of element and histogram or just the histogram
    """
    from ..operation import histogram
    if not isinstance(dimension, list):
        dimension = [dimension] # depends on [control=['if'], data=[]]
    hists = []
    for d in dimension[::-1]:
        hist = histogram(self, num_bins=num_bins, bin_range=bin_range, dimension=d, **kwargs)
        hists.append(hist) # depends on [control=['for'], data=['d']]
    if adjoin:
        layout = self
        for didx in range(len(dimension)):
            layout = layout << hists[didx] # depends on [control=['for'], data=['didx']] # depends on [control=['if'], data=[]]
    elif len(dimension) > 1:
        layout = Layout(hists) # depends on [control=['if'], data=[]]
    else:
        layout = hists[0]
    return layout
def get_column(column_name, node, context):
    """Get a column by name from the selectable.

    Args:
        column_name: str, name of the column to retrieve.
        node: SqlNode, the node the column is being retrieved for.
        context: CompilationContext, compilation specific metadata.

    Returns:
        column, the SQLAlchemy column if found. Raises an AssertionError otherwise.
    """
    column = try_get_column(column_name, node, context)
    if column is None:
        selectable = get_node_selectable(node, context)
        raise AssertionError(
            u'Column "{}" not found in selectable "{}". Columns present are {}. '
            u'Context is {}.'.format(column_name, selectable.original,
                                     [col.name for col in selectable.c], context))
    return column
def function[get_column, parameter[column_name, node, context]]: constant[Get a column by name from the selectable. Args: column_name: str, name of the column to retrieve. node: SqlNode, the node the column is being retrieved for. context: CompilationContext, compilation specific metadata. Returns: column, the SQLAlchemy column if found. Raises an AssertionError otherwise. ] variable[column] assign[=] call[name[try_get_column], parameter[name[column_name], name[node], name[context]]] if compare[name[column] is constant[None]] begin[:] variable[selectable] assign[=] call[name[get_node_selectable], parameter[name[node], name[context]]] <ast.Raise object at 0x7da1b170e860> return[name[column]]
keyword[def] identifier[get_column] ( identifier[column_name] , identifier[node] , identifier[context] ): literal[string] identifier[column] = identifier[try_get_column] ( identifier[column_name] , identifier[node] , identifier[context] ) keyword[if] identifier[column] keyword[is] keyword[None] : identifier[selectable] = identifier[get_node_selectable] ( identifier[node] , identifier[context] ) keyword[raise] identifier[AssertionError] ( literal[string] literal[string] . identifier[format] ( identifier[column_name] , identifier[selectable] . identifier[original] , [ identifier[col] . identifier[name] keyword[for] identifier[col] keyword[in] identifier[selectable] . identifier[c] ], identifier[context] )) keyword[return] identifier[column]
def get_column(column_name, node, context):
    """Get a column by name from the selectable.

    Args:
        column_name: str, name of the column to retrieve.
        node: SqlNode, the node the column is being retrieved for.
        context: CompilationContext, compilation specific metadata.

    Returns:
        column, the SQLAlchemy column if found. Raises an AssertionError otherwise.
    """
    column = try_get_column(column_name, node, context)
    if column is None:
        selectable = get_node_selectable(node, context)
        raise AssertionError(u'Column "{}" not found in selectable "{}". Columns present are {}. Context is {}.'.format(column_name, selectable.original, [col.name for col in selectable.c], context)) # depends on [control=['if'], data=[]]
    return column
def store(self, filename=None, label=None, desc=None, date=None):
    """Store object to mat-file.

    TODO: determine format specification
    """
    date = date if date else datetime.now()
    date = date.replace(microsecond=0).isoformat()
    filename = filename if filename else date + '.mat'
    matfile = {
        'model': str(type(self)),
        'date': date,
        'dim': len(self.init_sol.shape),
        'dimlesses': self.coeffs,
        'init_solution': self.init_sol,
        'num_iters': self.num_iters,
        'num_nodes': self.num_nodes,
        'order': self.order,
        'originals': self.originals,
        'pumping': self.getPumping(),
        'spatial_step': self.dx,
        'time_step': self.dt,
    }
    if desc:
        matfile['desc'] = desc
    if label:
        matfile['label'] = label
    savemat(filename, matfile)
def function[store, parameter[self, filename, label, desc, date]]: constant[Store object to mat-file. TODO: determine format specification ] variable[date] assign[=] <ast.IfExp object at 0x7da20c794a00> variable[date] assign[=] call[call[name[date].replace, parameter[]].isoformat, parameter[]] variable[filename] assign[=] <ast.IfExp object at 0x7da18f7234c0> variable[matfile] assign[=] dictionary[[<ast.Constant object at 0x7da18f721210>, <ast.Constant object at 0x7da18f7226b0>, <ast.Constant object at 0x7da18f7201f0>, <ast.Constant object at 0x7da18f720730>, <ast.Constant object at 0x7da18f720940>, <ast.Constant object at 0x7da18f722680>, <ast.Constant object at 0x7da18f723a30>, <ast.Constant object at 0x7da18f812800>, <ast.Constant object at 0x7da18f810f40>, <ast.Constant object at 0x7da18f811db0>, <ast.Constant object at 0x7da18f811300>, <ast.Constant object at 0x7da18f811b10>], [<ast.Call object at 0x7da18f810490>, <ast.Name object at 0x7da18f810850>, <ast.Call object at 0x7da18f810370>, <ast.Attribute object at 0x7da18f811e10>, <ast.Attribute object at 0x7da18f810520>, <ast.Attribute object at 0x7da18f810e20>, <ast.Attribute object at 0x7da18f8129b0>, <ast.Attribute object at 0x7da18f8132e0>, <ast.Attribute object at 0x7da18f811ba0>, <ast.Call object at 0x7da18f8120b0>, <ast.Attribute object at 0x7da18f813490>, <ast.Attribute object at 0x7da18f810af0>]] if name[desc] begin[:] call[name[matfile]][constant[desc]] assign[=] name[desc] if name[label] begin[:] call[name[matfile]][constant[label]] assign[=] name[label] call[name[savemat], parameter[name[filename], name[matfile]]]
keyword[def] identifier[store] ( identifier[self] , identifier[filename] = keyword[None] , identifier[label] = keyword[None] , identifier[desc] = keyword[None] , identifier[date] = keyword[None] ): literal[string] identifier[date] = identifier[date] keyword[if] identifier[date] keyword[else] identifier[datetime] . identifier[now] () identifier[date] = identifier[date] . identifier[replace] ( identifier[microsecond] = literal[int] ). identifier[isoformat] () identifier[filename] = identifier[filename] keyword[if] identifier[filename] keyword[else] identifier[date] + literal[string] identifier[matfile] ={ literal[string] : identifier[str] ( identifier[type] ( identifier[self] )), literal[string] : identifier[date] , literal[string] : identifier[len] ( identifier[self] . identifier[init_sol] . identifier[shape] ), literal[string] : identifier[self] . identifier[coeffs] , literal[string] : identifier[self] . identifier[init_sol] , literal[string] : identifier[self] . identifier[num_iters] , literal[string] : identifier[self] . identifier[num_nodes] , literal[string] : identifier[self] . identifier[order] , literal[string] : identifier[self] . identifier[originals] , literal[string] : identifier[self] . identifier[getPumping] (), literal[string] : identifier[self] . identifier[dx] , literal[string] : identifier[self] . identifier[dt] , } keyword[if] identifier[desc] : identifier[matfile] [ literal[string] ]= identifier[desc] keyword[if] identifier[label] : identifier[matfile] [ literal[string] ]= identifier[label] identifier[savemat] ( identifier[filename] , identifier[matfile] )
def store(self, filename=None, label=None, desc=None, date=None):
    """Store object to mat-file.

    TODO: determine format specification
    """
    date = date if date else datetime.now()
    date = date.replace(microsecond=0).isoformat()
    filename = filename if filename else date + '.mat'
    matfile = {'model': str(type(self)), 'date': date, 'dim': len(self.init_sol.shape), 'dimlesses': self.coeffs, 'init_solution': self.init_sol, 'num_iters': self.num_iters, 'num_nodes': self.num_nodes, 'order': self.order, 'originals': self.originals, 'pumping': self.getPumping(), 'spatial_step': self.dx, 'time_step': self.dt}
    if desc:
        matfile['desc'] = desc # depends on [control=['if'], data=[]]
    if label:
        matfile['label'] = label # depends on [control=['if'], data=[]]
    savemat(filename, matfile)
def additions_remove(**kwargs):
    '''
    Remove VirtualBox Guest Additions.

    Firstly it tries to uninstall itself by executing
    '/opt/VBoxGuestAdditions-VERSION/uninstall.run uninstall'.
    It uses the CD, connected by VirtualBox if it failes.

    CLI Example:

    .. code-block:: bash

        salt '*' vbox_guest.additions_remove
        salt '*' vbox_guest.additions_remove force=True

    :param force: force VirtualBox Guest Additions removing
    :type force: bool
    :return: True if VirtualBox Guest Additions were removed successfully else False
    '''
    kernel = __grains__.get('kernel', '')
    if kernel == 'Linux':
        ret = _additions_remove_linux()
    if not ret:
        ret = _additions_remove_use_cd(**kwargs)
    return ret
def function[additions_remove, parameter[]]: constant[ Remove VirtualBox Guest Additions. Firstly it tries to uninstall itself by executing '/opt/VBoxGuestAdditions-VERSION/uninstall.run uninstall'. It uses the CD, connected by VirtualBox if it failes. CLI Example: .. code-block:: bash salt '*' vbox_guest.additions_remove salt '*' vbox_guest.additions_remove force=True :param force: force VirtualBox Guest Additions removing :type force: bool :return: True if VirtualBox Guest Additions were removed successfully else False ] variable[kernel] assign[=] call[name[__grains__].get, parameter[constant[kernel], constant[]]] if compare[name[kernel] equal[==] constant[Linux]] begin[:] variable[ret] assign[=] call[name[_additions_remove_linux], parameter[]] if <ast.UnaryOp object at 0x7da204564ca0> begin[:] variable[ret] assign[=] call[name[_additions_remove_use_cd], parameter[]] return[name[ret]]
keyword[def] identifier[additions_remove] (** identifier[kwargs] ): literal[string] identifier[kernel] = identifier[__grains__] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[kernel] == literal[string] : identifier[ret] = identifier[_additions_remove_linux] () keyword[if] keyword[not] identifier[ret] : identifier[ret] = identifier[_additions_remove_use_cd] (** identifier[kwargs] ) keyword[return] identifier[ret]
def additions_remove(**kwargs):
    """
    Remove VirtualBox Guest Additions.

    Firstly it tries to uninstall itself by executing
    '/opt/VBoxGuestAdditions-VERSION/uninstall.run uninstall'.
    It uses the CD, connected by VirtualBox if it failes.

    CLI Example:

    .. code-block:: bash

        salt '*' vbox_guest.additions_remove
        salt '*' vbox_guest.additions_remove force=True

    :param force: force VirtualBox Guest Additions removing
    :type force: bool
    :return: True if VirtualBox Guest Additions were removed successfully else False
    """
    kernel = __grains__.get('kernel', '')
    if kernel == 'Linux':
        ret = _additions_remove_linux() # depends on [control=['if'], data=[]]
    if not ret:
        ret = _additions_remove_use_cd(**kwargs) # depends on [control=['if'], data=[]]
    return ret
def num_samples(input_filepath):
    '''
    Show number of samples (0 if unavailable).

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    n_samples : int
        total number of samples in audio file.
        Returns 0 if empty or unavailable
    '''
    validate_input_file(input_filepath)
    output = soxi(input_filepath, 's')
    if output == '0':
        logger.warning("Number of samples unavailable for %s", input_filepath)
    return int(output)
def function[num_samples, parameter[input_filepath]]: constant[ Show number of samples (0 if unavailable). Parameters ---------- input_filepath : str Path to audio file. Returns ------- n_samples : int total number of samples in audio file. Returns 0 if empty or unavailable ] call[name[validate_input_file], parameter[name[input_filepath]]] variable[output] assign[=] call[name[soxi], parameter[name[input_filepath], constant[s]]] if compare[name[output] equal[==] constant[0]] begin[:] call[name[logger].warning, parameter[constant[Number of samples unavailable for %s], name[input_filepath]]] return[call[name[int], parameter[name[output]]]]
keyword[def] identifier[num_samples] ( identifier[input_filepath] ): literal[string] identifier[validate_input_file] ( identifier[input_filepath] ) identifier[output] = identifier[soxi] ( identifier[input_filepath] , literal[string] ) keyword[if] identifier[output] == literal[string] : identifier[logger] . identifier[warning] ( literal[string] , identifier[input_filepath] ) keyword[return] identifier[int] ( identifier[output] )
def num_samples(input_filepath):
    """
    Show number of samples (0 if unavailable).

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    n_samples : int
        total number of samples in audio file.
        Returns 0 if empty or unavailable
    """
    validate_input_file(input_filepath)
    output = soxi(input_filepath, 's')
    if output == '0':
        logger.warning('Number of samples unavailable for %s', input_filepath) # depends on [control=['if'], data=[]]
    return int(output)
def get_all_activities(self, autoscale_group, activity_ids=None,
                       max_records=None, next_token=None):
    """
    Get all activities for the given autoscaling group.

    This action supports pagination by returning a token if there are more
    pages to retrieve. To get the next page, call this action again with
    the returned token as the NextToken parameter

    :type autoscale_group: str or
        :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
    :param autoscale_group: The auto scaling group to get activities on.

    :type max_records: int
    :param max_records: Maximum amount of activities to return.

    :rtype: list
    :returns: List of
        :class:`boto.ec2.autoscale.activity.Activity` instances.
    """
    name = autoscale_group
    if isinstance(autoscale_group, AutoScalingGroup):
        name = autoscale_group.name
    params = {'AutoScalingGroupName': name}
    if max_records:
        params['MaxRecords'] = max_records
    if next_token:
        params['NextToken'] = next_token
    if activity_ids:
        self.build_list_params(params, activity_ids, 'ActivityIds')
    return self.get_list('DescribeScalingActivities', params, [('member', Activity)])
def function[get_all_activities, parameter[self, autoscale_group, activity_ids, max_records, next_token]]: constant[ Get all activities for the given autoscaling group. This action supports pagination by returning a token if there are more pages to retrieve. To get the next page, call this action again with the returned token as the NextToken parameter :type autoscale_group: str or :class:`boto.ec2.autoscale.group.AutoScalingGroup` object :param autoscale_group: The auto scaling group to get activities on. :type max_records: int :param max_records: Maximum amount of activities to return. :rtype: list :returns: List of :class:`boto.ec2.autoscale.activity.Activity` instances. ] variable[name] assign[=] name[autoscale_group] if call[name[isinstance], parameter[name[autoscale_group], name[AutoScalingGroup]]] begin[:] variable[name] assign[=] name[autoscale_group].name variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2651b40>], [<ast.Name object at 0x7da1b2650430>]] if name[max_records] begin[:] call[name[params]][constant[MaxRecords]] assign[=] name[max_records] if name[next_token] begin[:] call[name[params]][constant[NextToken]] assign[=] name[next_token] if name[activity_ids] begin[:] call[name[self].build_list_params, parameter[name[params], name[activity_ids], constant[ActivityIds]]] return[call[name[self].get_list, parameter[constant[DescribeScalingActivities], name[params], list[[<ast.Tuple object at 0x7da1b2650310>]]]]]
keyword[def] identifier[get_all_activities] ( identifier[self] , identifier[autoscale_group] , identifier[activity_ids] = keyword[None] , identifier[max_records] = keyword[None] , identifier[next_token] = keyword[None] ): literal[string] identifier[name] = identifier[autoscale_group] keyword[if] identifier[isinstance] ( identifier[autoscale_group] , identifier[AutoScalingGroup] ): identifier[name] = identifier[autoscale_group] . identifier[name] identifier[params] ={ literal[string] : identifier[name] } keyword[if] identifier[max_records] : identifier[params] [ literal[string] ]= identifier[max_records] keyword[if] identifier[next_token] : identifier[params] [ literal[string] ]= identifier[next_token] keyword[if] identifier[activity_ids] : identifier[self] . identifier[build_list_params] ( identifier[params] , identifier[activity_ids] , literal[string] ) keyword[return] identifier[self] . identifier[get_list] ( literal[string] , identifier[params] ,[( literal[string] , identifier[Activity] )])
def get_all_activities(self, autoscale_group, activity_ids=None, max_records=None, next_token=None):
    """
    Get all activities for the given autoscaling group.

    This action supports pagination by returning a token if there are more
    pages to retrieve. To get the next page, call this action again with
    the returned token as the NextToken parameter

    :type autoscale_group: str or
        :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
    :param autoscale_group: The auto scaling group to get activities on.

    :type max_records: int
    :param max_records: Maximum amount of activities to return.

    :rtype: list
    :returns: List of
        :class:`boto.ec2.autoscale.activity.Activity` instances.
    """
    name = autoscale_group
    if isinstance(autoscale_group, AutoScalingGroup):
        name = autoscale_group.name # depends on [control=['if'], data=[]]
    params = {'AutoScalingGroupName': name}
    if max_records:
        params['MaxRecords'] = max_records # depends on [control=['if'], data=[]]
    if next_token:
        params['NextToken'] = next_token # depends on [control=['if'], data=[]]
    if activity_ids:
        self.build_list_params(params, activity_ids, 'ActivityIds') # depends on [control=['if'], data=[]]
    return self.get_list('DescribeScalingActivities', params, [('member', Activity)])
def save_to_folders(self, parameter_space, folder_name, runs):
    """
    Save results to a folder structure.
    """
    self.space_to_folders(self.db.get_results(), {}, parameter_space,
                          runs, folder_name)
def function[save_to_folders, parameter[self, parameter_space, folder_name, runs]]: constant[ Save results to a folder structure. ] call[name[self].space_to_folders, parameter[call[name[self].db.get_results, parameter[]], dictionary[[], []], name[parameter_space], name[runs], name[folder_name]]]
keyword[def] identifier[save_to_folders] ( identifier[self] , identifier[parameter_space] , identifier[folder_name] , identifier[runs] ): literal[string] identifier[self] . identifier[space_to_folders] ( identifier[self] . identifier[db] . identifier[get_results] (),{}, identifier[parameter_space] , identifier[runs] , identifier[folder_name] )
def save_to_folders(self, parameter_space, folder_name, runs):
    """
    Save results to a folder structure.
    """
    self.space_to_folders(self.db.get_results(), {}, parameter_space, runs, folder_name)
def parse_msg_sender(filename, sender_known=True):
    """Given a filename returns the sender and the message.

    Here the message is assumed to be a whole MIME message or just
    message body.

    >>> sender, msg = parse_msg_sender('msg.eml')
    >>> sender, msg = parse_msg_sender('msg_body')

    If you don't want to consider the sender's name in your classification
    algorithm:

    >>> parse_msg_sender(filename, False)
    """
    import sys
    kwargs = {}
    if sys.version_info > (3, 0):
        kwargs["encoding"] = "utf8"

    sender, msg = None, None
    if os.path.isfile(filename) and not is_sender_filename(filename):
        with open(filename, **kwargs) as f:
            msg = f.read()
            sender = u''
            if sender_known:
                sender_filename = build_sender_filename(filename)
                if os.path.exists(sender_filename):
                    with open(sender_filename) as sender_file:
                        sender = sender_file.read().strip()
                else:
                    # if sender isn't found then the next line fails
                    # and it is ok
                    lines = msg.splitlines()
                    for line in lines:
                        match = re.match('From:(.*)', line)
                        if match:
                            sender = match.group(1)
                            break
    return (sender, msg)
def function[parse_msg_sender, parameter[filename, sender_known]]: constant[Given a filename returns the sender and the message. Here the message is assumed to be a whole MIME message or just message body. >>> sender, msg = parse_msg_sender('msg.eml') >>> sender, msg = parse_msg_sender('msg_body') If you don't want to consider the sender's name in your classification algorithm: >>> parse_msg_sender(filename, False) ] import module[sys] variable[kwargs] assign[=] dictionary[[], []] if compare[name[sys].version_info greater[>] tuple[[<ast.Constant object at 0x7da1b22e9990>, <ast.Constant object at 0x7da1b22e9c90>]]] begin[:] call[name[kwargs]][constant[encoding]] assign[=] constant[utf8] <ast.Tuple object at 0x7da1b22e9f00> assign[=] tuple[[<ast.Constant object at 0x7da1b22e8040>, <ast.Constant object at 0x7da1b22eaf20>]] if <ast.BoolOp object at 0x7da1b22eac50> begin[:] with call[name[open], parameter[name[filename]]] begin[:] variable[msg] assign[=] call[name[f].read, parameter[]] variable[sender] assign[=] constant[] if name[sender_known] begin[:] variable[sender_filename] assign[=] call[name[build_sender_filename], parameter[name[filename]]] if call[name[os].path.exists, parameter[name[sender_filename]]] begin[:] with call[name[open], parameter[name[sender_filename]]] begin[:] variable[sender] assign[=] call[call[name[sender_file].read, parameter[]].strip, parameter[]] return[tuple[[<ast.Name object at 0x7da1b1eb61a0>, <ast.Name object at 0x7da1b1eb5720>]]]
keyword[def] identifier[parse_msg_sender] ( identifier[filename] , identifier[sender_known] = keyword[True] ): literal[string] keyword[import] identifier[sys] identifier[kwargs] ={} keyword[if] identifier[sys] . identifier[version_info] >( literal[int] , literal[int] ): identifier[kwargs] [ literal[string] ]= literal[string] identifier[sender] , identifier[msg] = keyword[None] , keyword[None] keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filename] ) keyword[and] keyword[not] identifier[is_sender_filename] ( identifier[filename] ): keyword[with] identifier[open] ( identifier[filename] ,** identifier[kwargs] ) keyword[as] identifier[f] : identifier[msg] = identifier[f] . identifier[read] () identifier[sender] = literal[string] keyword[if] identifier[sender_known] : identifier[sender_filename] = identifier[build_sender_filename] ( identifier[filename] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[sender_filename] ): keyword[with] identifier[open] ( identifier[sender_filename] ) keyword[as] identifier[sender_file] : identifier[sender] = identifier[sender_file] . identifier[read] (). identifier[strip] () keyword[else] : identifier[lines] = identifier[msg] . identifier[splitlines] () keyword[for] identifier[line] keyword[in] identifier[lines] : identifier[match] = identifier[re] . identifier[match] ( literal[string] , identifier[line] ) keyword[if] identifier[match] : identifier[sender] = identifier[match] . identifier[group] ( literal[int] ) keyword[break] keyword[return] ( identifier[sender] , identifier[msg] )
def parse_msg_sender(filename, sender_known=True):
    """Given a filename returns the sender and the message.

    Here the message is assumed to be a whole MIME message or just
    message body.

    >>> sender, msg = parse_msg_sender('msg.eml')
    >>> sender, msg = parse_msg_sender('msg_body')

    If you don't want to consider the sender's name in your classification
    algorithm:

    >>> parse_msg_sender(filename, False)
    """
    import sys
    kwargs = {}
    if sys.version_info > (3, 0):
        kwargs['encoding'] = 'utf8' # depends on [control=['if'], data=[]]
    (sender, msg) = (None, None)
    if os.path.isfile(filename) and (not is_sender_filename(filename)):
        with open(filename, **kwargs) as f:
            msg = f.read()
            sender = u''
            if sender_known:
                sender_filename = build_sender_filename(filename)
                if os.path.exists(sender_filename):
                    with open(sender_filename) as sender_file:
                        sender = sender_file.read().strip() # depends on [control=['with'], data=['sender_file']] # depends on [control=['if'], data=[]]
                else:
                    # if sender isn't found then the next line fails
                    # and it is ok
                    lines = msg.splitlines()
                    for line in lines:
                        match = re.match('From:(.*)', line)
                        if match:
                            sender = match.group(1)
                            break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['open', 'f']] # depends on [control=['if'], data=[]]
    return (sender, msg)
def dict_to_qs(dct):
    """
    Takes a dictionary and uses it to create a query string.
    """
    itms = ["%s=%s" % (key, val) for key, val in list(dct.items())
            if val is not None]
    return "&".join(itms)
def function[dict_to_qs, parameter[dct]]: constant[ Takes a dictionary and uses it to create a query string. ] variable[itms] assign[=] <ast.ListComp object at 0x7da1b0528a30> return[call[constant[&].join, parameter[name[itms]]]]
keyword[def] identifier[dict_to_qs] ( identifier[dct] ): literal[string] identifier[itms] =[ literal[string] %( identifier[key] , identifier[val] ) keyword[for] identifier[key] , identifier[val] keyword[in] identifier[list] ( identifier[dct] . identifier[items] ()) keyword[if] identifier[val] keyword[is] keyword[not] keyword[None] ] keyword[return] literal[string] . identifier[join] ( identifier[itms] )
def dict_to_qs(dct):
    """
    Takes a dictionary and uses it to create a query string.
    """
    itms = ['%s=%s' % (key, val) for (key, val) in list(dct.items()) if val is not None]
    return '&'.join(itms)
def MaxPooling(
        inputs, pool_size, strides=None, padding='valid',
        data_format='channels_last'):
    """
    Same as `tf.layers.MaxPooling2D`. Default strides is equal to pool_size.
    """
    if strides is None:
        strides = pool_size
    layer = tf.layers.MaxPooling2D(pool_size, strides,
                                   padding=padding, data_format=data_format)
    ret = layer.apply(inputs, scope=tf.get_variable_scope())
    return tf.identity(ret, name='output')
def function[MaxPooling, parameter[inputs, pool_size, strides, padding, data_format]]: constant[ Same as `tf.layers.MaxPooling2D`. Default strides is equal to pool_size. ] if compare[name[strides] is constant[None]] begin[:] variable[strides] assign[=] name[pool_size] variable[layer] assign[=] call[name[tf].layers.MaxPooling2D, parameter[name[pool_size], name[strides]]] variable[ret] assign[=] call[name[layer].apply, parameter[name[inputs]]] return[call[name[tf].identity, parameter[name[ret]]]]
keyword[def] identifier[MaxPooling] ( identifier[inputs] , identifier[pool_size] , identifier[strides] = keyword[None] , identifier[padding] = literal[string] , identifier[data_format] = literal[string] ): literal[string] keyword[if] identifier[strides] keyword[is] keyword[None] : identifier[strides] = identifier[pool_size] identifier[layer] = identifier[tf] . identifier[layers] . identifier[MaxPooling2D] ( identifier[pool_size] , identifier[strides] , identifier[padding] = identifier[padding] , identifier[data_format] = identifier[data_format] ) identifier[ret] = identifier[layer] . identifier[apply] ( identifier[inputs] , identifier[scope] = identifier[tf] . identifier[get_variable_scope] ()) keyword[return] identifier[tf] . identifier[identity] ( identifier[ret] , identifier[name] = literal[string] )
def MaxPooling(inputs, pool_size, strides=None, padding='valid', data_format='channels_last'):
    """
    Same as `tf.layers.MaxPooling2D`. Default strides is equal to pool_size.
    """
    if strides is None:
        strides = pool_size # depends on [control=['if'], data=['strides']]
    layer = tf.layers.MaxPooling2D(pool_size, strides, padding=padding, data_format=data_format)
    ret = layer.apply(inputs, scope=tf.get_variable_scope())
    return tf.identity(ret, name='output')
def write(bar, offset, data):
    """Write data to PCI board.

    Parameters
    ----------
    bar : BaseAddressRegister
        BAR to write.
    offset : int
        Address offset in BAR to write.
    data : bytes
        Data to write.

    Returns
    -------
    None

    Examples
    --------
    >>> b = pypci.lspci(vendor=0x1147, device=3214)
    >>> pypci.write(b[0].bar[2], 0x04, b'\x01')

    >>> data = struct.pack('<I', 1234567)
    >>> pypci.write(b[0].bar[2], 0x00, data)
    """
    if type(data) not in [bytes, bytearray]:
        msg = 'data should be bytes or bytearray type'
        raise TypeError(msg)
    size = len(data)
    verify_access_range(bar, offset, size)
    if bar.type == 'io':
        return io_write(bar, offset, data)
    if bar.type == 'mem':
        return mem_write(bar, offset, data)
    return
def function[write, parameter[bar, offset, data]]: constant[Write data to PCI board. Parameters ---------- bar : BaseAddressRegister BAR to write. offset : int Address offset in BAR to write. data : bytes Data to write. Returns ------- None Examples -------- >>> b = pypci.lspci(vendor=0x1147, device=3214) >>> pypci.write(b[0].bar[2], 0x04, b'') >>> data = struct.pack('<I', 1234567) >>> pypci.write(b[0].bar[2], 0x00, data) ] if compare[call[name[type], parameter[name[data]]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Name object at 0x7da1b0bb09d0>, <ast.Name object at 0x7da1b0bb0880>]]] begin[:] variable[msg] assign[=] constant[data should be bytes or bytearray type] <ast.Raise object at 0x7da1b0bb08e0> variable[size] assign[=] call[name[len], parameter[name[data]]] call[name[verify_access_range], parameter[name[bar], name[offset], name[size]]] if compare[name[bar].type equal[==] constant[io]] begin[:] return[call[name[io_write], parameter[name[bar], name[offset], name[data]]]] if compare[name[bar].type equal[==] constant[mem]] begin[:] return[call[name[mem_write], parameter[name[bar], name[offset], name[data]]]] return[None]
keyword[def] identifier[write] ( identifier[bar] , identifier[offset] , identifier[data] ): literal[string] keyword[if] identifier[type] ( identifier[data] ) keyword[not] keyword[in] [ identifier[bytes] , identifier[bytearray] ]: identifier[msg] = literal[string] keyword[raise] identifier[TypeError] ( identifier[msg] ) identifier[size] = identifier[len] ( identifier[data] ) identifier[verify_access_range] ( identifier[bar] , identifier[offset] , identifier[size] ) keyword[if] identifier[bar] . identifier[type] == literal[string] : keyword[return] identifier[io_write] ( identifier[bar] , identifier[offset] , identifier[data] ) keyword[if] identifier[bar] . identifier[type] == literal[string] : keyword[return] identifier[mem_write] ( identifier[bar] , identifier[offset] , identifier[data] ) keyword[return]
def write(bar, offset, data):
    """Write data to PCI board.

    Parameters
    ----------
    bar : BaseAddressRegister
        BAR to write.
    offset : int
        Address offset in BAR to write.
    data : bytes
        Data to write.

    Returns
    -------
    None

    Examples
    --------
    >>> b = pypci.lspci(vendor=0x1147, device=3214)
    >>> pypci.write(b[0].bar[2], 0x04, b'\x01')

    >>> data = struct.pack('<I', 1234567)
    >>> pypci.write(b[0].bar[2], 0x00, data)
    """
    if type(data) not in [bytes, bytearray]:
        msg = 'data should be bytes or bytearray type'
        raise TypeError(msg) # depends on [control=['if'], data=[]]
    size = len(data)
    verify_access_range(bar, offset, size)
    if bar.type == 'io':
        return io_write(bar, offset, data) # depends on [control=['if'], data=[]]
    if bar.type == 'mem':
        return mem_write(bar, offset, data) # depends on [control=['if'], data=[]]
    return
def _writeSuperLinks(self, superLinks, fileObject):
    """
    Write SuperLinks to File Method
    """
    for slink in superLinks:
        fileObject.write('SLINK %s %s\n' % (
            slink.slinkNumber,
            slink.numPipes))
        for node in slink.superNodes:
            fileObject.write('NODE %s %.2f %.2f %.6f %s %s %s %.6f %.6f\n' % (
                node.nodeNumber,
                node.groundSurfaceElev,
                node.invertElev,
                node.manholeSA,
                node.nodeInletCode,
                node.cellI,
                node.cellJ,
                node.weirSideLength,
                node.orificeDiameter))
        for pipe in slink.pipes:
            fileObject.write('PIPE %s %s %.6f %.6f %.6f %.6f %.2f %.6f %.6f\n' % (
                pipe.pipeNumber,
                pipe.xSecType,
                pipe.diameterOrHeight,
                pipe.width,
                pipe.slope,
                pipe.roughness,
                pipe.length,
                pipe.conductance,
                pipe.drainSpacing))
def function[_writeSuperLinks, parameter[self, superLinks, fileObject]]: constant[ Write SuperLinks to File Method ] for taget[name[slink]] in starred[name[superLinks]] begin[:] call[name[fileObject].write, parameter[binary_operation[constant[SLINK %s %s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c6ab3d0>, <ast.Attribute object at 0x7da20c6ab2e0>]]]]] for taget[name[node]] in starred[name[slink].superNodes] begin[:] call[name[fileObject].write, parameter[binary_operation[constant[NODE %s %.2f %.2f %.6f %s %s %s %.6f %.6f ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c6abc10>, <ast.Attribute object at 0x7da20c6a8310>, <ast.Attribute object at 0x7da20c6a8e50>, <ast.Attribute object at 0x7da20c6ab160>, <ast.Attribute object at 0x7da20c6a9930>, <ast.Attribute object at 0x7da20c6a8c40>, <ast.Attribute object at 0x7da20c6aa530>, <ast.Attribute object at 0x7da20c6a9ff0>, <ast.Attribute object at 0x7da20c6a9060>]]]]] for taget[name[pipe]] in starred[name[slink].pipes] begin[:] call[name[fileObject].write, parameter[binary_operation[constant[PIPE %s %s %.6f %.6f %.6f %.6f %.2f %.6f %.6f ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c6ab880>, <ast.Attribute object at 0x7da20c6a8700>, <ast.Attribute object at 0x7da18f813ee0>, <ast.Attribute object at 0x7da18f813f70>, <ast.Attribute object at 0x7da18f813b80>, <ast.Attribute object at 0x7da18f812bf0>, <ast.Attribute object at 0x7da18f811630>, <ast.Attribute object at 0x7da18f813670>, <ast.Attribute object at 0x7da18f813eb0>]]]]]
keyword[def] identifier[_writeSuperLinks] ( identifier[self] , identifier[superLinks] , identifier[fileObject] ): literal[string] keyword[for] identifier[slink] keyword[in] identifier[superLinks] : identifier[fileObject] . identifier[write] ( literal[string] %( identifier[slink] . identifier[slinkNumber] , identifier[slink] . identifier[numPipes] )) keyword[for] identifier[node] keyword[in] identifier[slink] . identifier[superNodes] : identifier[fileObject] . identifier[write] ( literal[string] %( identifier[node] . identifier[nodeNumber] , identifier[node] . identifier[groundSurfaceElev] , identifier[node] . identifier[invertElev] , identifier[node] . identifier[manholeSA] , identifier[node] . identifier[nodeInletCode] , identifier[node] . identifier[cellI] , identifier[node] . identifier[cellJ] , identifier[node] . identifier[weirSideLength] , identifier[node] . identifier[orificeDiameter] )) keyword[for] identifier[pipe] keyword[in] identifier[slink] . identifier[pipes] : identifier[fileObject] . identifier[write] ( literal[string] %( identifier[pipe] . identifier[pipeNumber] , identifier[pipe] . identifier[xSecType] , identifier[pipe] . identifier[diameterOrHeight] , identifier[pipe] . identifier[width] , identifier[pipe] . identifier[slope] , identifier[pipe] . identifier[roughness] , identifier[pipe] . identifier[length] , identifier[pipe] . identifier[conductance] , identifier[pipe] . identifier[drainSpacing] ))
def _writeSuperLinks(self, superLinks, fileObject):
    """
    Write SuperLinks to File Method
    """
    for slink in superLinks:
        fileObject.write('SLINK %s %s\n' % (slink.slinkNumber, slink.numPipes))
        for node in slink.superNodes:
            fileObject.write('NODE %s %.2f %.2f %.6f %s %s %s %.6f %.6f\n' % (node.nodeNumber, node.groundSurfaceElev, node.invertElev, node.manholeSA, node.nodeInletCode, node.cellI, node.cellJ, node.weirSideLength, node.orificeDiameter)) # depends on [control=['for'], data=['node']]
        for pipe in slink.pipes:
            fileObject.write('PIPE %s %s %.6f %.6f %.6f %.6f %.2f %.6f %.6f\n' % (pipe.pipeNumber, pipe.xSecType, pipe.diameterOrHeight, pipe.width, pipe.slope, pipe.roughness, pipe.length, pipe.conductance, pipe.drainSpacing)) # depends on [control=['for'], data=['pipe']] # depends on [control=['for'], data=['slink']]
def _consumers(self):
    """
    Gets consumer's map from app config

    :return: consumers map
    """
    app_config = self.lti_kwargs['app'].config
    config = app_config.get('PYLTI_CONFIG', dict())
    consumers = config.get('consumers', dict())
    return consumers
def function[_consumers, parameter[self]]: constant[ Gets consumer's map from app config :return: consumers map ] variable[app_config] assign[=] call[name[self].lti_kwargs][constant[app]].config variable[config] assign[=] call[name[app_config].get, parameter[constant[PYLTI_CONFIG], call[name[dict], parameter[]]]] variable[consumers] assign[=] call[name[config].get, parameter[constant[consumers], call[name[dict], parameter[]]]] return[name[consumers]]
keyword[def] identifier[_consumers] ( identifier[self] ): literal[string] identifier[app_config] = identifier[self] . identifier[lti_kwargs] [ literal[string] ]. identifier[config] identifier[config] = identifier[app_config] . identifier[get] ( literal[string] , identifier[dict] ()) identifier[consumers] = identifier[config] . identifier[get] ( literal[string] , identifier[dict] ()) keyword[return] identifier[consumers]
def _consumers(self):
    """
    Gets consumer's map from app config

    :return: consumers map
    """
    app_config = self.lti_kwargs['app'].config
    config = app_config.get('PYLTI_CONFIG', dict())
    consumers = config.get('consumers', dict())
    return consumers
def submit(self):
    """Submit this torrent and create a new task"""
    if self.api._req_lixian_add_task_bt(self):
        self.submitted = True
        return True
    return False
def function[submit, parameter[self]]: constant[Submit this torrent and create a new task] if call[name[self].api._req_lixian_add_task_bt, parameter[name[self]]] begin[:] name[self].submitted assign[=] constant[True] return[constant[True]] return[constant[False]]
keyword[def] identifier[submit] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[api] . identifier[_req_lixian_add_task_bt] ( identifier[self] ): identifier[self] . identifier[submitted] = keyword[True] keyword[return] keyword[True] keyword[return] keyword[False]
def submit(self):
    """Submit this torrent and create a new task"""
    if self.api._req_lixian_add_task_bt(self):
        self.submitted = True
        return True # depends on [control=['if'], data=[]]
    return False
def get_event_iter_returns(self, jid, minions, timeout=None):
    '''
    Gather the return data from the event system, break hard when timeout
    is reached.
    '''
    log.trace('entered - function get_event_iter_returns()')
    if timeout is None:
        timeout = self.opts['timeout']
    timeout_at = time.time() + timeout
    found = set()
    # Check to see if the jid is real, if not return the empty dict
    if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
        log.warning('jid does not exist')
        yield {}
        # stop the iteration, since the jid is invalid
        raise StopIteration()
    # Wait for the hosts to check in
    while True:
        raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect)
        if raw is None or time.time() > timeout_at:
            # Timeout reached
            break
        if 'minions' in raw.get('data', {}):
            continue
        try:
            found.add(raw['id'])
            ret = {raw['id']: {'ret': raw['return']}}
        except KeyError:
            # Ignore other erroneous messages
            continue
        if 'out' in raw:
            ret[raw['id']]['out'] = raw['out']
        yield ret
        time.sleep(0.02)
def function[get_event_iter_returns, parameter[self, jid, minions, timeout]]: constant[ Gather the return data from the event system, break hard when timeout is reached. ] call[name[log].trace, parameter[constant[entered - function get_event_iter_returns()]]] if compare[name[timeout] is constant[None]] begin[:] variable[timeout] assign[=] call[name[self].opts][constant[timeout]] variable[timeout_at] assign[=] binary_operation[call[name[time].time, parameter[]] + name[timeout]] variable[found] assign[=] call[name[set], parameter[]] if compare[call[call[name[self].returners][call[constant[{0}.get_load].format, parameter[call[name[self].opts][constant[master_job_cache]]]]], parameter[name[jid]]] equal[==] dictionary[[], []]] begin[:] call[name[log].warning, parameter[constant[jid does not exist]]] <ast.Yield object at 0x7da18f7214e0> <ast.Raise object at 0x7da18f721600> while constant[True] begin[:] variable[raw] assign[=] call[name[self].event.get_event, parameter[name[timeout]]] if <ast.BoolOp object at 0x7da207f99810> begin[:] break if compare[constant[minions] in call[name[raw].get, parameter[constant[data], dictionary[[], []]]]] begin[:] continue <ast.Try object at 0x7da1b21e9600> if compare[constant[out] in name[raw]] begin[:] call[call[name[ret]][call[name[raw]][constant[id]]]][constant[out]] assign[=] call[name[raw]][constant[out]] <ast.Yield object at 0x7da204621cc0> call[name[time].sleep, parameter[constant[0.02]]]
keyword[def] identifier[get_event_iter_returns] ( identifier[self] , identifier[jid] , identifier[minions] , identifier[timeout] = keyword[None] ): literal[string] identifier[log] . identifier[trace] ( literal[string] ) keyword[if] identifier[timeout] keyword[is] keyword[None] : identifier[timeout] = identifier[self] . identifier[opts] [ literal[string] ] identifier[timeout_at] = identifier[time] . identifier[time] ()+ identifier[timeout] identifier[found] = identifier[set] () keyword[if] identifier[self] . identifier[returners] [ literal[string] . identifier[format] ( identifier[self] . identifier[opts] [ literal[string] ])]( identifier[jid] )=={}: identifier[log] . identifier[warning] ( literal[string] ) keyword[yield] {} keyword[raise] identifier[StopIteration] () keyword[while] keyword[True] : identifier[raw] = identifier[self] . identifier[event] . identifier[get_event] ( identifier[timeout] , identifier[auto_reconnect] = identifier[self] . identifier[auto_reconnect] ) keyword[if] identifier[raw] keyword[is] keyword[None] keyword[or] identifier[time] . identifier[time] ()> identifier[timeout_at] : keyword[break] keyword[if] literal[string] keyword[in] identifier[raw] . identifier[get] ( literal[string] ,{}): keyword[continue] keyword[try] : identifier[found] . identifier[add] ( identifier[raw] [ literal[string] ]) identifier[ret] ={ identifier[raw] [ literal[string] ]:{ literal[string] : identifier[raw] [ literal[string] ]}} keyword[except] identifier[KeyError] : keyword[continue] keyword[if] literal[string] keyword[in] identifier[raw] : identifier[ret] [ identifier[raw] [ literal[string] ]][ literal[string] ]= identifier[raw] [ literal[string] ] keyword[yield] identifier[ret] identifier[time] . identifier[sleep] ( literal[int] )
def get_event_iter_returns(self, jid, minions, timeout=None):
    """
    Gather the return data from the event system, break hard when timeout
    is reached.
    """
    log.trace('entered - function get_event_iter_returns()')
    if timeout is None:
        timeout = self.opts['timeout'] # depends on [control=['if'], data=['timeout']]
    timeout_at = time.time() + timeout
    found = set()
    # Check to see if the jid is real, if not return the empty dict
    if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
        log.warning('jid does not exist')
        yield {}
        # stop the iteration, since the jid is invalid
        raise StopIteration() # depends on [control=['if'], data=[]]
    # Wait for the hosts to check in
    while True:
        raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect)
        if raw is None or time.time() > timeout_at:
            # Timeout reached
            break # depends on [control=['if'], data=[]]
        if 'minions' in raw.get('data', {}):
            continue # depends on [control=['if'], data=[]]
        try:
            found.add(raw['id'])
            ret = {raw['id']: {'ret': raw['return']}} # depends on [control=['try'], data=[]]
        except KeyError:
            # Ignore other erroneous messages
            continue # depends on [control=['except'], data=[]]
        if 'out' in raw:
            ret[raw['id']]['out'] = raw['out'] # depends on [control=['if'], data=['raw']]
        yield ret
        time.sleep(0.02) # depends on [control=['while'], data=[]]
def get_last_commit_modifying_files(repo: Repo, *files) -> str:
    """
    Returns the hash of the last commit which modified some of the files
    (or files in those folders).

    :param repo: The repo to check in.
    :param files: List of files to check
    :return: Commit hash.
    """
    return repo.git.log(*files, n=1, format="%H")
def function[get_last_commit_modifying_files, parameter[repo]]: constant[ Returns the hash of the last commit which modified some of the files (or files in those folders). :param repo: The repo to check in. :param files: List of files to check :return: Commit hash. ] return[call[name[repo].git.log, parameter[<ast.Starred object at 0x7da1b1643040>]]]
keyword[def] identifier[get_last_commit_modifying_files] ( identifier[repo] : identifier[Repo] ,* identifier[files] )-> identifier[str] : literal[string] keyword[return] identifier[repo] . identifier[git] . identifier[log] (* identifier[files] , identifier[n] = literal[int] , identifier[format] = literal[string] )
def get_last_commit_modifying_files(repo: Repo, *files) -> str:
    """
    Returns the hash of the last commit which modified some of the files
    (or files in those folders).

    :param repo: The repo to check in.
    :param files: List of files to check
    :return: Commit hash.
    """
    return repo.git.log(*files, n=1, format='%H')
def run(self):
    """
    Run the plugin.
    """
    worker_builds = self.workflow.build_result.annotations['worker-builds']
    has_v1_image_id = None
    repo_tags = {}
    for platform in worker_builds:
        build_info = get_worker_build_info(self.workflow, platform)
        annotations = build_info.build.get_annotations()
        v1_image_id = annotations.get('v1-image-id')
        if v1_image_id:
            if has_v1_image_id:
                msg = "two platforms with v1-image-ids: {0} and {1}".format(platform, has_v1_image_id)
                raise RuntimeError(msg)
            has_v1_image_id = platform
            self.log.info("tagging v1-image-id %s for platform %s", v1_image_id, platform)
            ret_val = self.set_v1_tags(v1_image_id)
            if ret_val:
                repo_tags = ret_val
    return repo_tags
def function[run, parameter[self]]: constant[ Run the plugin. ] variable[worker_builds] assign[=] call[name[self].workflow.build_result.annotations][constant[worker-builds]] variable[has_v1_image_id] assign[=] constant[None] variable[repo_tags] assign[=] dictionary[[], []] for taget[name[platform]] in starred[name[worker_builds]] begin[:] variable[build_info] assign[=] call[name[get_worker_build_info], parameter[name[self].workflow, name[platform]]] variable[annotations] assign[=] call[name[build_info].build.get_annotations, parameter[]] variable[v1_image_id] assign[=] call[name[annotations].get, parameter[constant[v1-image-id]]] if name[v1_image_id] begin[:] if name[has_v1_image_id] begin[:] variable[msg] assign[=] call[constant[two platforms with v1-image-ids: {0} and {1}].format, parameter[name[platform], name[has_v1_image_id]]] <ast.Raise object at 0x7da20c76fc70> variable[has_v1_image_id] assign[=] name[platform] call[name[self].log.info, parameter[constant[tagging v1-image-id %s for platform %s], name[v1_image_id], name[platform]]] variable[ret_val] assign[=] call[name[self].set_v1_tags, parameter[name[v1_image_id]]] if name[ret_val] begin[:] variable[repo_tags] assign[=] name[ret_val] return[name[repo_tags]]
keyword[def] identifier[run] ( identifier[self] ): literal[string] identifier[worker_builds] = identifier[self] . identifier[workflow] . identifier[build_result] . identifier[annotations] [ literal[string] ] identifier[has_v1_image_id] = keyword[None] identifier[repo_tags] ={} keyword[for] identifier[platform] keyword[in] identifier[worker_builds] : identifier[build_info] = identifier[get_worker_build_info] ( identifier[self] . identifier[workflow] , identifier[platform] ) identifier[annotations] = identifier[build_info] . identifier[build] . identifier[get_annotations] () identifier[v1_image_id] = identifier[annotations] . identifier[get] ( literal[string] ) keyword[if] identifier[v1_image_id] : keyword[if] identifier[has_v1_image_id] : identifier[msg] = literal[string] . identifier[format] ( identifier[platform] , identifier[has_v1_image_id] ) keyword[raise] identifier[RuntimeError] ( identifier[msg] ) identifier[has_v1_image_id] = identifier[platform] identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[v1_image_id] , identifier[platform] ) identifier[ret_val] = identifier[self] . identifier[set_v1_tags] ( identifier[v1_image_id] ) keyword[if] identifier[ret_val] : identifier[repo_tags] = identifier[ret_val] keyword[return] identifier[repo_tags]
def run(self):
    """
    Run the plugin.
    """
    worker_builds = self.workflow.build_result.annotations['worker-builds']
    has_v1_image_id = None
    repo_tags = {}
    for platform in worker_builds:
        build_info = get_worker_build_info(self.workflow, platform)
        annotations = build_info.build.get_annotations()
        v1_image_id = annotations.get('v1-image-id')
        if v1_image_id:
            if has_v1_image_id:
                msg = 'two platforms with v1-image-ids: {0} and {1}'.format(platform, has_v1_image_id)
                raise RuntimeError(msg) # depends on [control=['if'], data=[]]
            has_v1_image_id = platform
            self.log.info('tagging v1-image-id %s for platform %s', v1_image_id, platform)
            ret_val = self.set_v1_tags(v1_image_id)
            if ret_val:
                repo_tags = ret_val # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['platform']]
    return repo_tags
def d_deta_from_phalf(arr, pfull_coord): """Compute pressure level thickness from half level pressures.""" d_deta = arr.diff(dim=internal_names.PHALF_STR, n=1) return replace_coord(d_deta, internal_names.PHALF_STR, internal_names.PFULL_STR, pfull_coord)
def function[d_deta_from_phalf, parameter[arr, pfull_coord]]: constant[Compute pressure level thickness from half level pressures.] variable[d_deta] assign[=] call[name[arr].diff, parameter[]] return[call[name[replace_coord], parameter[name[d_deta], name[internal_names].PHALF_STR, name[internal_names].PFULL_STR, name[pfull_coord]]]]
keyword[def] identifier[d_deta_from_phalf] ( identifier[arr] , identifier[pfull_coord] ): literal[string] identifier[d_deta] = identifier[arr] . identifier[diff] ( identifier[dim] = identifier[internal_names] . identifier[PHALF_STR] , identifier[n] = literal[int] ) keyword[return] identifier[replace_coord] ( identifier[d_deta] , identifier[internal_names] . identifier[PHALF_STR] , identifier[internal_names] . identifier[PFULL_STR] , identifier[pfull_coord] )
def d_deta_from_phalf(arr, pfull_coord): """Compute pressure level thickness from half level pressures.""" d_deta = arr.diff(dim=internal_names.PHALF_STR, n=1) return replace_coord(d_deta, internal_names.PHALF_STR, internal_names.PFULL_STR, pfull_coord)
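For orientation, a minimal standalone sketch of the same thickness computation in plain xarray; `internal_names` and `replace_coord` are project-internal helpers, so the coordinate names and sample values below are assumptions.

import numpy as np
import xarray as xr

# Hypothetical half-level pressures; diff along 'phalf' gives layer thicknesses.
phalf = xr.DataArray([1000.0, 850.0, 500.0], dims='phalf', name='p')
pfull = np.array([925.0, 675.0])  # assumed full-level coordinate values

d_deta = phalf.diff(dim='phalf', n=1)  # one fewer point than the half levels
# Re-label the result onto the full-level coordinate, as replace_coord would.
d_deta = d_deta.rename({'phalf': 'pfull'}).assign_coords(pfull=pfull)
print(d_deta.values)  # [-150. -350.]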
def add(self, visualization): """ Creates a new visualization :param visualization: instance of Visualization :return: the Elasticsearch create-API response """ res = self.es.create(index=self.index, id=visualization.id or str(uuid.uuid1()), doc_type=self.doc_type, body=visualization.to_kibana(), refresh=True) return res
def function[add, parameter[self, visualization]]: constant[ Creates a new visualization :param visualization: instance of Visualization :return: ] variable[res] assign[=] call[name[self].es.create, parameter[]] return[name[res]]
keyword[def] identifier[add] ( identifier[self] , identifier[visualization] ): literal[string] identifier[res] = identifier[self] . identifier[es] . identifier[create] ( identifier[index] = identifier[self] . identifier[index] , identifier[id] = identifier[visualization] . identifier[id] keyword[or] identifier[str] ( identifier[uuid] . identifier[uuid1] ()), identifier[doc_type] = identifier[self] . identifier[doc_type] , identifier[body] = identifier[visualization] . identifier[to_kibana] (), identifier[refresh] = keyword[True] ) keyword[return] identifier[res]
def add(self, visualization): """ Creates a new visualization :param visualization: instance of Visualization :return: """ res = self.es.create(index=self.index, id=visualization.id or str(uuid.uuid1()), doc_type=self.doc_type, body=visualization.to_kibana(), refresh=True) return res
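A hedged usage sketch; the manager object and the Visualization constructor shown here are illustrative assumptions, not part of the snippet above.

vis = Visualization(title='Requests per minute')  # hypothetical constructor; vis.id is None
result = manager.add(vis)                         # id falls back to str(uuid.uuid1())
print(result['result'])                           # 'created' on success; refresh=True makes
                                                  # the document immediately searchable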
def Add_text(self, s): """ Add text to measurement data window. """ self.logger.DeleteAllItems() FONT_RATIO = self.GUI_RESOLUTION + (self.GUI_RESOLUTION - 1) * 5 if self.GUI_RESOLUTION > 1.1: font1 = wx.Font(11, wx.SWISS, wx.NORMAL, wx.NORMAL, False, self.font_type) elif self.GUI_RESOLUTION <= 0.9: font1 = wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL, False, self.font_type) else: font1 = wx.Font(10, wx.SWISS, wx.NORMAL, wx.NORMAL, False, self.font_type) # get temperature indices to display current interp steps in logger t1 = self.tmin_box.GetValue() t2 = self.tmax_box.GetValue() # microwave or thermal if "LP-PI-M" in self.Data[s]['datablock'][0]['magic_method_codes']: MICROWAVE = True THERMAL = False steps_tr = [] for rec in self.Data[s]['datablock']: if "measurement_description" in rec: MW_step = rec["measurement_description"].strip( '\n').split(":") for STEP in MW_step: if "Number" in STEP: temp = float(STEP.split("-")[-1]) steps_tr.append(temp) else: power = rec['treatment_mw_power'] if '-' in str(power): power = power.split('-')[-1] steps_tr.append(int(power)) #steps_tr = [float(d['treatment_mw_power'].split("-")[-1]) # for d in self.Data[s]['datablock']] else: MICROWAVE = False THERMAL = True steps_tr = [float(d['treatment_temp']) - 273 for d in self.Data[s]['datablock']] if (t1 == "" or t2 == "") or float(t2) < float(t1): tmin_index, tmax_index = -1, -1 else: tmin_index = steps_tr.index(int(t1)) tmax_index = steps_tr.index(int(t2)) self.logger.SetFont(font1) for i, rec in enumerate(self.Data[s]['datablock']): if "LT-NO" in rec['magic_method_codes']: step = "N" elif "LT-AF-Z" in rec['magic_method_codes']: step = "AFD" elif "LT-T-Z" in rec['magic_method_codes'] or 'LT-M-Z' in rec['magic_method_codes']: step = "Z" elif "LT-T-I" in rec['magic_method_codes'] or 'LT-M-I' in rec['magic_method_codes']: step = "I" elif "LT-PTRM-I" in rec['magic_method_codes'] or "LT-PMRM-I" in rec['magic_method_codes']: step = "P" elif "LT-PTRM-MD" in rec['magic_method_codes'] or "LT-PMRM-MD" in rec['magic_method_codes']: step = "T" elif "LT-PTRM-AC" in rec['magic_method_codes'] or "LT-PMRM-AC" in rec['magic_method_codes']: step = "A" else: print("unrecognized step in specimen %s Method codes: %s" % (s, str(rec['magic_method_codes']))) if THERMAL: self.logger.InsertItem(i, "%i" % i) self.logger.SetItem(i, 1, step) self.logger.SetItem(i, 2, "%1.0f" % (float(rec['treatment_temp']) - 273.)) self.logger.SetItem(i, 3, "%.1f" % float(rec['measurement_dec'])) self.logger.SetItem(i, 4, "%.1f" % float(rec['measurement_inc'])) self.logger.SetItem(i, 5, "%.2e" % float(rec['measurement_magn_moment'])) elif MICROWAVE: # microwave if "measurement_description" in list(rec.keys()): MW_step = rec["measurement_description"].strip( '\n').split(":") for STEP in MW_step: if "Number" not in STEP: continue temp = float(STEP.split("-")[-1]) self.logger.InsertItem(i, "%i" % i) self.logger.SetItem(i, 1, step) self.logger.SetItem(i, 2, "%1.0f" % temp) self.logger.SetItem(i, 3, "%.1f" % float(rec['measurement_dec'])) self.logger.SetItem(i, 4, "%.1f" % float(rec['measurement_inc'])) self.logger.SetItem(i, 5, "%.2e" % float( rec['measurement_magn_moment'])) self.logger.SetItemBackgroundColour(i, "WHITE") if i >= tmin_index and i <= tmax_index: self.logger.SetItemBackgroundColour(i, "LIGHT BLUE") if 'measurement_flag' not in list(rec.keys()): rec['measurement_flag'] = 'g'
def function[Add_text, parameter[self, s]]: constant[ Add text to measurement data window. ] call[name[self].logger.DeleteAllItems, parameter[]] variable[FONT_RATIO] assign[=] binary_operation[name[self].GUI_RESOLUTION + binary_operation[binary_operation[name[self].GUI_RESOLUTION - constant[1]] * constant[5]]] if compare[name[self].GUI_RESOLUTION greater[>] constant[1.1]] begin[:] variable[font1] assign[=] call[name[wx].Font, parameter[constant[11], name[wx].SWISS, name[wx].NORMAL, name[wx].NORMAL, constant[False], name[self].font_type]] variable[t1] assign[=] call[name[self].tmin_box.GetValue, parameter[]] variable[t2] assign[=] call[name[self].tmax_box.GetValue, parameter[]] if compare[constant[LP-PI-M] in call[call[call[call[name[self].Data][name[s]]][constant[datablock]]][constant[0]]][constant[magic_method_codes]]] begin[:] variable[MICROWAVE] assign[=] constant[True] variable[THERMAL] assign[=] constant[False] variable[steps_tr] assign[=] list[[]] for taget[name[rec]] in starred[call[call[name[self].Data][name[s]]][constant[datablock]]] begin[:] if compare[constant[measurement_description] in name[rec]] begin[:] variable[MW_step] assign[=] call[call[call[name[rec]][constant[measurement_description]].strip, parameter[constant[ ]]].split, parameter[constant[:]]] for taget[name[STEP]] in starred[name[MW_step]] begin[:] if compare[constant[Number] in name[STEP]] begin[:] variable[temp] assign[=] call[name[float], parameter[call[call[name[STEP].split, parameter[constant[-]]]][<ast.UnaryOp object at 0x7da20c76eef0>]]] call[name[steps_tr].append, parameter[name[temp]]] if <ast.BoolOp object at 0x7da1b05b1480> begin[:] <ast.Tuple object at 0x7da1b05b0b50> assign[=] tuple[[<ast.UnaryOp object at 0x7da1b05b08b0>, <ast.UnaryOp object at 0x7da1b05b0850>]] call[name[self].logger.SetFont, parameter[name[font1]]] for taget[tuple[[<ast.Name object at 0x7da1b05b0700>, <ast.Name object at 0x7da1b05b0580>]]] in starred[call[name[enumerate], parameter[call[call[name[self].Data][name[s]]][constant[datablock]]]]] begin[:] if compare[constant[LT-NO] in call[name[rec]][constant[magic_method_codes]]] begin[:] variable[step] assign[=] constant[N] if name[THERMAL] begin[:] call[name[self].logger.InsertItem, parameter[name[i], binary_operation[constant[%i] <ast.Mod object at 0x7da2590d6920> name[i]]]] call[name[self].logger.SetItem, parameter[name[i], constant[1], name[step]]] call[name[self].logger.SetItem, parameter[name[i], constant[2], binary_operation[constant[%1.0f] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[float], parameter[call[name[rec]][constant[treatment_temp]]]] - constant[273.0]]]]] call[name[self].logger.SetItem, parameter[name[i], constant[3], binary_operation[constant[%.1f] <ast.Mod object at 0x7da2590d6920> call[name[float], parameter[call[name[rec]][constant[measurement_dec]]]]]]] call[name[self].logger.SetItem, parameter[name[i], constant[4], binary_operation[constant[%.1f] <ast.Mod object at 0x7da2590d6920> call[name[float], parameter[call[name[rec]][constant[measurement_inc]]]]]]] call[name[self].logger.SetItem, parameter[name[i], constant[5], binary_operation[constant[%.2e] <ast.Mod object at 0x7da2590d6920> call[name[float], parameter[call[name[rec]][constant[measurement_magn_moment]]]]]]] call[name[self].logger.SetItemBackgroundColour, parameter[name[i], constant[WHITE]]] if <ast.BoolOp object at 0x7da1b04fda50> begin[:] call[name[self].logger.SetItemBackgroundColour, parameter[name[i], constant[LIGHT BLUE]]] if compare[constant[measurement_flag] <ast.NotIn object at 
0x7da2590d7190> call[name[list], parameter[call[name[rec].keys, parameter[]]]]] begin[:] call[name[rec]][constant[measurement_flag]] assign[=] constant[g]
keyword[def] identifier[Add_text] ( identifier[self] , identifier[s] ): literal[string] identifier[self] . identifier[logger] . identifier[DeleteAllItems] () identifier[FONT_RATIO] = identifier[self] . identifier[GUI_RESOLUTION] +( identifier[self] . identifier[GUI_RESOLUTION] - literal[int] )* literal[int] keyword[if] identifier[self] . identifier[GUI_RESOLUTION] > literal[int] : identifier[font1] = identifier[wx] . identifier[Font] ( literal[int] , identifier[wx] . identifier[SWISS] , identifier[wx] . identifier[NORMAL] , identifier[wx] . identifier[NORMAL] , keyword[False] , identifier[self] . identifier[font_type] ) keyword[elif] identifier[self] . identifier[GUI_RESOLUTION] <= literal[int] : identifier[font1] = identifier[wx] . identifier[Font] ( literal[int] , identifier[wx] . identifier[SWISS] , identifier[wx] . identifier[NORMAL] , identifier[wx] . identifier[NORMAL] , keyword[False] , identifier[self] . identifier[font_type] ) keyword[else] : identifier[font1] = identifier[wx] . identifier[Font] ( literal[int] , identifier[wx] . identifier[SWISS] , identifier[wx] . identifier[NORMAL] , identifier[wx] . identifier[NORMAL] , keyword[False] , identifier[self] . identifier[font_type] ) identifier[t1] = identifier[self] . identifier[tmin_box] . identifier[GetValue] () identifier[t2] = identifier[self] . identifier[tmax_box] . identifier[GetValue] () keyword[if] literal[string] keyword[in] identifier[self] . identifier[Data] [ identifier[s] ][ literal[string] ][ literal[int] ][ literal[string] ]: identifier[MICROWAVE] = keyword[True] identifier[THERMAL] = keyword[False] identifier[steps_tr] =[] keyword[for] identifier[rec] keyword[in] identifier[self] . identifier[Data] [ identifier[s] ][ literal[string] ]: keyword[if] literal[string] keyword[in] identifier[rec] : identifier[MW_step] = identifier[rec] [ literal[string] ]. identifier[strip] ( literal[string] ). identifier[split] ( literal[string] ) keyword[for] identifier[STEP] keyword[in] identifier[MW_step] : keyword[if] literal[string] keyword[in] identifier[STEP] : identifier[temp] = identifier[float] ( identifier[STEP] . identifier[split] ( literal[string] )[- literal[int] ]) identifier[steps_tr] . identifier[append] ( identifier[temp] ) keyword[else] : identifier[power] = identifier[rec] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[str] ( identifier[power] ): identifier[power] = identifier[power] . identifier[split] ( literal[string] )[- literal[int] ] identifier[steps_tr] . identifier[append] ( identifier[int] ( identifier[power] )) keyword[else] : identifier[MICROWAVE] = keyword[False] identifier[THERMAL] = keyword[True] identifier[steps_tr] =[ identifier[float] ( identifier[d] [ literal[string] ])- literal[int] keyword[for] identifier[d] keyword[in] identifier[self] . identifier[Data] [ identifier[s] ][ literal[string] ]] keyword[if] ( identifier[t1] == literal[string] keyword[or] identifier[t2] == literal[string] ) keyword[or] identifier[float] ( identifier[t2] )< identifier[float] ( identifier[t1] ): identifier[tmin_index] , identifier[tmax_index] =- literal[int] ,- literal[int] keyword[else] : identifier[tmin_index] = identifier[steps_tr] . identifier[index] ( identifier[int] ( identifier[t1] )) identifier[tmax_index] = identifier[steps_tr] . identifier[index] ( identifier[int] ( identifier[t2] )) identifier[self] . identifier[logger] . identifier[SetFont] ( identifier[font1] ) keyword[for] identifier[i] , identifier[rec] keyword[in] identifier[enumerate] ( identifier[self] . 
identifier[Data] [ identifier[s] ][ literal[string] ]): keyword[if] literal[string] keyword[in] identifier[rec] [ literal[string] ]: identifier[step] = literal[string] keyword[elif] literal[string] keyword[in] identifier[rec] [ literal[string] ]: identifier[step] = literal[string] keyword[elif] literal[string] keyword[in] identifier[rec] [ literal[string] ] keyword[or] literal[string] keyword[in] identifier[rec] [ literal[string] ]: identifier[step] = literal[string] keyword[elif] literal[string] keyword[in] identifier[rec] [ literal[string] ] keyword[or] literal[string] keyword[in] identifier[rec] [ literal[string] ]: identifier[step] = literal[string] keyword[elif] literal[string] keyword[in] identifier[rec] [ literal[string] ] keyword[or] literal[string] keyword[in] identifier[rec] [ literal[string] ]: identifier[step] = literal[string] keyword[elif] literal[string] keyword[in] identifier[rec] [ literal[string] ] keyword[or] literal[string] keyword[in] identifier[rec] [ literal[string] ]: identifier[step] = literal[string] keyword[elif] literal[string] keyword[in] identifier[rec] [ literal[string] ] keyword[or] literal[string] keyword[in] identifier[rec] [ literal[string] ]: identifier[step] = literal[string] keyword[else] : identifier[print] (( literal[string] % ( identifier[str] ( identifier[rec] [ literal[string] ]), identifier[s] ))) keyword[if] identifier[THERMAL] : identifier[self] . identifier[logger] . identifier[InsertItem] ( identifier[i] , literal[string] % identifier[i] ) identifier[self] . identifier[logger] . identifier[SetItem] ( identifier[i] , literal[int] , identifier[step] ) identifier[self] . identifier[logger] . identifier[SetItem] ( identifier[i] , literal[int] , literal[string] % ( identifier[float] ( identifier[rec] [ literal[string] ])- literal[int] )) identifier[self] . identifier[logger] . identifier[SetItem] ( identifier[i] , literal[int] , literal[string] % identifier[float] ( identifier[rec] [ literal[string] ])) identifier[self] . identifier[logger] . identifier[SetItem] ( identifier[i] , literal[int] , literal[string] % identifier[float] ( identifier[rec] [ literal[string] ])) identifier[self] . identifier[logger] . identifier[SetItem] ( identifier[i] , literal[int] , literal[string] % identifier[float] ( identifier[rec] [ literal[string] ])) keyword[elif] identifier[MICROWAVE] : keyword[if] literal[string] keyword[in] identifier[list] ( identifier[rec] . identifier[keys] ()): identifier[MW_step] = identifier[rec] [ literal[string] ]. identifier[strip] ( literal[string] ). identifier[split] ( literal[string] ) keyword[for] identifier[STEP] keyword[in] identifier[MW_step] : keyword[if] literal[string] keyword[not] keyword[in] identifier[STEP] : keyword[continue] identifier[temp] = identifier[float] ( identifier[STEP] . identifier[split] ( literal[string] )[- literal[int] ]) identifier[self] . identifier[logger] . identifier[InsertItem] ( identifier[i] , literal[string] % identifier[i] ) identifier[self] . identifier[logger] . identifier[SetItem] ( identifier[i] , literal[int] , identifier[step] ) identifier[self] . identifier[logger] . identifier[SetItem] ( identifier[i] , literal[int] , literal[string] % identifier[temp] ) identifier[self] . identifier[logger] . identifier[SetItem] ( identifier[i] , literal[int] , literal[string] % identifier[float] ( identifier[rec] [ literal[string] ])) identifier[self] . identifier[logger] . 
identifier[SetItem] ( identifier[i] , literal[int] , literal[string] % identifier[float] ( identifier[rec] [ literal[string] ])) identifier[self] . identifier[logger] . identifier[SetItem] ( identifier[i] , literal[int] , literal[string] % identifier[float] ( identifier[rec] [ literal[string] ])) identifier[self] . identifier[logger] . identifier[SetItemBackgroundColour] ( identifier[i] , literal[string] ) keyword[if] identifier[i] >= identifier[tmin_index] keyword[and] identifier[i] <= identifier[tmax_index] : identifier[self] . identifier[logger] . identifier[SetItemBackgroundColour] ( identifier[i] , literal[string] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[list] ( identifier[rec] . identifier[keys] ()): identifier[rec] [ literal[string] ]= literal[string]
def Add_text(self, s): """ Add text to measurement data window. """ self.logger.DeleteAllItems() FONT_RATIO = self.GUI_RESOLUTION + (self.GUI_RESOLUTION - 1) * 5 if self.GUI_RESOLUTION > 1.1: font1 = wx.Font(11, wx.SWISS, wx.NORMAL, wx.NORMAL, False, self.font_type) # depends on [control=['if'], data=[]] elif self.GUI_RESOLUTION <= 0.9: font1 = wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL, False, self.font_type) # depends on [control=['if'], data=[]] else: font1 = wx.Font(10, wx.SWISS, wx.NORMAL, wx.NORMAL, False, self.font_type) # get temperature indecies to display current interp steps in logger t1 = self.tmin_box.GetValue() t2 = self.tmax_box.GetValue() # microwave or thermal if 'LP-PI-M' in self.Data[s]['datablock'][0]['magic_method_codes']: MICROWAVE = True THERMAL = False steps_tr = [] for rec in self.Data[s]['datablock']: if 'measurement_description' in rec: MW_step = rec['measurement_description'].strip('\n').split(':') for STEP in MW_step: if 'Number' in STEP: temp = float(STEP.split('-')[-1]) steps_tr.append(temp) # depends on [control=['if'], data=['STEP']] # depends on [control=['for'], data=['STEP']] # depends on [control=['if'], data=['rec']] else: power = rec['treatment_mw_power'] if '-' in str(power): power = power.split('-')[-1] # depends on [control=['if'], data=[]] steps_tr.append(int(power)) # depends on [control=['for'], data=['rec']] # depends on [control=['if'], data=[]] else: #steps_tr = [float(d['treatment_mw_power'].split("-")[-1]) # for d in self.Data[s]['datablock']] MICROWAVE = False THERMAL = True steps_tr = [float(d['treatment_temp']) - 273 for d in self.Data[s]['datablock']] if (t1 == '' or t2 == '') or float(t2) < float(t1): (tmin_index, tmax_index) = (-1, -1) # depends on [control=['if'], data=[]] else: tmin_index = steps_tr.index(int(t1)) tmax_index = steps_tr.index(int(t2)) self.logger.SetFont(font1) for (i, rec) in enumerate(self.Data[s]['datablock']): if 'LT-NO' in rec['magic_method_codes']: step = 'N' # depends on [control=['if'], data=[]] elif 'LT-AF-Z' in rec['magic_method_codes']: step = 'AFD' # depends on [control=['if'], data=[]] elif 'LT-T-Z' in rec['magic_method_codes'] or 'LT-M-Z' in rec['magic_method_codes']: step = 'Z' # depends on [control=['if'], data=[]] elif 'LT-T-I' in rec['magic_method_codes'] or 'LT-M-I' in rec['magic_method_codes']: step = 'I' # depends on [control=['if'], data=[]] elif 'LT-PTRM-I' in rec['magic_method_codes'] or 'LT-PMRM-I' in rec['magic_method_codes']: step = 'P' # depends on [control=['if'], data=[]] elif 'LT-PTRM-MD' in rec['magic_method_codes'] or 'LT-PMRM-MD' in rec['magic_method_codes']: step = 'T' # depends on [control=['if'], data=[]] elif 'LT-PTRM-AC' in rec['magic_method_codes'] or 'LT-PMRM-AC' in rec['magic_method_codes']: step = 'A' # depends on [control=['if'], data=[]] else: print('unrecognized step in specimen %s Method codes: %s' % (str(rec['magic_method_codes']), s)) if THERMAL: self.logger.InsertItem(i, '%i' % i) self.logger.SetItem(i, 1, step) self.logger.SetItem(i, 2, '%1.0f' % (float(rec['treatment_temp']) - 273.0)) self.logger.SetItem(i, 3, '%.1f' % float(rec['measurement_dec'])) self.logger.SetItem(i, 4, '%.1f' % float(rec['measurement_inc'])) self.logger.SetItem(i, 5, '%.2e' % float(rec['measurement_magn_moment'])) # depends on [control=['if'], data=[]] elif MICROWAVE: # mcrowave if 'measurement_description' in list(rec.keys()): MW_step = rec['measurement_description'].strip('\n').split(':') for STEP in MW_step: if 'Number' not in STEP: continue # depends on [control=['if'], data=[]] temp = 
float(STEP.split('-')[-1]) self.logger.InsertItem(i, '%i' % i) self.logger.SetItem(i, 1, step) self.logger.SetItem(i, 2, '%1.0f' % temp) self.logger.SetItem(i, 3, '%.1f' % float(rec['measurement_dec'])) self.logger.SetItem(i, 4, '%.1f' % float(rec['measurement_inc'])) self.logger.SetItem(i, 5, '%.2e' % float(rec['measurement_magn_moment'])) # depends on [control=['for'], data=['STEP']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] self.logger.SetItemBackgroundColour(i, 'WHITE') if i >= tmin_index and i <= tmax_index: self.logger.SetItemBackgroundColour(i, 'LIGHT BLUE') # depends on [control=['if'], data=[]] if 'measurement_flag' not in list(rec.keys()): rec['measurement_flag'] = 'g' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def _stick_device(self): """ Discovers the filename of the evdev device that represents the Sense HAT's joystick. """ for evdev in glob.glob('/sys/class/input/event*'): try: with io.open(os.path.join(evdev, 'device', 'name'), 'r') as f: if f.read().strip() == self.SENSE_HAT_EVDEV_NAME: return os.path.join('/dev', 'input', os.path.basename(evdev)) except IOError as e: if e.errno != errno.ENOENT: raise raise RuntimeError('unable to locate SenseHAT joystick device')
def function[_stick_device, parameter[self]]: constant[ Discovers the filename of the evdev device that represents the Sense HAT's joystick. ] for taget[name[evdev]] in starred[call[name[glob].glob, parameter[constant[/sys/class/input/event*]]]] begin[:] <ast.Try object at 0x7da1b0981300> <ast.Raise object at 0x7da1b0981870>
keyword[def] identifier[_stick_device] ( identifier[self] ): literal[string] keyword[for] identifier[evdev] keyword[in] identifier[glob] . identifier[glob] ( literal[string] ): keyword[try] : keyword[with] identifier[io] . identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[evdev] , literal[string] , literal[string] ), literal[string] ) keyword[as] identifier[f] : keyword[if] identifier[f] . identifier[read] (). identifier[strip] ()== identifier[self] . identifier[SENSE_HAT_EVDEV_NAME] : keyword[return] identifier[os] . identifier[path] . identifier[join] ( literal[string] , literal[string] , identifier[os] . identifier[path] . identifier[basename] ( identifier[evdev] )) keyword[except] identifier[IOError] keyword[as] identifier[e] : keyword[if] identifier[e] . identifier[errno] != identifier[errno] . identifier[ENOENT] : keyword[raise] keyword[raise] identifier[RuntimeError] ( literal[string] )
def _stick_device(self): """ Discovers the filename of the evdev device that represents the Sense HAT's joystick. """ for evdev in glob.glob('/sys/class/input/event*'): try: with io.open(os.path.join(evdev, 'device', 'name'), 'r') as f: if f.read().strip() == self.SENSE_HAT_EVDEV_NAME: return os.path.join('/dev', 'input', os.path.basename(evdev)) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]] except IOError as e: if e.errno != errno.ENOENT: raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['evdev']] raise RuntimeError('unable to locate SenseHAT joystick device')
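A hedged sketch of consuming the discovered path with python-evdev; the evdev side and the `hat` instance are assumptions, not part of this class.

from evdev import InputDevice

path = hat._stick_device()       # 'hat' is an assumed SenseHat-like instance;
                                 # returns e.g. '/dev/input/event2' or raises RuntimeError
stick = InputDevice(path)
for event in stick.read_loop():  # blocking generator of joystick input events
    print(event)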
def blob_services(self): """Instance depends on the API version: * 2018-07-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2018_07_01.operations.BlobServicesOperations>` """ api_version = self._get_api_version('blob_services') if api_version == '2018-07-01': from .v2018_07_01.operations import BlobServicesOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def function[blob_services, parameter[self]]: constant[Instance depends on the API version: * 2018-07-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2018_07_01.operations.BlobServicesOperations>` ] variable[api_version] assign[=] call[name[self]._get_api_version, parameter[constant[blob_services]]] if compare[name[api_version] equal[==] constant[2018-07-01]] begin[:] from relative_module[v2018_07_01.operations] import module[BlobServicesOperations] return[call[name[OperationClass], parameter[name[self]._client, name[self].config, call[name[Serializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]], call[name[Deserializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]]]]]
keyword[def] identifier[blob_services] ( identifier[self] ): literal[string] identifier[api_version] = identifier[self] . identifier[_get_api_version] ( literal[string] ) keyword[if] identifier[api_version] == literal[string] : keyword[from] . identifier[v2018_07_01] . identifier[operations] keyword[import] identifier[BlobServicesOperations] keyword[as] identifier[OperationClass] keyword[else] : keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[api_version] )) keyword[return] identifier[OperationClass] ( identifier[self] . identifier[_client] , identifier[self] . identifier[config] , identifier[Serializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )), identifier[Deserializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )))
def blob_services(self): """Instance depends on the API version: * 2018-07-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2018_07_01.operations.BlobServicesOperations>` """ api_version = self._get_api_version('blob_services') if api_version == '2018-07-01': from .v2018_07_01.operations import BlobServicesOperations as OperationClass # depends on [control=['if'], data=[]] else: raise NotImplementedError('APIVersion {} is not available'.format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
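A hedged call sketch showing how the dispatching property is typically consumed; the client construction and the operation invoked are assumptions for illustration.

client = StorageManagementClient(credentials, subscription_id)  # assumed multi-API client
ops = client.blob_services  # resolves to the 2018-07-01 BlobServicesOperations
props = ops.get_service_properties('my-resource-group', 'mystorageaccount')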
def is_admin(self, send, nick, required_role='admin'): """Checks if a nick is an admin. If NickServ hasn't responded yet, then the admin is unverified, so assume they aren't an admin. """ # If the required role is None, bypass checks. if not required_role: return True # Current roles are admin and owner, which is a superset of admin. with self.db.session_scope() as session: admin = session.query(orm.Permissions).filter(orm.Permissions.nick == nick).first() if admin is None: return False # owner implies admin, but not the other way around. if required_role == "owner" and admin.role != "owner": return False # no nickserv support, assume people are who they say they are. if not self.config['feature'].getboolean('nickserv'): return True if not admin.registered: self.update_authstatus(nick) # We don't necessarily want to complain in all cases. if send is not None: send("Unverified admin: %s" % nick, target=self.config['core']['channel']) return False else: if not self.features['account-notify']: # reverify every 5min if we don't have the notification feature. if datetime.now() - admin.time > timedelta(minutes=5): self.update_authstatus(nick) return True
def function[is_admin, parameter[self, send, nick, required_role]]: constant[Checks if a nick is a admin. If NickServ hasn't responded yet, then the admin is unverified, so assume they aren't a admin. ] if <ast.UnaryOp object at 0x7da1b209df30> begin[:] return[constant[True]] with call[name[self].db.session_scope, parameter[]] begin[:] variable[admin] assign[=] call[call[call[name[session].query, parameter[name[orm].Permissions]].filter, parameter[compare[name[orm].Permissions.nick equal[==] name[nick]]]].first, parameter[]] if compare[name[admin] is constant[None]] begin[:] return[constant[False]] if <ast.BoolOp object at 0x7da1b1f0d480> begin[:] return[constant[False]] if <ast.UnaryOp object at 0x7da1b1f0e740> begin[:] return[constant[True]] if <ast.UnaryOp object at 0x7da18f09c700> begin[:] call[name[self].update_authstatus, parameter[name[nick]]] if compare[name[send] is_not constant[None]] begin[:] call[name[send], parameter[binary_operation[constant[Unverified admin: %s] <ast.Mod object at 0x7da2590d6920> name[nick]]]] return[constant[False]]
keyword[def] identifier[is_admin] ( identifier[self] , identifier[send] , identifier[nick] , identifier[required_role] = literal[string] ): literal[string] keyword[if] keyword[not] identifier[required_role] : keyword[return] keyword[True] keyword[with] identifier[self] . identifier[db] . identifier[session_scope] () keyword[as] identifier[session] : identifier[admin] = identifier[session] . identifier[query] ( identifier[orm] . identifier[Permissions] ). identifier[filter] ( identifier[orm] . identifier[Permissions] . identifier[nick] == identifier[nick] ). identifier[first] () keyword[if] identifier[admin] keyword[is] keyword[None] : keyword[return] keyword[False] keyword[if] identifier[required_role] == literal[string] keyword[and] identifier[admin] . identifier[role] != literal[string] : keyword[return] keyword[False] keyword[if] keyword[not] identifier[self] . identifier[config] [ literal[string] ]. identifier[getboolean] ( literal[string] ): keyword[return] keyword[True] keyword[if] keyword[not] identifier[admin] . identifier[registered] : identifier[self] . identifier[update_authstatus] ( identifier[nick] ) keyword[if] identifier[send] keyword[is] keyword[not] keyword[None] : identifier[send] ( literal[string] % identifier[nick] , identifier[target] = identifier[self] . identifier[config] [ literal[string] ][ literal[string] ]) keyword[return] keyword[False] keyword[else] : keyword[if] keyword[not] identifier[self] . identifier[features] [ literal[string] ]: keyword[if] identifier[datetime] . identifier[now] ()- identifier[admin] . identifier[time] > identifier[timedelta] ( identifier[minutes] = literal[int] ): identifier[self] . identifier[update_authstatus] ( identifier[nick] ) keyword[return] keyword[True]
def is_admin(self, send, nick, required_role='admin'): """Checks if a nick is a admin. If NickServ hasn't responded yet, then the admin is unverified, so assume they aren't a admin. """ # If the required role is None, bypass checks. if not required_role: return True # depends on [control=['if'], data=[]] # Current roles are admin and owner, which is a superset of admin. with self.db.session_scope() as session: admin = session.query(orm.Permissions).filter(orm.Permissions.nick == nick).first() if admin is None: return False # depends on [control=['if'], data=[]] # owner implies admin, but not the other way around. if required_role == 'owner' and admin.role != 'owner': return False # depends on [control=['if'], data=[]] # no nickserv support, assume people are who they say they are. if not self.config['feature'].getboolean('nickserv'): return True # depends on [control=['if'], data=[]] if not admin.registered: self.update_authstatus(nick) # We don't necessarily want to complain in all cases. if send is not None: send('Unverified admin: %s' % nick, target=self.config['core']['channel']) # depends on [control=['if'], data=['send']] return False # depends on [control=['if'], data=[]] else: if not self.features['account-notify']: # reverify every 5min if we don't have the notification feature. if datetime.now() - admin.time > timedelta(minutes=5): self.update_authstatus(nick) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return True # depends on [control=['with'], data=['session']]
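A hedged sketch of a command guard built on is_admin; the handler and bot objects are illustrative.

def handle_shutdown(bot, send, nick):
    # Owner-only: is_admin handles the role check and NickServ verification.
    if not bot.is_admin(send, nick, required_role='owner'):
        send("Permission denied.")
        return
    send("Shutting down.")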
def bind_cache_grant(app, provider, current_user, config_prefix='OAUTH2'): """Configures an :class:`OAuth2Provider` instance to use various caching systems to get and set the grant token. This removes the need to register :func:`grantgetter` and :func:`grantsetter` yourself. :param app: Flask application instance :param provider: :class:`OAuth2Provider` instance :param current_user: function that returns an :class:`User` object :param config_prefix: prefix for config A usage example:: oauth = OAuth2Provider(app) app.config.update({'OAUTH2_CACHE_TYPE': 'redis'}) bind_cache_grant(app, oauth, current_user) You can define which cache system you would like to use by setting the following configuration option:: OAUTH2_CACHE_TYPE = 'null' // memcache, simple, redis, filesystem For more information on the supported cache systems please visit: `Cache <http://werkzeug.pocoo.org/docs/contrib/cache/>`_ """ cache = Cache(app, config_prefix) @provider.grantsetter def create_grant(client_id, code, request, *args, **kwargs): """Sets the grant token with the configured cache system""" grant = Grant( cache, client_id=client_id, code=code['code'], redirect_uri=request.redirect_uri, scopes=request.scopes, user=current_user(), ) log.debug("Set Grant Token with key %s" % grant.key) cache.set(grant.key, dict(grant)) @provider.grantgetter def get(client_id, code): """Gets the grant token with the configured cache system""" grant = Grant(cache, client_id=client_id, code=code) ret = cache.get(grant.key) if not ret: log.debug("Grant Token not found with key %s" % grant.key) return None log.debug("Grant Token found with key %s" % grant.key) for k, v in ret.items(): setattr(grant, k, v) return grant
def function[bind_cache_grant, parameter[app, provider, current_user, config_prefix]]: constant[Configures an :class:`OAuth2Provider` instance to use various caching systems to get and set the grant token. This removes the need to register :func:`grantgetter` and :func:`grantsetter` yourself. :param app: Flask application instance :param provider: :class:`OAuth2Provider` instance :param current_user: function that returns an :class:`User` object :param config_prefix: prefix for config A usage example:: oauth = OAuth2Provider(app) app.config.update({'OAUTH2_CACHE_TYPE': 'redis'}) bind_cache_grant(app, oauth, current_user) You can define which cache system you would like to use by setting the following configuration option:: OAUTH2_CACHE_TYPE = 'null' // memcache, simple, redis, filesystem For more information on the supported cache systems please visit: `Cache <http://werkzeug.pocoo.org/docs/contrib/cache/>`_ ] variable[cache] assign[=] call[name[Cache], parameter[name[app], name[config_prefix]]] def function[create_grant, parameter[client_id, code, request]]: constant[Sets the grant token with the configured cache system] variable[grant] assign[=] call[name[Grant], parameter[name[cache]]] call[name[log].debug, parameter[binary_operation[constant[Set Grant Token with key %s] <ast.Mod object at 0x7da2590d6920> name[grant].key]]] call[name[cache].set, parameter[name[grant].key, call[name[dict], parameter[name[grant]]]]] def function[get, parameter[client_id, code]]: constant[Gets the grant token with the configured cache system] variable[grant] assign[=] call[name[Grant], parameter[name[cache]]] variable[ret] assign[=] call[name[cache].get, parameter[name[grant].key]] if <ast.UnaryOp object at 0x7da1b024db70> begin[:] call[name[log].debug, parameter[binary_operation[constant[Grant Token not found with key %s] <ast.Mod object at 0x7da2590d6920> name[grant].key]]] return[constant[None]] call[name[log].debug, parameter[binary_operation[constant[Grant Token found with key %s] <ast.Mod object at 0x7da2590d6920> name[grant].key]]] for taget[tuple[[<ast.Name object at 0x7da1b024eef0>, <ast.Name object at 0x7da1b024c610>]]] in starred[call[name[ret].items, parameter[]]] begin[:] call[name[setattr], parameter[name[grant], name[k], name[v]]] return[name[grant]]
keyword[def] identifier[bind_cache_grant] ( identifier[app] , identifier[provider] , identifier[current_user] , identifier[config_prefix] = literal[string] ): literal[string] identifier[cache] = identifier[Cache] ( identifier[app] , identifier[config_prefix] ) @ identifier[provider] . identifier[grantsetter] keyword[def] identifier[create_grant] ( identifier[client_id] , identifier[code] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[grant] = identifier[Grant] ( identifier[cache] , identifier[client_id] = identifier[client_id] , identifier[code] = identifier[code] [ literal[string] ], identifier[redirect_uri] = identifier[request] . identifier[redirect_uri] , identifier[scopes] = identifier[request] . identifier[scopes] , identifier[user] = identifier[current_user] (), ) identifier[log] . identifier[debug] ( literal[string] % identifier[grant] . identifier[key] ) identifier[cache] . identifier[set] ( identifier[grant] . identifier[key] , identifier[dict] ( identifier[grant] )) @ identifier[provider] . identifier[grantgetter] keyword[def] identifier[get] ( identifier[client_id] , identifier[code] ): literal[string] identifier[grant] = identifier[Grant] ( identifier[cache] , identifier[client_id] = identifier[client_id] , identifier[code] = identifier[code] ) identifier[ret] = identifier[cache] . identifier[get] ( identifier[grant] . identifier[key] ) keyword[if] keyword[not] identifier[ret] : identifier[log] . identifier[debug] ( literal[string] % identifier[grant] . identifier[key] ) keyword[return] keyword[None] identifier[log] . identifier[debug] ( literal[string] % identifier[grant] . identifier[key] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[ret] . identifier[items] (): identifier[setattr] ( identifier[grant] , identifier[k] , identifier[v] ) keyword[return] identifier[grant]
def bind_cache_grant(app, provider, current_user, config_prefix='OAUTH2'): """Configures an :class:`OAuth2Provider` instance to use various caching systems to get and set the grant token. This removes the need to register :func:`grantgetter` and :func:`grantsetter` yourself. :param app: Flask application instance :param provider: :class:`OAuth2Provider` instance :param current_user: function that returns an :class:`User` object :param config_prefix: prefix for config A usage example:: oauth = OAuth2Provider(app) app.config.update({'OAUTH2_CACHE_TYPE': 'redis'}) bind_cache_grant(app, oauth, current_user) You can define which cache system you would like to use by setting the following configuration option:: OAUTH2_CACHE_TYPE = 'null' // memcache, simple, redis, filesystem For more information on the supported cache systems please visit: `Cache <http://werkzeug.pocoo.org/docs/contrib/cache/>`_ """ cache = Cache(app, config_prefix) @provider.grantsetter def create_grant(client_id, code, request, *args, **kwargs): """Sets the grant token with the configured cache system""" grant = Grant(cache, client_id=client_id, code=code['code'], redirect_uri=request.redirect_uri, scopes=request.scopes, user=current_user()) log.debug('Set Grant Token with key %s' % grant.key) cache.set(grant.key, dict(grant)) @provider.grantgetter def get(client_id, code): """Gets the grant token with the configured cache system""" grant = Grant(cache, client_id=client_id, code=code) ret = cache.get(grant.key) if not ret: log.debug('Grant Token not found with key %s' % grant.key) return None # depends on [control=['if'], data=[]] log.debug('Grant Token found with key %s' % grant.key) for (k, v) in ret.items(): setattr(grant, k, v) # depends on [control=['for'], data=[]] return grant
def _print(self, msg, flush=False, end="\n"): """Helper function to print connection status messages when in verbose mode.""" if self._verbose: print2(msg, end=end, flush=flush)
def function[_print, parameter[self, msg, flush, end]]: constant[Helper function to print connection status messages when in verbose mode.] if name[self]._verbose begin[:] call[name[print2], parameter[name[msg]]]
keyword[def] identifier[_print] ( identifier[self] , identifier[msg] , identifier[flush] = keyword[False] , identifier[end] = literal[string] ): literal[string] keyword[if] identifier[self] . identifier[_verbose] : identifier[print2] ( identifier[msg] , identifier[end] = identifier[end] , identifier[flush] = identifier[flush] )
def _print(self, msg, flush=False, end='\n'): """Helper function to print connection status messages when in verbose mode.""" if self._verbose: print2(msg, end=end, flush=flush) # depends on [control=['if'], data=[]]
def move_application(self, app_id, queue): """Move an application to a different queue. Parameters ---------- app_id : str The id of the application to move. queue : str The queue to move the application to. """ self._call('moveApplication', proto.MoveRequest(id=app_id, queue=queue))
def function[move_application, parameter[self, app_id, queue]]: constant[Move an application to a different queue. Parameters ---------- app_id : str The id of the application to move. queue : str The queue to move the application to. ] call[name[self]._call, parameter[constant[moveApplication], call[name[proto].MoveRequest, parameter[]]]]
keyword[def] identifier[move_application] ( identifier[self] , identifier[app_id] , identifier[queue] ): literal[string] identifier[self] . identifier[_call] ( literal[string] , identifier[proto] . identifier[MoveRequest] ( identifier[id] = identifier[app_id] , identifier[queue] = identifier[queue] ))
def move_application(self, app_id, queue): """Move an application to a different queue. Parameters ---------- app_id : str The id of the application to move. queue : str The queue to move the application to. """ self._call('moveApplication', proto.MoveRequest(id=app_id, queue=queue))
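A hedged usage sketch; the client setup and ids are illustrative, and only move_application's signature comes from the code above.

client = Client()  # assumed pre-configured connection to the application driver
client.move_application('application_1557438412046_0001', queue='priority')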
def get_unique_counter(self, redis_conn=None, host='localhost', port=6379, key='unique_counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12): ''' Generate a new UniqueCounter. Useful for exactly counting unique objects @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date-based key. @param keep_max: If rolling the static window, the max number of prior windows to keep ''' counter = UniqueCounter(key=key, cycle_time=cycle_time, start_time=start_time, window=window, roll=roll, keep_max=keep_max) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
def function[get_unique_counter, parameter[self, redis_conn, host, port, key, cycle_time, start_time, window, roll, keep_max]]: constant[ Generate a new UniqueCounter. Useful for exactly counting unique objects @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep ] variable[counter] assign[=] call[name[UniqueCounter], parameter[]] call[name[counter].setup, parameter[]] return[name[counter]]
keyword[def] identifier[get_unique_counter] ( identifier[self] , identifier[redis_conn] = keyword[None] , identifier[host] = literal[string] , identifier[port] = literal[int] , identifier[key] = literal[string] , identifier[cycle_time] = literal[int] , identifier[start_time] = keyword[None] , identifier[window] = identifier[SECONDS_1_HOUR] , identifier[roll] = keyword[True] , identifier[keep_max] = literal[int] ): literal[string] identifier[counter] = identifier[UniqueCounter] ( identifier[key] = identifier[key] , identifier[cycle_time] = identifier[cycle_time] , identifier[start_time] = identifier[start_time] , identifier[window] = identifier[window] , identifier[roll] = identifier[roll] , identifier[keep_max] = identifier[keep_max] ) identifier[counter] . identifier[setup] ( identifier[redis_conn] = identifier[redis_conn] , identifier[host] = identifier[host] , identifier[port] = identifier[port] ) keyword[return] identifier[counter]
def get_unique_counter(self, redis_conn=None, host='localhost', port=6379, key='unique_counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12): """ Generate a new UniqueCounter. Useful for exactly counting unique objects @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port @param key: the key for your stats collection @param cycle_time: how often to check for expiring counts @param start_time: the time to start valid collection @param window: how long to collect data for in seconds (if rolling) @param roll: Roll the window after it expires, to continue collecting on a new date based key. @keep_max: If rolling the static window, the max number of prior windows to keep """ counter = UniqueCounter(key=key, cycle_time=cycle_time, start_time=start_time, window=window, roll=roll, keep_max=keep_max) counter.setup(redis_conn=redis_conn, host=host, port=port) return counter
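A hedged usage sketch grounded only in the signature above; the owning stats object is an assumption.

stats = StatsCollector()  # assumed owner of the factory method
counter = stats.get_unique_counter(host='localhost', port=6379,
                                   key='crawler:unique_urls',
                                   window=SECONDS_1_HOUR, roll=True, keep_max=24)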
def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover """Message printer. """ if enable_verbose: print(" " * indent + message)
def function[_show, parameter[self, message, indent, enable_verbose]]: constant[Message printer. ] if name[enable_verbose] begin[:] call[name[print], parameter[binary_operation[binary_operation[constant[ ] * name[indent]] + name[message]]]]
keyword[def] identifier[_show] ( identifier[self] , identifier[message] , identifier[indent] = literal[int] , identifier[enable_verbose] = keyword[True] ): literal[string] keyword[if] identifier[enable_verbose] : identifier[print] ( literal[string] * identifier[indent] + identifier[message] )
def _show(self, message, indent=0, enable_verbose=True): # pragma: no cover 'Message printer.\n ' if enable_verbose: print(' ' * indent + message) # depends on [control=['if'], data=[]]
def rand_sub(arr, *args, **kwargs): ''' arr = ['scheme', 'username', 'password', 'hostname', 'port', 'path', 'params', 'query', 'fragment'] rand_sub(arr,3) rand_sub(arr,3) rand_sub(arr,3) rand_sub(arr) rand_sub(arr) rand_sub(arr) ''' arr = copy.deepcopy(arr) lngth = len(arr) if len(args) == 0: # no size given: pick a random subsequence length in [0, len(arr)) n = random.randrange(0, lngth) else: # clamp an oversized request down to the full length n = min(args[0], lngth) indexes = rand_some_indexes(0, lngth, n, **kwargs) narr = select_seqs_keep_order(arr, indexes) return narr
def function[rand_sub, parameter[arr]]: constant[ arr = ['scheme', 'username', 'password', 'hostname', 'port', 'path', 'params', 'query', 'fragment'] rand_sub(arr,3) rand_sub(arr,3) rand_sub(arr,3) rand_sub(arr) rand_sub(arr) rand_sub(arr) ] variable[arr] assign[=] call[name[copy].deepcopy, parameter[name[arr]]] variable[lngth] assign[=] call[name[arr].__len__, parameter[]] variable[args] assign[=] call[name[list], parameter[name[args]]] if compare[call[name[args].__len__, parameter[]] equal[==] constant[0]] begin[:] variable[n] assign[=] call[name[random].randrange, parameter[constant[0], name[lngth]]] variable[indexes] assign[=] call[name[rand_some_indexes], parameter[constant[0], name[lngth], name[n]]] variable[narr] assign[=] call[name[select_seqs_keep_order], parameter[name[arr], name[indexes]]] return[name[narr]]
keyword[def] identifier[rand_sub] ( identifier[arr] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[arr] = identifier[copy] . identifier[deepcopy] ( identifier[arr] ) identifier[lngth] = identifier[arr] . identifier[__len__] () identifier[args] = identifier[list] ( identifier[args] ) keyword[if] ( identifier[args] . identifier[__len__] ()== literal[int] ): identifier[n] = identifier[random] . identifier[randrange] ( literal[int] , identifier[lngth] ) keyword[else] : identifier[n] = identifier[args] [ literal[int] ] keyword[if] ( identifier[n] > identifier[lngth] ): identifier[n] = identifier[lngth] keyword[else] : keyword[pass] identifier[indexes] = identifier[rand_some_indexes] ( literal[int] , identifier[lngth] , identifier[n] ,** identifier[kwargs] ) identifier[narr] = identifier[select_seqs_keep_order] ( identifier[arr] , identifier[indexes] ) keyword[return] ( identifier[narr] )
def rand_sub(arr, *args, **kwargs): """ arr = ['scheme', 'username', 'password', 'hostname', 'port', 'path', 'params', 'query', 'fragment'] rand_sub(arr,3) rand_sub(arr,3) rand_sub(arr,3) rand_sub(arr) rand_sub(arr) rand_sub(arr) """ arr = copy.deepcopy(arr) lngth = arr.__len__() args = list(args) if args.__len__() == 0: n = random.randrange(0, lngth) # depends on [control=['if'], data=[]] else: n = args[0] if n > lngth: n = lngth # depends on [control=['if'], data=['n', 'lngth']] else: pass indexes = rand_some_indexes(0, lngth, n, **kwargs) narr = select_seqs_keep_order(arr, indexes) return narr
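Echoing the docstring, an illustrative run (outputs are random; the two helper functions are assumed to be importable from the same module).

arr = ['scheme', 'username', 'password', 'hostname', 'port']
rand_sub(arr, 3)   # e.g. ['scheme', 'hostname', 'port'] -- 3 items, source order kept
rand_sub(arr)      # random length drawn from [0, len(arr))
rand_sub(arr, 99)  # n is clamped, so all 5 items come back in order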
def Unbind(method): # pylint: disable=C0103 """ The ``@Unbind`` callback decorator is called when a component dependency is unbound. The decorated method must accept the injected service object and its :class:`~pelix.framework.ServiceReference` as arguments:: @Unbind def unbind_method(self, service, service_reference): ''' service: The previously injected service instance. service_reference: Its ServiceReference ''' # ... If the service is a required one, the unbind callback is called **after** the component has been invalidated. Exceptions raised by an unbind callback are ignored. :param method: The decorated method :raise TypeError: The decorated element is not a valid function """ if not isinstance(method, types.FunctionType): raise TypeError("@Unbind can only be applied on functions") # Tests the number of parameters validate_method_arity(method, "service", "service_reference") _append_object_entry( method, constants.IPOPO_METHOD_CALLBACKS, constants.IPOPO_CALLBACK_UNBIND, ) return method
def function[Unbind, parameter[method]]: constant[ The ``@Unbind`` callback decorator is called when a component dependency is unbound. The decorated method must accept the injected service object and its :class:`~pelix.framework.ServiceReference` as arguments:: @Unbind def unbind_method(self, service, service_reference): ''' service: The previously injected service instance. service_reference: Its ServiceReference ''' # ... If the service is a required one, the unbind callback is called **after** the component has been invalidated. Exceptions raised by an unbind callback are ignored. :param method: The decorated method :raise TypeError: The decorated element is not a valid function ] if <ast.UnaryOp object at 0x7da1b04019c0> begin[:] <ast.Raise object at 0x7da1b0401870> call[name[validate_method_arity], parameter[name[method], constant[service], constant[service_reference]]] call[name[_append_object_entry], parameter[name[method], name[constants].IPOPO_METHOD_CALLBACKS, name[constants].IPOPO_CALLBACK_UNBIND]] return[name[method]]
keyword[def] identifier[Unbind] ( identifier[method] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[method] , identifier[types] . identifier[FunctionType] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[validate_method_arity] ( identifier[method] , literal[string] , literal[string] ) identifier[_append_object_entry] ( identifier[method] , identifier[constants] . identifier[IPOPO_METHOD_CALLBACKS] , identifier[constants] . identifier[IPOPO_CALLBACK_UNBIND] , ) keyword[return] identifier[method]
def Unbind(method): # pylint: disable=C0103 "\n The ``@Unbind`` callback decorator is called when a component dependency is\n unbound.\n\n The decorated method must accept the injected service object and its\n :class:`~pelix.framework.ServiceReference` as arguments::\n\n @Unbind\n def unbind_method(self, service, service_reference):\n '''\n service: The previously injected service instance.\n service_reference: Its ServiceReference\n '''\n # ...\n\n If the service is a required one, the unbind callback is called **after**\n the component has been invalidated.\n\n Exceptions raised by an unbind callback are ignored.\n\n :param method: The decorated method\n :raise TypeError: The decorated element is not a valid function\n " if not isinstance(method, types.FunctionType): raise TypeError('@Unbind can only be applied on functions') # depends on [control=['if'], data=[]] # Tests the number of parameters validate_method_arity(method, 'service', 'service_reference') _append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS, constants.IPOPO_CALLBACK_UNBIND) return method
def analysis_set_properties(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /analysis-xxxx/setProperties API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fanalysis-xxxx%2FsetProperties """ return DXHTTPRequest('/%s/setProperties' % object_id, input_params, always_retry=always_retry, **kwargs)
def function[analysis_set_properties, parameter[object_id, input_params, always_retry]]: constant[ Invokes the /analysis-xxxx/setProperties API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fanalysis-xxxx%2FsetProperties ] return[call[name[DXHTTPRequest], parameter[binary_operation[constant[/%s/setProperties] <ast.Mod object at 0x7da2590d6920> name[object_id]], name[input_params]]]]
keyword[def] identifier[analysis_set_properties] ( identifier[object_id] , identifier[input_params] ={}, identifier[always_retry] = keyword[True] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[DXHTTPRequest] ( literal[string] % identifier[object_id] , identifier[input_params] , identifier[always_retry] = identifier[always_retry] ,** identifier[kwargs] )
def analysis_set_properties(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /analysis-xxxx/setProperties API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fanalysis-xxxx%2FsetProperties """ return DXHTTPRequest('/%s/setProperties' % object_id, input_params, always_retry=always_retry, **kwargs)
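A hedged call sketch; per the linked route, the properties travel in the input hash, though the exact payload shape shown here is an assumption.

analysis_set_properties('analysis-xxxx',
                        input_params={'properties': {'stage': 'qc', 'reviewed': 'true'}})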
def onKey(self, event): """ Copy selection if control down and 'c' """ if event.CmdDown() or event.ControlDown(): if event.GetKeyCode() == 67: # 67 == ord('C') self.onCopySelection(None)
def function[onKey, parameter[self, event]]: constant[ Copy selection if control down and 'c' ] if <ast.BoolOp object at 0x7da18bccb9a0> begin[:] if compare[call[name[event].GetKeyCode, parameter[]] equal[==] constant[67]] begin[:] call[name[self].onCopySelection, parameter[constant[None]]]
keyword[def] identifier[onKey] ( identifier[self] , identifier[event] ): literal[string] keyword[if] identifier[event] . identifier[CmdDown] () keyword[or] identifier[event] . identifier[ControlDown] (): keyword[if] identifier[event] . identifier[GetKeyCode] ()== literal[int] : identifier[self] . identifier[onCopySelection] ( keyword[None] )
def onKey(self, event): """ Copy selection if control down and 'c' """ if event.CmdDown() or event.ControlDown(): if event.GetKeyCode() == 67: self.onCopySelection(None) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def generate_xliff(entry_dict): """ Given a dictionary with keys = ids and values equal to strings, generates an xliff file to send to unbabel. Example: {"123": "This is blue car", "234": "This house is yellow"} returns <xliff version="1.2"> <file original="" source-language="en" target-language="fr"> <head></head> <body> <trans-unit id="14077"> <source>T2 apartment, as new building with swimming pool, sauna and gym. Inserted in Quinta da Beloura 1, which offers a variety of services such as private security 24 hours, tennis, golf, hotel, restaurants, and more. The apartment has air conditioning in all rooms, central heating, balcony and security screen for children in all windows.</source> </trans-unit> </body> </file> </xliff> """ entries = "" for key, value in entry_dict.items(): # .items() works on Python 2 and 3; iteritems() is gone in 3 entries += create_trans_unit(key, value).strip() + "\n" xliff_str = get_head_xliff().strip() + "\n" + entries + get_tail_xliff().strip() return xliff_str
def function[generate_xliff, parameter[entry_dict]]: constant[ Given a dictionary with keys = ids and values equal to strings, generates an xliff file to send to unbabel. Example: {"123": "This is blue car", "234": "This house is yellow" } returns <xliff version = "1.2"> <file original = "" source-language = "en" target-language = "fr"> <head> </ head> <body> <trans-unit id = "14077"> <source> T2 apartment, as new building with swimming pool, sauna and gym. Inserted in Quinta da Beloura 1, which offers a variety of services such as private security 24 hours, tennis, golf, hotel, restaurants, and more. The apartment has air conditioning in all rooms, central heating, balcony and security screen for children in all windows. </ source> </ trans-unit> </ body> </ file> </ xliff> ] variable[entries] assign[=] constant[] for taget[tuple[[<ast.Name object at 0x7da1b2219540>, <ast.Name object at 0x7da1b2219450>]]] in starred[call[name[entry_dict].iteritems, parameter[]]] begin[:] <ast.AugAssign object at 0x7da1b2219030> variable[xliff_str] assign[=] binary_operation[binary_operation[binary_operation[call[call[name[get_head_xliff], parameter[]].strip, parameter[]] + constant[ ]] + name[entries]] + call[call[name[get_tail_xliff], parameter[]].strip, parameter[]]] return[name[xliff_str]]
keyword[def] identifier[generate_xliff] ( identifier[entry_dict] ): literal[string] identifier[entries] = literal[string] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[entry_dict] . identifier[iteritems] (): identifier[entries] += identifier[create_trans_unit] ( identifier[key] , identifier[value] ). identifier[strip] ()+ literal[string] identifier[xliff_str] = identifier[get_head_xliff] (). identifier[strip] ()+ literal[string] + identifier[entries] + identifier[get_tail_xliff] (). identifier[strip] () keyword[return] identifier[xliff_str]
def generate_xliff(entry_dict): """ Given a dictionary with keys = ids and values equal to strings, generates an xliff file to send to unbabel. Example: {"123": "This is blue car", "234": "This house is yellow" } returns <xliff version = "1.2"> <file original = "" source-language = "en" target-language = "fr"> <head> </ head> <body> <trans-unit id = "14077"> <source> T2 apartment, as new building with swimming pool, sauna and gym. Inserted in Quinta da Beloura 1, which offers a variety of services such as private security 24 hours, tennis, golf, hotel, restaurants, and more. The apartment has air conditioning in all rooms, central heating, balcony and security screen for children in all windows. </ source> </ trans-unit> </ body> </ file> </ xliff> """ entries = '' for (key, value) in entry_dict.iteritems(): entries += create_trans_unit(key, value).strip() + '\n' # depends on [control=['for'], data=[]] xliff_str = get_head_xliff().strip() + '\n' + entries + get_tail_xliff().strip() return xliff_str
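A runnable sketch of driving `generate_xliff`. The helpers `create_trans_unit`, `get_head_xliff` and `get_tail_xliff` are not part of the snippet, so minimal hypothetical stand-ins are supplied here, and `iteritems()` is swapped for Python 3's `items()`; only the assembly logic mirrors the source.

```python
# Hypothetical stand-ins for the helpers the snippet depends on.
def get_head_xliff():
    return ('<xliff version="1.2"><file original="" source-language="en" '
            'target-language="fr"><body>')

def get_tail_xliff():
    return '</body></file></xliff>'

def create_trans_unit(key, value):
    return '<trans-unit id="%s"><source>%s</source></trans-unit>' % (key, value)

def generate_xliff(entry_dict):
    entries = ""
    for key, value in entry_dict.items():  # iteritems() in the Python 2 original
        entries += create_trans_unit(key, value).strip() + "\n"
    return get_head_xliff().strip() + "\n" + entries + get_tail_xliff().strip()

print(generate_xliff({"123": "This is blue car", "234": "This house is yellow"}))
```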
def inject(self): """ Inject aXe into the current document. The axe-core script is read from ``self.script_url`` and executed in the browser via Selenium. """ with open(self.script_url, "r", encoding="utf8") as f: self.selenium.execute_script(f.read())
def function[inject, parameter[self]]: constant[ Inject aXe into the current document. The axe-core script is read from ``self.script_url`` and executed in the browser via Selenium. ] with call[name[open], parameter[name[self].script_url, constant[r]]] begin[:] call[name[self].selenium.execute_script, parameter[call[name[f].read, parameter[]]]]
keyword[def] identifier[inject] ( identifier[self] ): literal[string] keyword[with] identifier[open] ( identifier[self] . identifier[script_url] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] : identifier[self] . identifier[selenium] . identifier[execute_script] ( identifier[f] . identifier[read] ())
def inject(self): """ Inject aXe into the current document. The axe-core script is read from ``self.script_url`` and executed in the browser via Selenium. """ with open(self.script_url, 'r', encoding='utf8') as f: self.selenium.execute_script(f.read()) # depends on [control=['with'], data=['f']]
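A hedged usage sketch for `inject`. It assumes the surrounding class keeps a Selenium driver on `self.selenium` and a filesystem path to `axe.min.js` on `self.script_url`, as the method body implies; the path below is a placeholder, and running it requires a browser driver (e.g. geckodriver) on PATH.

```python
from selenium import webdriver

class Axe:  # minimal holder mirroring the attributes inject() reads
    def __init__(self, selenium, script_url="/path/to/axe.min.js"):  # placeholder path
        self.selenium = selenium
        self.script_url = script_url

    def inject(self):
        with open(self.script_url, "r", encoding="utf8") as f:
            self.selenium.execute_script(f.read())

driver = webdriver.Firefox()
driver.get("https://example.com")
Axe(driver).inject()  # axe-core is now available as window.axe in the page
driver.quit()
```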
def header(self, name, default=None): """ Returns the value of the HTTP header identified by `name`. """ wsgi_header = "HTTP_{0}".format(name.upper()) try: return self.env_raw[wsgi_header] except KeyError: return default
def function[header, parameter[self, name, default]]: constant[ Returns the value of the HTTP header identified by `name`. ] variable[wsgi_header] assign[=] call[constant[HTTP_{0}].format, parameter[call[name[name].upper, parameter[]]]] <ast.Try object at 0x7da1b0f203d0>
keyword[def] identifier[header] ( identifier[self] , identifier[name] , identifier[default] = keyword[None] ): literal[string] identifier[wsgi_header] = literal[string] . identifier[format] ( identifier[name] . identifier[upper] ()) keyword[try] : keyword[return] identifier[self] . identifier[env_raw] [ identifier[wsgi_header] ] keyword[except] identifier[KeyError] : keyword[return] identifier[default]
def header(self, name, default=None): """ Returns the value of the HTTP header identified by `name`. """ wsgi_header = 'HTTP_{0}'.format(name.upper()) try: return self.env_raw[wsgi_header] # depends on [control=['try'], data=[]] except KeyError: return default # depends on [control=['except'], data=[]]
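A small self-contained check of `header`, using a stand-in request class. Note that WSGI stores request headers with dashes folded to underscores (`User-Agent` becomes `HTTP_USER_AGENT`), and this helper only upper-cases the name, so callers must pass underscore-style names.

```python
class Request:  # stand-in; only header() is taken from the source
    def __init__(self, env_raw):
        self.env_raw = env_raw

    def header(self, name, default=None):
        wsgi_header = "HTTP_{0}".format(name.upper())
        try:
            return self.env_raw[wsgi_header]
        except KeyError:
            return default

req = Request({"HTTP_USER_AGENT": "curl/8.0", "HTTP_ACCEPT": "*/*"})
print(req.header("user_agent"))            # curl/8.0
print(req.header("x_custom", "fallback"))  # fallback (key not present)
```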
def load_meta_data(self, path=None, recursively=True): """Load meta data of state machine model from the file system The meta data of the state machine model is loaded from the file system and stored in the meta property of the model. Existing meta data is removed. Also the meta data of root state and children is loaded. :param str path: Optional path to the meta data file. If not given, the path will be derived from the state machine's path on the filesystem """ meta_data_path = path if path is not None else self.state_machine.file_system_path if meta_data_path: path_meta_data = os.path.join(meta_data_path, storage.FILE_NAME_META_DATA) try: tmp_meta = storage.load_data_file(path_meta_data) except ValueError: tmp_meta = {} else: tmp_meta = {} # JSON returns a dict, which must be converted to a Vividict tmp_meta = Vividict(tmp_meta) if recursively: root_state_path = None if not path else os.path.join(path, self.root_state.state.state_id) self.root_state.load_meta_data(root_state_path) if tmp_meta: # assign the meta data to the state self.meta = tmp_meta self.meta_signal.emit(MetaSignalMsg("load_meta_data", "all", True))
def function[load_meta_data, parameter[self, path, recursively]]: constant[Load meta data of state machine model from the file system The meta data of the state machine model is loaded from the file system and stored in the meta property of the model. Existing meta data is removed. Also the meta data of root state and children is loaded. :param str path: Optional path to the meta data file. If not given, the path will be derived from the state machine's path on the filesystem ] variable[meta_data_path] assign[=] <ast.IfExp object at 0x7da1b1a3d990> if name[meta_data_path] begin[:] variable[path_meta_data] assign[=] call[name[os].path.join, parameter[name[meta_data_path], name[storage].FILE_NAME_META_DATA]] <ast.Try object at 0x7da1b1a3c820> variable[tmp_meta] assign[=] call[name[Vividict], parameter[name[tmp_meta]]] if name[recursively] begin[:] variable[root_state_path] assign[=] <ast.IfExp object at 0x7da1b1a3fc70> call[name[self].root_state.load_meta_data, parameter[name[root_state_path]]] if name[tmp_meta] begin[:] name[self].meta assign[=] name[tmp_meta] call[name[self].meta_signal.emit, parameter[call[name[MetaSignalMsg], parameter[constant[load_meta_data], constant[all], constant[True]]]]]
keyword[def] identifier[load_meta_data] ( identifier[self] , identifier[path] = keyword[None] , identifier[recursively] = keyword[True] ): literal[string] identifier[meta_data_path] = identifier[path] keyword[if] identifier[path] keyword[is] keyword[not] keyword[None] keyword[else] identifier[self] . identifier[state_machine] . identifier[file_system_path] keyword[if] identifier[meta_data_path] : identifier[path_meta_data] = identifier[os] . identifier[path] . identifier[join] ( identifier[meta_data_path] , identifier[storage] . identifier[FILE_NAME_META_DATA] ) keyword[try] : identifier[tmp_meta] = identifier[storage] . identifier[load_data_file] ( identifier[path_meta_data] ) keyword[except] identifier[ValueError] : identifier[tmp_meta] ={} keyword[else] : identifier[tmp_meta] ={} identifier[tmp_meta] = identifier[Vividict] ( identifier[tmp_meta] ) keyword[if] identifier[recursively] : identifier[root_state_path] = keyword[None] keyword[if] keyword[not] identifier[path] keyword[else] identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[self] . identifier[root_state] . identifier[state] . identifier[state_id] ) identifier[self] . identifier[root_state] . identifier[load_meta_data] ( identifier[root_state_path] ) keyword[if] identifier[tmp_meta] : identifier[self] . identifier[meta] = identifier[tmp_meta] identifier[self] . identifier[meta_signal] . identifier[emit] ( identifier[MetaSignalMsg] ( literal[string] , literal[string] , keyword[True] ))
def load_meta_data(self, path=None, recursively=True): """Load meta data of state machine model from the file system The meta data of the state machine model is loaded from the file system and stored in the meta property of the model. Existing meta data is removed. Also the meta data of root state and children is loaded. :param str path: Optional path to the meta data file. If not given, the path will be derived from the state machine's path on the filesystem """ meta_data_path = path if path is not None else self.state_machine.file_system_path if meta_data_path: path_meta_data = os.path.join(meta_data_path, storage.FILE_NAME_META_DATA) try: tmp_meta = storage.load_data_file(path_meta_data) # depends on [control=['try'], data=[]] except ValueError: tmp_meta = {} # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: tmp_meta = {} # JSON returns a dict, which must be converted to a Vividict tmp_meta = Vividict(tmp_meta) if recursively: root_state_path = None if not path else os.path.join(path, self.root_state.state.state_id) self.root_state.load_meta_data(root_state_path) # depends on [control=['if'], data=[]] if tmp_meta: # assign the meta data to the state self.meta = tmp_meta self.meta_signal.emit(MetaSignalMsg('load_meta_data', 'all', True)) # depends on [control=['if'], data=[]]
def style(font): """Determine font style from canonical filename.""" from fontbakery.constants import STATIC_STYLE_NAMES filename = os.path.basename(font) if '-' in filename: stylename = os.path.splitext(filename)[0].split('-')[1] if stylename in [name.replace(' ', '') for name in STATIC_STYLE_NAMES]: return stylename return None
def function[style, parameter[font]]: constant[Determine font style from canonical filename.] from relative_module[fontbakery.constants] import module[STATIC_STYLE_NAMES] variable[filename] assign[=] call[name[os].path.basename, parameter[name[font]]] if compare[constant[-] in name[filename]] begin[:] variable[stylename] assign[=] call[call[call[call[name[os].path.splitext, parameter[name[filename]]]][constant[0]].split, parameter[constant[-]]]][constant[1]] if compare[name[stylename] in <ast.ListComp object at 0x7da18f00ece0>] begin[:] return[name[stylename]] return[constant[None]]
keyword[def] identifier[style] ( identifier[font] ): literal[string] keyword[from] identifier[fontbakery] . identifier[constants] keyword[import] identifier[STATIC_STYLE_NAMES] identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[font] ) keyword[if] literal[string] keyword[in] identifier[filename] : identifier[stylename] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ] keyword[if] identifier[stylename] keyword[in] [ identifier[name] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[name] keyword[in] identifier[STATIC_STYLE_NAMES] ]: keyword[return] identifier[stylename] keyword[return] keyword[None]
def style(font): """Determine font style from canonical filename.""" from fontbakery.constants import STATIC_STYLE_NAMES filename = os.path.basename(font) if '-' in filename: stylename = os.path.splitext(filename)[0].split('-')[1] if stylename in [name.replace(' ', '') for name in STATIC_STYLE_NAMES]: return stylename # depends on [control=['if'], data=['stylename']] # depends on [control=['if'], data=['filename']] return None
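A quick check of `style` on canonical font filenames. `STATIC_STYLE_NAMES` lives in fontbakery's constants; a small subset is inlined here so the sketch runs without the package.

```python
import os

STATIC_STYLE_NAMES = ["Regular", "Italic", "Bold", "Bold Italic"]  # inlined subset

def style(font):
    filename = os.path.basename(font)
    if '-' in filename:
        stylename = os.path.splitext(filename)[0].split('-')[1]
        if stylename in [name.replace(' ', '') for name in STATIC_STYLE_NAMES]:
            return stylename
    return None

print(style("fonts/Lora-BoldItalic.ttf"))  # BoldItalic
print(style("fonts/Lora.ttf"))             # None (no style suffix in the name)
```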
def from_date(self, value: date) -> datetime: """ Initializes from the given date value """ assert isinstance(value, date) #self.value = datetime.combine(value, time.min) self.value = datetime(value.year, value.month, value.day) return self.value
def function[from_date, parameter[self, value]]: constant[ Initializes from the given date value ] assert[call[name[isinstance], parameter[name[value], name[date]]]] name[self].value assign[=] call[name[datetime], parameter[name[value].year, name[value].month, name[value].day]] return[name[self].value]
keyword[def] identifier[from_date] ( identifier[self] , identifier[value] : identifier[date] )-> identifier[datetime] : literal[string] keyword[assert] identifier[isinstance] ( identifier[value] , identifier[date] ) identifier[self] . identifier[value] = identifier[datetime] ( identifier[value] . identifier[year] , identifier[value] . identifier[month] , identifier[value] . identifier[day] ) keyword[return] identifier[self] . identifier[value]
def from_date(self, value: date) -> datetime: """ Initializes from the given date value """ assert isinstance(value, date) #self.value = datetime.combine(value, time.min) self.value = datetime(value.year, value.month, value.day) return self.value
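A usage sketch for `from_date`, wrapped in a minimal hypothetical holder class since only the method is shown. The result is the given date at midnight.

```python
from datetime import date, datetime

class Datum:  # hypothetical holder; from_date is copied from the snippet
    def from_date(self, value: date) -> datetime:
        assert isinstance(value, date)
        self.value = datetime(value.year, value.month, value.day)
        return self.value

print(Datum().from_date(date(2024, 3, 1)))  # 2024-03-01 00:00:00
```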
def get(self): """ Get a JSON-ready representation of this Mail object. :returns: This Mail object, ready for use in a request body. :rtype: dict """ mail = { 'from': self._get_or_none(self.from_email), 'subject': self._get_or_none(self.subject), 'personalizations': [p.get() for p in self.personalizations or []], 'content': [c.get() for c in self.contents or []], 'attachments': [a.get() for a in self.attachments or []], 'template_id': self._get_or_none(self.template_id), 'sections': self._flatten_dicts(self.sections), 'headers': self._flatten_dicts(self.headers), 'categories': [c.get() for c in self.categories or []], 'custom_args': self._flatten_dicts(self.custom_args), 'send_at': self._get_or_none(self.send_at), 'batch_id': self._get_or_none(self.batch_id), 'asm': self._get_or_none(self.asm), 'ip_pool_name': self._get_or_none(self.ip_pool_name), 'mail_settings': self._get_or_none(self.mail_settings), 'tracking_settings': self._get_or_none(self.tracking_settings), 'reply_to': self._get_or_none(self.reply_to), } return {key: value for key, value in mail.items() if value is not None and value != [] and value != {}}
def function[get, parameter[self]]: constant[ Get a JSON-ready representation of this Mail object. :returns: This Mail object, ready for use in a request body. :rtype: dict ] variable[mail] assign[=] dictionary[[<ast.Constant object at 0x7da1b23461a0>, <ast.Constant object at 0x7da1b2344b50>, <ast.Constant object at 0x7da1b23467d0>, <ast.Constant object at 0x7da1b2346020>, <ast.Constant object at 0x7da1b2347b20>, <ast.Constant object at 0x7da1b2345300>, <ast.Constant object at 0x7da1b2344820>, <ast.Constant object at 0x7da1b2346740>, <ast.Constant object at 0x7da1b2347340>, <ast.Constant object at 0x7da1b2345690>, <ast.Constant object at 0x7da1b2345f90>, <ast.Constant object at 0x7da1b2344fd0>, <ast.Constant object at 0x7da1b23462c0>, <ast.Constant object at 0x7da1b23449a0>, <ast.Constant object at 0x7da1b2344880>, <ast.Constant object at 0x7da1b23477c0>, <ast.Constant object at 0x7da1b2344dc0>], [<ast.Call object at 0x7da1b2346770>, <ast.Call object at 0x7da1b2345570>, <ast.ListComp object at 0x7da1b2346230>, <ast.ListComp object at 0x7da1b23450c0>, <ast.ListComp object at 0x7da1b23460e0>, <ast.Call object at 0x7da1b2346800>, <ast.Call object at 0x7da1b2347c70>, <ast.Call object at 0x7da1b2345f00>, <ast.ListComp object at 0x7da1b2344280>, <ast.Call object at 0x7da1b23452a0>, <ast.Call object at 0x7da1b23441f0>, <ast.Call object at 0x7da1b23449d0>, <ast.Call object at 0x7da1b23463b0>, <ast.Call object at 0x7da1b2347790>, <ast.Call object at 0x7da1b2345e70>, <ast.Call object at 0x7da1b2347af0>, <ast.Call object at 0x7da1b2345720>]] return[<ast.DictComp object at 0x7da1b2347280>]
keyword[def] identifier[get] ( identifier[self] ): literal[string] identifier[mail] ={ literal[string] : identifier[self] . identifier[_get_or_none] ( identifier[self] . identifier[from_email] ), literal[string] : identifier[self] . identifier[_get_or_none] ( identifier[self] . identifier[subject] ), literal[string] :[ identifier[p] . identifier[get] () keyword[for] identifier[p] keyword[in] identifier[self] . identifier[personalizations] keyword[or] []], literal[string] :[ identifier[c] . identifier[get] () keyword[for] identifier[c] keyword[in] identifier[self] . identifier[contents] keyword[or] []], literal[string] :[ identifier[a] . identifier[get] () keyword[for] identifier[a] keyword[in] identifier[self] . identifier[attachments] keyword[or] []], literal[string] : identifier[self] . identifier[_get_or_none] ( identifier[self] . identifier[template_id] ), literal[string] : identifier[self] . identifier[_flatten_dicts] ( identifier[self] . identifier[sections] ), literal[string] : identifier[self] . identifier[_flatten_dicts] ( identifier[self] . identifier[headers] ), literal[string] :[ identifier[c] . identifier[get] () keyword[for] identifier[c] keyword[in] identifier[self] . identifier[categories] keyword[or] []], literal[string] : identifier[self] . identifier[_flatten_dicts] ( identifier[self] . identifier[custom_args] ), literal[string] : identifier[self] . identifier[_get_or_none] ( identifier[self] . identifier[send_at] ), literal[string] : identifier[self] . identifier[_get_or_none] ( identifier[self] . identifier[batch_id] ), literal[string] : identifier[self] . identifier[_get_or_none] ( identifier[self] . identifier[asm] ), literal[string] : identifier[self] . identifier[_get_or_none] ( identifier[self] . identifier[ip_pool_name] ), literal[string] : identifier[self] . identifier[_get_or_none] ( identifier[self] . identifier[mail_settings] ), literal[string] : identifier[self] . identifier[_get_or_none] ( identifier[self] . identifier[tracking_settings] ), literal[string] : identifier[self] . identifier[_get_or_none] ( identifier[self] . identifier[reply_to] ), } keyword[return] { identifier[key] : identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[mail] . identifier[items] () keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] keyword[and] identifier[value] !=[] keyword[and] identifier[value] !={}}
def get(self): """ Get a JSON-ready representation of this Mail object. :returns: This Mail object, ready for use in a request body. :rtype: dict """ mail = {'from': self._get_or_none(self.from_email), 'subject': self._get_or_none(self.subject), 'personalizations': [p.get() for p in self.personalizations or []], 'content': [c.get() for c in self.contents or []], 'attachments': [a.get() for a in self.attachments or []], 'template_id': self._get_or_none(self.template_id), 'sections': self._flatten_dicts(self.sections), 'headers': self._flatten_dicts(self.headers), 'categories': [c.get() for c in self.categories or []], 'custom_args': self._flatten_dicts(self.custom_args), 'send_at': self._get_or_none(self.send_at), 'batch_id': self._get_or_none(self.batch_id), 'asm': self._get_or_none(self.asm), 'ip_pool_name': self._get_or_none(self.ip_pool_name), 'mail_settings': self._get_or_none(self.mail_settings), 'tracking_settings': self._get_or_none(self.tracking_settings), 'reply_to': self._get_or_none(self.reply_to)} return {key: value for (key, value) in mail.items() if value is not None and value != [] and (value != {})}
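The closing dict comprehension is the load-bearing idiom in `get`: keys whose values are `None` or empty containers are dropped so the request body only carries populated fields. A standalone demonstration, independent of the SendGrid object model:

```python
payload = {
    "subject": "Hello",
    "personalizations": [{"to": [{"email": "a@example.com"}]}],
    "sections": {},    # empty dict  -> dropped
    "categories": [],  # empty list  -> dropped
    "send_at": None,   # None        -> dropped
}
body = {k: v for k, v in payload.items()
        if v is not None and v != [] and v != {}}
print(body)  # only 'subject' and 'personalizations' survive
```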
def __get_note_award_emoji(self, item_type, item_id, note_id): """Fetch emojis for a note of an issue/merge request""" emojis = [] group_emojis = self.client.note_emojis(item_type, item_id, note_id) try: for raw_emojis in group_emojis: for emoji in json.loads(raw_emojis): emojis.append(emoji) except requests.exceptions.HTTPError as error: if error.response.status_code == 404: logger.warning("Emojis not available for %s ", urijoin(item_type, str(item_id), GitLabClient.NOTES, str(note_id), GitLabClient.EMOJI)) return emojis return emojis
def function[__get_note_award_emoji, parameter[self, item_type, item_id, note_id]]: constant[Fetch emojis for a note of an issue/merge request] variable[emojis] assign[=] list[[]] variable[group_emojis] assign[=] call[name[self].client.note_emojis, parameter[name[item_type], name[item_id], name[note_id]]] <ast.Try object at 0x7da1b033b190> return[name[emojis]]
keyword[def] identifier[__get_note_award_emoji] ( identifier[self] , identifier[item_type] , identifier[item_id] , identifier[note_id] ): literal[string] identifier[emojis] =[] identifier[group_emojis] = identifier[self] . identifier[client] . identifier[note_emojis] ( identifier[item_type] , identifier[item_id] , identifier[note_id] ) keyword[try] : keyword[for] identifier[raw_emojis] keyword[in] identifier[group_emojis] : keyword[for] identifier[emoji] keyword[in] identifier[json] . identifier[loads] ( identifier[raw_emojis] ): identifier[emojis] . identifier[append] ( identifier[emoji] ) keyword[except] identifier[requests] . identifier[exceptions] . identifier[HTTPError] keyword[as] identifier[error] : keyword[if] identifier[error] . identifier[response] . identifier[status_code] == literal[int] : identifier[logger] . identifier[warning] ( literal[string] , identifier[urijoin] ( identifier[item_type] , identifier[str] ( identifier[item_id] ), identifier[GitLabClient] . identifier[NOTES] , identifier[str] ( identifier[note_id] ), identifier[GitLabClient] . identifier[EMOJI] )) keyword[return] identifier[emojis] keyword[return] identifier[emojis]
def __get_note_award_emoji(self, item_type, item_id, note_id): """Fetch emojis for a note of an issue/merge request""" emojis = [] group_emojis = self.client.note_emojis(item_type, item_id, note_id) try: for raw_emojis in group_emojis: for emoji in json.loads(raw_emojis): emojis.append(emoji) # depends on [control=['for'], data=['emoji']] # depends on [control=['for'], data=['raw_emojis']] # depends on [control=['try'], data=[]] except requests.exceptions.HTTPError as error: if error.response.status_code == 404: logger.warning('Emojis not available for %s ', urijoin(item_type, str(item_id), GitLabClient.NOTES, str(note_id), GitLabClient.EMOJI)) return emojis # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['error']] return emojis
def ClearAllVar(self): """Clear this Value.""" self.value = None # Call OnClearAllVar on options. _ = [option.OnClearAllVar() for option in self.options]
def function[ClearAllVar, parameter[self]]: constant[Clear this Value.] name[self].value assign[=] constant[None] variable[_] assign[=] <ast.ListComp object at 0x7da1b17d4c10>
keyword[def] identifier[ClearAllVar] ( identifier[self] ): literal[string] identifier[self] . identifier[value] = keyword[None] identifier[_] =[ identifier[option] . identifier[OnClearAllVar] () keyword[for] identifier[option] keyword[in] identifier[self] . identifier[options] ]
def ClearAllVar(self): """Clear this Value.""" self.value = None # Call OnClearAllVar on options. _ = [option.OnClearAllVar() for option in self.options]
def roll_up( df, levels: List[str], groupby_vars: List[str], extra_groupby_cols: List[str] = None, var_name: str = 'type', value_name: str = 'value', agg_func: str = 'sum', drop_levels: List[str] = None ): """ Creates aggregates following a given hierarchy --- ### Parameters *mandatory :* - `levels` (*list of str*): name of the columns composing the hierarchy (from the top to the bottom level). - `groupby_vars` (*list of str*): name of the columns with value to aggregate. - `extra_groupby_cols` (*list of str*) optional: other columns used to group in each level. *optional :* - `var_name` (*str*) : name of the result variable column. By default, `“type”`. - `value_name` (*str*): name of the result value column. By default, `“value”`. - `agg_func` (*str*): name of the aggregation operation. By default, `“sum”`. - `drop_levels` (*list of str*): the names of the levels that you may want to discard from the output. --- ### Example **Input** | Region | City | Population | |:---------:|:--------:|:-----------:| | Idf | Panam| 200 | | Idf | Antony | 50 | | Nord | Lille | 20 | ```cson roll_up: levels: ["Region", "City"] groupby_vars: "Population" ``` **Output** | Region | City | Population | value | type | |:---------:|:--------:|:-----------:|:--------:|:------:| | Idf | Panam| 200 | Panam | City | | Idf | Antony | 50 | Antony | City | | Nord | Lille | 20 | Lille | City | | Idf | Nan | 250 | Idf | Region | | Nord | Nan | 20 | Nord | Region | """ dfs = list() groupby_cols_cpy = list(levels) levels_cpy = list(levels) levels_cpy.reverse() extra_groupby_cols = extra_groupby_cols or [] drop_levels = drop_levels or [] previous_level = None for top_level in levels_cpy: # Aggregation gb_df = getattr( df.groupby(groupby_cols_cpy + extra_groupby_cols)[groupby_vars], agg_func)().reset_index() # Melt-like columns gb_df[var_name] = top_level gb_df[value_name] = gb_df[top_level] dfs.append(gb_df) if previous_level in drop_levels: del dfs[-2] previous_level = top_level # Remove one level each time in the groupby: lowest level column needs # a groupby with every levels, the next level needs every one except # the lowest, etc. until the top level column that needs only itself # inside the groupby. groupby_cols_cpy.pop() return pd.concat(dfs, sort=False).reset_index()
def function[roll_up, parameter[df, levels, groupby_vars, extra_groupby_cols, var_name, value_name, agg_func, drop_levels]]: constant[ Creates aggregates following a given hierarchy --- ### Parameters *mandatory :* - `levels` (*list of str*): name of the columns composing the hierarchy (from the top to the bottom level). - `groupby_vars` (*list of str*): name of the columns with value to aggregate. - `extra_groupby_cols` (*list of str*) optional: other columns used to group in each level. *optional :* - `var_name` (*str*) : name of the result variable column. By default, `“type”`. - `value_name` (*str*): name of the result value column. By default, `“value”`. - `agg_func` (*str*): name of the aggregation operation. By default, `“sum”`. - `drop_levels` (*list of str*): the names of the levels that you may want to discard from the output. --- ### Example **Input** | Region | City | Population | |:---------:|:--------:|:-----------:| | Idf | Panam| 200 | | Idf | Antony | 50 | | Nord | Lille | 20 | ```cson roll_up: levels: ["Region", "City"] groupby_vars: "Population" ``` **Output** | Region | City | Population | value | type | |:---------:|:--------:|:-----------:|:--------:|:------:| | Idf | Panam| 200 | Panam | City | | Idf | Antony | 50 | Antony | City | | Nord | Lille | 20 | Lille | City | | Idf | Nan | 250 | Idf | Region | | Nord | Nan | 20 | Nord | Region | ] variable[dfs] assign[=] call[name[list], parameter[]] variable[groupby_cols_cpy] assign[=] call[name[list], parameter[name[levels]]] variable[levels_cpy] assign[=] call[name[list], parameter[name[levels]]] call[name[levels_cpy].reverse, parameter[]] variable[extra_groupby_cols] assign[=] <ast.BoolOp object at 0x7da1b0395d20> variable[drop_levels] assign[=] <ast.BoolOp object at 0x7da1b03960e0> variable[previous_level] assign[=] constant[None] for taget[name[top_level]] in starred[name[levels_cpy]] begin[:] variable[gb_df] assign[=] call[call[call[name[getattr], parameter[call[call[name[df].groupby, parameter[binary_operation[name[groupby_cols_cpy] + name[extra_groupby_cols]]]]][name[groupby_vars]], name[agg_func]]], parameter[]].reset_index, parameter[]] call[name[gb_df]][name[var_name]] assign[=] name[top_level] call[name[gb_df]][name[value_name]] assign[=] call[name[gb_df]][name[top_level]] call[name[dfs].append, parameter[name[gb_df]]] if compare[name[previous_level] in name[drop_levels]] begin[:] <ast.Delete object at 0x7da1b0394b80> variable[previous_level] assign[=] name[top_level] call[name[groupby_cols_cpy].pop, parameter[]] return[call[call[name[pd].concat, parameter[name[dfs]]].reset_index, parameter[]]]
keyword[def] identifier[roll_up] ( identifier[df] , identifier[levels] : identifier[List] [ identifier[str] ], identifier[groupby_vars] : identifier[List] [ identifier[str] ], identifier[extra_groupby_cols] : identifier[List] [ identifier[str] ]= keyword[None] , identifier[var_name] : identifier[str] = literal[string] , identifier[value_name] : identifier[str] = literal[string] , identifier[agg_func] : identifier[str] = literal[string] , identifier[drop_levels] : identifier[List] [ identifier[str] ]= keyword[None] ): literal[string] identifier[dfs] = identifier[list] () identifier[groupby_cols_cpy] = identifier[list] ( identifier[levels] ) identifier[levels_cpy] = identifier[list] ( identifier[levels] ) identifier[levels_cpy] . identifier[reverse] () identifier[extra_groupby_cols] = identifier[extra_groupby_cols] keyword[or] [] identifier[drop_levels] = identifier[drop_levels] keyword[or] [] identifier[previous_level] = keyword[None] keyword[for] identifier[top_level] keyword[in] identifier[levels_cpy] : identifier[gb_df] = identifier[getattr] ( identifier[df] . identifier[groupby] ( identifier[groupby_cols_cpy] + identifier[extra_groupby_cols] )[ identifier[groupby_vars] ], identifier[agg_func] )(). identifier[reset_index] () identifier[gb_df] [ identifier[var_name] ]= identifier[top_level] identifier[gb_df] [ identifier[value_name] ]= identifier[gb_df] [ identifier[top_level] ] identifier[dfs] . identifier[append] ( identifier[gb_df] ) keyword[if] identifier[previous_level] keyword[in] identifier[drop_levels] : keyword[del] identifier[dfs] [- literal[int] ] identifier[previous_level] = identifier[top_level] identifier[groupby_cols_cpy] . identifier[pop] () keyword[return] identifier[pd] . identifier[concat] ( identifier[dfs] , identifier[sort] = keyword[False] ). identifier[reset_index] ()
def roll_up(df, levels: List[str], groupby_vars: List[str], extra_groupby_cols: List[str]=None, var_name: str='type', value_name: str='value', agg_func: str='sum', drop_levels: List[str]=None): """ Creates aggregates following a given hierarchy --- ### Parameters *mandatory :* - `levels` (*list of str*): name of the columns composing the hierarchy (from the top to the bottom level). - `groupby_vars` (*list of str*): name of the columns with value to aggregate. - `extra_groupby_cols` (*list of str*) optional: other columns used to group in each level. *optional :* - `var_name` (*str*) : name of the result variable column. By default, `“type”`. - `value_name` (*str*): name of the result value column. By default, `“value”`. - `agg_func` (*str*): name of the aggregation operation. By default, `“sum”`. - `drop_levels` (*list of str*): the names of the levels that you may want to discard from the output. --- ### Example **Input** | Region | City | Population | |:---------:|:--------:|:-----------:| | Idf | Panam| 200 | | Idf | Antony | 50 | | Nord | Lille | 20 | ```cson roll_up: levels: ["Region", "City"] groupby_vars: "Population" ``` **Output** | Region | City | Population | value | type | |:---------:|:--------:|:-----------:|:--------:|:------:| | Idf | Panam| 200 | Panam | City | | Idf | Antony | 50 | Antony | City | | Nord | Lille | 20 | Lille | City | | Idf | Nan | 250 | Idf | Region | | Nord | Nan | 20 | Nord | Region | """ dfs = list() groupby_cols_cpy = list(levels) levels_cpy = list(levels) levels_cpy.reverse() extra_groupby_cols = extra_groupby_cols or [] drop_levels = drop_levels or [] previous_level = None for top_level in levels_cpy: # Aggregation gb_df = getattr(df.groupby(groupby_cols_cpy + extra_groupby_cols)[groupby_vars], agg_func)().reset_index() # Melt-like columns gb_df[var_name] = top_level gb_df[value_name] = gb_df[top_level] dfs.append(gb_df) if previous_level in drop_levels: del dfs[-2] # depends on [control=['if'], data=[]] previous_level = top_level # Remove one level each time in the groupby: lowest level column needs # a groupby with every levels, the next level needs every one except # the lowest, etc. until the top level column that needs only itself # inside the groupby. groupby_cols_cpy.pop() # depends on [control=['for'], data=['top_level']] return pd.concat(dfs, sort=False).reset_index()
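Reproducing the docstring's example, assuming `roll_up` as defined above is in scope; requires pandas (and `List` from `typing` for the signature).

```python
import pandas as pd

df = pd.DataFrame({
    "Region": ["Idf", "Idf", "Nord"],
    "City": ["Panam", "Antony", "Lille"],
    "Population": [200, 50, 20],
})

out = roll_up(df, levels=["Region", "City"], groupby_vars=["Population"])
# City-level rows come first, then Region-level aggregates (250 for Idf, 20 for Nord).
print(out[["Region", "City", "Population", "value", "type"]])
```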
def to_python(self): """Decode this KeyValueTable object to standard Python types.""" mapping = {} for row in self.rows: mapping[row[0]] = _format_python_value(row[1]) return mapping
def function[to_python, parameter[self]]: constant[Decode this KeyValueTable object to standard Python types.] variable[mapping] assign[=] dictionary[[], []] for taget[name[row]] in starred[name[self].rows] begin[:] call[name[mapping]][call[name[row]][constant[0]]] assign[=] call[name[_format_python_value], parameter[call[name[row]][constant[1]]]] return[name[mapping]]
keyword[def] identifier[to_python] ( identifier[self] ): literal[string] identifier[mapping] ={} keyword[for] identifier[row] keyword[in] identifier[self] . identifier[rows] : identifier[mapping] [ identifier[row] [ literal[int] ]]= identifier[_format_python_value] ( identifier[row] [ literal[int] ]) keyword[return] identifier[mapping]
def to_python(self): """Decode this KeyValueTable object to standard Python types.""" mapping = {} for row in self.rows: mapping[row[0]] = _format_python_value(row[1]) # depends on [control=['for'], data=['row']] return mapping
def get_facts_by_name_and_value(api_url=None, fact_name=None, fact_value=None, verify=False, cert=list()): """ Returns facts by name and value :param api_url: Base PuppetDB API url :param fact_name: Name of fact :param fact_value: Value of fact """ return utils._make_api_request(api_url, '/facts/{0}/{1}'.format(fact_name, fact_value), verify, cert)
def function[get_facts_by_name_and_value, parameter[api_url, fact_name, fact_value, verify, cert]]: constant[ Returns facts by name and value :param api_url: Base PuppetDB API url :param fact_name: Name of fact :param fact_value: Value of fact ] return[call[name[utils]._make_api_request, parameter[name[api_url], call[constant[/facts/{0}/{1}].format, parameter[name[fact_name], name[fact_value]]], name[verify], name[cert]]]]
keyword[def] identifier[get_facts_by_name_and_value] ( identifier[api_url] = keyword[None] , identifier[fact_name] = keyword[None] , identifier[fact_value] = keyword[None] , identifier[verify] = keyword[False] , identifier[cert] = identifier[list] ()): literal[string] keyword[return] identifier[utils] . identifier[_make_api_request] ( identifier[api_url] , literal[string] . identifier[format] ( identifier[fact_name] , identifier[fact_value] ), identifier[verify] , identifier[cert] )
def get_facts_by_name_and_value(api_url=None, fact_name=None, fact_value=None, verify=False, cert=list()): """ Returns facts by name and value :param api_url: Base PuppetDB API url :param fact_name: Name of fact :param fact_value: Value of fact """ return utils._make_api_request(api_url, '/facts/{0}/{1}'.format(fact_name, fact_value), verify, cert)
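A hedged call sketch. `utils._make_api_request` is internal to the snippet's package, so a stand-in that only prints the assembled URL is used; the `/facts/{name}/{value}` endpoint path comes straight from the function.

```python
class utils:  # stand-in for the snippet's helper module
    @staticmethod
    def _make_api_request(api_url, path, verify, cert):
        print("GET", api_url + path, "verify=%s" % verify)
        return []

def get_facts_by_name_and_value(api_url=None, fact_name=None, fact_value=None,
                                verify=False, cert=list()):
    return utils._make_api_request(api_url,
                                   '/facts/{0}/{1}'.format(fact_name, fact_value),
                                   verify, cert)

get_facts_by_name_and_value("http://puppetdb:8080/v3", "osfamily", "Debian")
```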
def encodeDNA(seq_vec, maxlen=None, seq_align="start"): """Convert the DNA sequence into 1-hot-encoding numpy array # Arguments seq_vec: list of chars List of sequences that can have different lengths maxlen: int or None, Should we trim (subset) the resulting sequence. If None don't trim. Note that trims wrt the align parameter. It should be smaller than the longest sequence. seq_align: character; 'end' or 'start' To which end should we align sequences? # Returns 3D numpy array of shape (len(seq_vec), trim_seq_len(or maximal sequence length if None), 4) # Example ```python >>> sequence_vec = ['CTTACTCAGA', 'TCTTTA'] >>> X_seq = encodeDNA(sequence_vec, seq_align="end", maxlen=8) >>> X_seq.shape (2, 8, 4) >>> print(X_seq) [[[0 0 0 1] [1 0 0 0] [0 1 0 0] [0 0 0 1] [0 1 0 0] [1 0 0 0] [0 0 1 0] [1 0 0 0]] [[0 0 0 0] [0 0 0 0] [0 0 0 1] [0 1 0 0] [0 0 0 1] [0 0 0 1] [0 0 0 1] [1 0 0 0]]] ``` """ return encodeSequence(seq_vec, vocab=DNA, neutral_vocab="N", maxlen=maxlen, seq_align=seq_align, pad_value="N", encode_type="one_hot")
def function[encodeDNA, parameter[seq_vec, maxlen, seq_align]]: constant[Convert the DNA sequence into 1-hot-encoding numpy array # Arguments seq_vec: list of chars List of sequences that can have different lengths maxlen: int or None, Should we trim (subset) the resulting sequence. If None don't trim. Note that trims wrt the align parameter. It should be smaller than the longest sequence. seq_align: character; 'end' or 'start' To which end should we align sequences? # Returns 3D numpy array of shape (len(seq_vec), trim_seq_len(or maximal sequence length if None), 4) # Example ```python >>> sequence_vec = ['CTTACTCAGA', 'TCTTTA'] >>> X_seq = encodeDNA(sequence_vec, seq_align="end", maxlen=8) >>> X_seq.shape (2, 8, 4) >>> print(X_seq) [[[0 0 0 1] [1 0 0 0] [0 1 0 0] [0 0 0 1] [0 1 0 0] [1 0 0 0] [0 0 1 0] [1 0 0 0]] [[0 0 0 0] [0 0 0 0] [0 0 0 1] [0 1 0 0] [0 0 0 1] [0 0 0 1] [0 0 0 1] [1 0 0 0]]] ``` ] return[call[name[encodeSequence], parameter[name[seq_vec]]]]
keyword[def] identifier[encodeDNA] ( identifier[seq_vec] , identifier[maxlen] = keyword[None] , identifier[seq_align] = literal[string] ): literal[string] keyword[return] identifier[encodeSequence] ( identifier[seq_vec] , identifier[vocab] = identifier[DNA] , identifier[neutral_vocab] = literal[string] , identifier[maxlen] = identifier[maxlen] , identifier[seq_align] = identifier[seq_align] , identifier[pad_value] = literal[string] , identifier[encode_type] = literal[string] )
def encodeDNA(seq_vec, maxlen=None, seq_align='start'): """Convert the DNA sequence into 1-hot-encoding numpy array # Arguments seq_vec: list of chars List of sequences that can have different lengths maxlen: int or None, Should we trim (subset) the resulting sequence. If None don't trim. Note that trims wrt the align parameter. It should be smaller than the longest sequence. seq_align: character; 'end' or 'start' To which end should we align sequences? # Returns 3D numpy array of shape (len(seq_vec), trim_seq_len(or maximal sequence length if None), 4) # Example ```python >>> sequence_vec = ['CTTACTCAGA', 'TCTTTA'] >>> X_seq = encodeDNA(sequence_vec, seq_align="end", maxlen=8) >>> X_seq.shape (2, 8, 4) >>> print(X_seq) [[[0 0 0 1] [1 0 0 0] [0 1 0 0] [0 0 0 1] [0 1 0 0] [1 0 0 0] [0 0 1 0] [1 0 0 0]] [[0 0 0 0] [0 0 0 0] [0 0 0 1] [0 1 0 0] [0 0 0 1] [0 0 0 1] [0 0 0 1] [1 0 0 0]]] ``` """ return encodeSequence(seq_vec, vocab=DNA, neutral_vocab='N', maxlen=maxlen, seq_align=seq_align, pad_value='N', encode_type='one_hot')
def logout(self): """ Safely logs out the client :return: True if the action was successful :rtype: bool """ if not hasattr(self, "_fb_h"): h_r = self._post(self.req_url.MODERN_SETTINGS_MENU, {"pmid": "4"}) self._fb_h = re.search(r'name=\\"h\\" value=\\"(.*?)\\"', h_r.text).group(1) data = {"ref": "mb", "h": self._fb_h} r = self._get(self.req_url.LOGOUT, data) self._resetValues() return r.ok
def function[logout, parameter[self]]: constant[ Safely logs out the client :return: True if the action was successful :rtype: bool ] if <ast.UnaryOp object at 0x7da1b19cf7c0> begin[:] variable[h_r] assign[=] call[name[self]._post, parameter[name[self].req_url.MODERN_SETTINGS_MENU, dictionary[[<ast.Constant object at 0x7da1b19cc790>], [<ast.Constant object at 0x7da1b19cc580>]]]] name[self]._fb_h assign[=] call[call[name[re].search, parameter[constant[name=\\"h\\" value=\\"(.*?)\\"], name[h_r].text]].group, parameter[constant[1]]] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b19cc850>, <ast.Constant object at 0x7da1b19ccbe0>], [<ast.Constant object at 0x7da1b19cd4e0>, <ast.Attribute object at 0x7da1b19cc400>]] variable[r] assign[=] call[name[self]._get, parameter[name[self].req_url.LOGOUT, name[data]]] call[name[self]._resetValues, parameter[]] return[name[r].ok]
keyword[def] identifier[logout] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[h_r] = identifier[self] . identifier[_post] ( identifier[self] . identifier[req_url] . identifier[MODERN_SETTINGS_MENU] ,{ literal[string] : literal[string] }) identifier[self] . identifier[_fb_h] = identifier[re] . identifier[search] ( literal[string] , identifier[h_r] . identifier[text] ). identifier[group] ( literal[int] ) identifier[data] ={ literal[string] : literal[string] , literal[string] : identifier[self] . identifier[_fb_h] } identifier[r] = identifier[self] . identifier[_get] ( identifier[self] . identifier[req_url] . identifier[LOGOUT] , identifier[data] ) identifier[self] . identifier[_resetValues] () keyword[return] identifier[r] . identifier[ok]
def logout(self): """ Safely logs out the client :return: True if the action was successful :rtype: bool """ if not hasattr(self, '_fb_h'): h_r = self._post(self.req_url.MODERN_SETTINGS_MENU, {'pmid': '4'}) self._fb_h = re.search('name=\\\\"h\\\\" value=\\\\"(.*?)\\\\"', h_r.text).group(1) # depends on [control=['if'], data=[]] data = {'ref': 'mb', 'h': self._fb_h} r = self._get(self.req_url.LOGOUT, data) self._resetValues() return r.ok
def load_source_vocabs(folder: str) -> List[Vocab]: """ Loads source vocabularies from folder. The first element in the list is the primary source vocabulary. Other elements correspond to optional additional source factor vocabularies found in folder. :param folder: Source folder. :return: List of vocabularies. """ return [vocab_from_json(os.path.join(folder, fname)) for fname in sorted([f for f in os.listdir(folder) if f.startswith(C.VOCAB_SRC_PREFIX)])]
def function[load_source_vocabs, parameter[folder]]: constant[ Loads source vocabularies from folder. The first element in the list is the primary source vocabulary. Other elements correspond to optional additional source factor vocabularies found in folder. :param folder: Source folder. :return: List of vocabularies. ] return[<ast.ListComp object at 0x7da1b1d45ae0>]
keyword[def] identifier[load_source_vocabs] ( identifier[folder] : identifier[str] )-> identifier[List] [ identifier[Vocab] ]: literal[string] keyword[return] [ identifier[vocab_from_json] ( identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , identifier[fname] )) keyword[for] identifier[fname] keyword[in] identifier[sorted] ([ identifier[f] keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[folder] ) keyword[if] identifier[f] . identifier[startswith] ( identifier[C] . identifier[VOCAB_SRC_PREFIX] )])]
def load_source_vocabs(folder: str) -> List[Vocab]: """ Loads source vocabularies from folder. The first element in the list is the primary source vocabulary. Other elements correspond to optional additional source factor vocabularies found in folder. :param folder: Source folder. :return: List of vocabularies. """ return [vocab_from_json(os.path.join(folder, fname)) for fname in sorted([f for f in os.listdir(folder) if f.startswith(C.VOCAB_SRC_PREFIX)])]
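What the folder convention implies: every file whose name starts with the source-vocab prefix is loaded in sorted order, so the factor-0 (primary) vocabulary comes first. The prefix value and loader below are stand-ins (the real ones live in sockeye's `constants` and vocab modules); `"vocab.src"` is an assumed value.

```python
import os
import tempfile

class C:  # stand-in constant; the real value lives in sockeye's constants module
    VOCAB_SRC_PREFIX = "vocab.src"  # assumed prefix

def vocab_from_json(path):  # stand-in loader; returns the filename for the demo
    return os.path.basename(path)

def load_source_vocabs(folder):
    return [vocab_from_json(os.path.join(folder, fname))
            for fname in sorted(f for f in os.listdir(folder)
                                if f.startswith(C.VOCAB_SRC_PREFIX))]

folder = tempfile.mkdtemp()
for name in ("vocab.src.1.json", "vocab.src.0.json", "vocab.trg.0.json"):
    open(os.path.join(folder, name), "w").close()
print(load_source_vocabs(folder))  # ['vocab.src.0.json', 'vocab.src.1.json']
```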
def reindex(self, new_index_name: str, identifier_key: str, **kwargs) -> 'ElasticIndex': """Reindex the entire index. Scrolls the old index and bulk indexes all data into the new index. :param new_index_name: :param identifier_key: :param kwargs: Overwrite ElasticIndex __init__ params. :return: """ if 'url' not in kwargs: kwargs['url'] = self.url if 'doc_type' not in kwargs: kwargs['doc_type'] = self.doc_type if 'mapping' not in kwargs: kwargs['mapping'] = self.mapping new_index = ElasticIndex(new_index_name, **kwargs) for results in self.scroll(size=500): new_index.bulk(results, identifier_key) return new_index
def function[reindex, parameter[self, new_index_name, identifier_key]]: constant[Reindex the entire index. Scrolls the old index and bulk indexes all data into the new index. :param new_index_name: :param identifier_key: :param kwargs: Overwrite ElasticIndex __init__ params. :return: ] if compare[constant[url] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] call[name[kwargs]][constant[url]] assign[=] name[self].url if compare[constant[doc_type] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] call[name[kwargs]][constant[doc_type]] assign[=] name[self].doc_type if compare[constant[mapping] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] call[name[kwargs]][constant[mapping]] assign[=] name[self].mapping variable[new_index] assign[=] call[name[ElasticIndex], parameter[name[new_index_name]]] for taget[name[results]] in starred[call[name[self].scroll, parameter[]]] begin[:] call[name[new_index].bulk, parameter[name[results], name[identifier_key]]] return[name[new_index]]
keyword[def] identifier[reindex] ( identifier[self] , identifier[new_index_name] : identifier[str] , identifier[identifier_key] : identifier[str] ,** identifier[kwargs] )-> literal[string] : literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[url] keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[doc_type] keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[mapping] identifier[new_index] = identifier[ElasticIndex] ( identifier[new_index_name] ,** identifier[kwargs] ) keyword[for] identifier[results] keyword[in] identifier[self] . identifier[scroll] ( identifier[size] = literal[int] ): identifier[new_index] . identifier[bulk] ( identifier[results] , identifier[identifier_key] ) keyword[return] identifier[new_index]
def reindex(self, new_index_name: str, identifier_key: str, **kwargs) -> 'ElasticIndex': """Reindex the entire index. Scrolls the old index and bulk indexes all data into the new index. :param new_index_name: :param identifier_key: :param kwargs: Overwrite ElasticIndex __init__ params. :return: """ if 'url' not in kwargs: kwargs['url'] = self.url # depends on [control=['if'], data=['kwargs']] if 'doc_type' not in kwargs: kwargs['doc_type'] = self.doc_type # depends on [control=['if'], data=['kwargs']] if 'mapping' not in kwargs: kwargs['mapping'] = self.mapping # depends on [control=['if'], data=['kwargs']] new_index = ElasticIndex(new_index_name, **kwargs) for results in self.scroll(size=500): new_index.bulk(results, identifier_key) # depends on [control=['for'], data=['results']] return new_index
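A self-contained sketch of the reindex flow using an in-memory stub that mirrors the interface the method relies on (`scroll` yielding batches, `bulk` indexing them, and constructor kwargs for `url`/`doc_type`/`mapping`). The kwargs defaults are condensed with `setdefault`, but the behaviour matches the method above.

```python
class ElasticIndex:  # minimal in-memory stub, not the real Elasticsearch wrapper
    def __init__(self, index, url=None, doc_type=None, mapping=None):
        self.index, self.url = index, url
        self.doc_type, self.mapping = doc_type, mapping
        self.docs = []

    def scroll(self, size=500):
        yield list(self.docs)  # one "scroll page" containing everything

    def bulk(self, results, identifier_key):
        self.docs.extend(results)

    def reindex(self, new_index_name, identifier_key, **kwargs):
        kwargs.setdefault('url', self.url)
        kwargs.setdefault('doc_type', self.doc_type)
        kwargs.setdefault('mapping', self.mapping)
        new_index = ElasticIndex(new_index_name, **kwargs)
        for results in self.scroll(size=500):
            new_index.bulk(results, identifier_key)
        return new_index

old = ElasticIndex("articles-v1", url="http://localhost:9200")
old.docs = [{"id": 1, "title": "hello"}, {"id": 2, "title": "world"}]
new = old.reindex("articles-v2", identifier_key="id")
print(new.index, len(new.docs))  # articles-v2 2
```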
def register(scheme): """ Registers a new scheme to the urlparser. :param scheme | <str> """ scheme = nstr(scheme) urlparse.uses_fragment.append(scheme) urlparse.uses_netloc.append(scheme) urlparse.uses_params.append(scheme) urlparse.uses_query.append(scheme) urlparse.uses_relative.append(scheme)
def function[register, parameter[scheme]]: constant[ Registers a new scheme to the urlparser. :param scheme | <str> ] variable[scheme] assign[=] call[name[nstr], parameter[name[scheme]]] call[name[urlparse].uses_fragment.append, parameter[name[scheme]]] call[name[urlparse].uses_netloc.append, parameter[name[scheme]]] call[name[urlparse].uses_params.append, parameter[name[scheme]]] call[name[urlparse].uses_query.append, parameter[name[scheme]]] call[name[urlparse].uses_relative.append, parameter[name[scheme]]]
keyword[def] identifier[register] ( identifier[scheme] ): literal[string] identifier[scheme] = identifier[nstr] ( identifier[scheme] ) identifier[urlparse] . identifier[uses_fragment] . identifier[append] ( identifier[scheme] ) identifier[urlparse] . identifier[uses_netloc] . identifier[append] ( identifier[scheme] ) identifier[urlparse] . identifier[uses_params] . identifier[append] ( identifier[scheme] ) identifier[urlparse] . identifier[uses_query] . identifier[append] ( identifier[scheme] ) identifier[urlparse] . identifier[uses_relative] . identifier[append] ( identifier[scheme] )
def register(scheme): """ Registers a new scheme to the urlparser. :param scheme | <str> """ scheme = nstr(scheme) urlparse.uses_fragment.append(scheme) urlparse.uses_netloc.append(scheme) urlparse.uses_params.append(scheme) urlparse.uses_query.append(scheme) urlparse.uses_relative.append(scheme)
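The observable effect of `register`: once a scheme is on urlparse's capability lists, relative joins start working for it. The sketch targets Python 3's `urllib.parse` (the snippet's `urlparse`/`nstr` come from its own Python 2 compatibility layer) and guards each list with `getattr`, because newer CPython versions dropped the unused `uses_query`/`uses_fragment` lists.

```python
from urllib import parse as urlparse

def register(scheme):
    scheme = str(scheme)  # the snippet's nstr() coerces to the native str type
    for name in ("uses_fragment", "uses_netloc", "uses_params",
                 "uses_query", "uses_relative"):
        lst = getattr(urlparse, name, None)  # some lists no longer exist in py3
        if lst is not None:
            lst.append(scheme)

print(urlparse.urljoin("myapp://host/a/b", "c"))  # 'c' - unknown scheme, no join
register("myapp")
print(urlparse.urljoin("myapp://host/a/b", "c"))  # 'myapp://host/a/c'
```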
def list_env(self, key=None): """ Displays a list of environment key/value pairs. """ for k, v in sorted(self.genv.items(), key=lambda o: o[0]): if key and k != key: continue print('%s ' % (k,)) pprint(v, indent=4)
def function[list_env, parameter[self, key]]: constant[ Displays a list of environment key/value pairs. ] for taget[tuple[[<ast.Name object at 0x7da1b00e0790>, <ast.Name object at 0x7da1b00e1930>]]] in starred[call[name[sorted], parameter[call[name[self].genv.items, parameter[]]]]] begin[:] if <ast.BoolOp object at 0x7da1b00e2260> begin[:] continue call[name[print], parameter[binary_operation[constant[%s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b00e2200>]]]]] call[name[pprint], parameter[name[v]]]
keyword[def] identifier[list_env] ( identifier[self] , identifier[key] = keyword[None] ): literal[string] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sorted] ( identifier[self] . identifier[genv] . identifier[items] (), identifier[key] = keyword[lambda] identifier[o] : identifier[o] [ literal[int] ]): keyword[if] identifier[key] keyword[and] identifier[k] != identifier[key] : keyword[continue] identifier[print] ( literal[string] %( identifier[k] ,)) identifier[pprint] ( identifier[v] , identifier[indent] = literal[int] )
def list_env(self, key=None): """ Displays a list of environment key/value pairs. """ for (k, v) in sorted(self.genv.items(), key=lambda o: o[0]): if key and k != key: continue # depends on [control=['if'], data=[]] print('%s ' % (k,)) pprint(v, indent=4) # depends on [control=['for'], data=[]]
def vol_per_rev_3_stop(color="", inner_diameter=0): """Return the volume per revolution of an Ismatec 6 roller pump given the inner diameter (ID) of 3-stop tubing. The calculation is interpolated from the table found at http://www.ismatec.com/int_e/pumps/t_mini_s_ms_ca/tubing_msca2.htm. Note: 1. Either input a string as the tubing color code or a number as the tubing inner diameter. If both are given, the function will default to using the color. 2. The calculation is interpolated for inner diameters between 0.13 and 3.17 mm. Accuracy is not guaranteed for tubes with smaller or larger diameters. :param color: Color code of the Ismatec 3-stop tubing :type color: string :param inner_diameter: Inner diameter of the Ismatec 3-stop tubing. Results will be most accurate for inner diameters between 0.13 and 3.17 mm. :type inner_diameter: float :return: Volume per revolution output by a 6-roller pump through the 3-stop tubing (mL/rev) :rtype: float :Examples: >>> from aguaclara.research.peristaltic_pump import vol_per_rev_3_stop >>> from aguaclara.core.units import unit_registry as u >>> round(vol_per_rev_3_stop(color="yellow-blue"), 6) <Quantity(0.148846, 'milliliter / rev')> >>> round(vol_per_rev_3_stop(inner_diameter=.20*u.mm), 6) <Quantity(0.003116, 'milliliter / rev')> """ if color != "": inner_diameter = ID_colored_tube(color) term1 = (R_pump * 2 * np.pi - k_nonlinear * inner_diameter) / u.rev term2 = np.pi * (inner_diameter ** 2) / 4 return (term1 * term2).to(u.mL/u.rev)
def function[vol_per_rev_3_stop, parameter[color, inner_diameter]]: constant[Return the volume per revolution of an Ismatec 6 roller pump given the inner diameter (ID) of 3-stop tubing. The calculation is interpolated from the table found at http://www.ismatec.com/int_e/pumps/t_mini_s_ms_ca/tubing_msca2.htm. Note: 1. Either input a string as the tubing color code or a number as the tubing inner diameter. If both are given, the function will default to using the color. 2. The calculation is interpolated for inner diameters between 0.13 and 3.17 mm. Accuracy is not guaranteed for tubes with smaller or larger diameters. :param color: Color code of the Ismatec 3-stop tubing :type color: string :param inner_diameter: Inner diameter of the Ismatec 3-stop tubing. Results will be most accurate for inner diameters between 0.13 and 3.17 mm. :type inner_diameter: float :return: Volume per revolution output by a 6-roller pump through the 3-stop tubing (mL/rev) :rtype: float :Examples: >>> from aguaclara.research.peristaltic_pump import vol_per_rev_3_stop >>> from aguaclara.core.units import unit_registry as u >>> round(vol_per_rev_3_stop(color="yellow-blue"), 6) <Quantity(0.148846, 'milliliter / rev')> >>> round(vol_per_rev_3_stop(inner_diameter=.20*u.mm), 6) <Quantity(0.003116, 'milliliter / rev')> ] if compare[name[color] not_equal[!=] constant[]] begin[:] variable[inner_diameter] assign[=] call[name[ID_colored_tube], parameter[name[color]]] variable[term1] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[R_pump] * constant[2]] * name[np].pi] - binary_operation[name[k_nonlinear] * name[inner_diameter]]] / name[u].rev] variable[term2] assign[=] binary_operation[binary_operation[name[np].pi * binary_operation[name[inner_diameter] ** constant[2]]] / constant[4]] return[call[binary_operation[name[term1] * name[term2]].to, parameter[binary_operation[name[u].mL / name[u].rev]]]]
keyword[def] identifier[vol_per_rev_3_stop] ( identifier[color] = literal[string] , identifier[inner_diameter] = literal[int] ): literal[string] keyword[if] identifier[color] != literal[string] : identifier[inner_diameter] = identifier[ID_colored_tube] ( identifier[color] ) identifier[term1] =( identifier[R_pump] * literal[int] * identifier[np] . identifier[pi] - identifier[k_nonlinear] * identifier[inner_diameter] )/ identifier[u] . identifier[rev] identifier[term2] = identifier[np] . identifier[pi] *( identifier[inner_diameter] ** literal[int] )/ literal[int] keyword[return] ( identifier[term1] * identifier[term2] ). identifier[to] ( identifier[u] . identifier[mL] / identifier[u] . identifier[rev] )
def vol_per_rev_3_stop(color='', inner_diameter=0): """Return the volume per revolution of an Ismatec 6 roller pump given the inner diameter (ID) of 3-stop tubing. The calculation is interpolated from the table found at http://www.ismatec.com/int_e/pumps/t_mini_s_ms_ca/tubing_msca2.htm. Note: 1. Either input a string as the tubing color code or a number as the tubing inner diameter. If both are given, the function will default to using the color. 2. The calculation is interpolated for inner diameters between 0.13 and 3.17 mm. Accuracy is not guaranteed for tubes with smaller or larger diameters. :param color: Color code of the Ismatec 3-stop tubing :type color: string :param inner_diameter: Inner diameter of the Ismatec 3-stop tubing. Results will be most accurate for inner diameters between 0.13 and 3.17 mm. :type inner_diameter: float :return: Volume per revolution output by a 6-roller pump through the 3-stop tubing (mL/rev) :rtype: float :Examples: >>> from aguaclara.research.peristaltic_pump import vol_per_rev_3_stop >>> from aguaclara.core.units import unit_registry as u >>> round(vol_per_rev_3_stop(color="yellow-blue"), 6) <Quantity(0.148846, 'milliliter / rev')> >>> round(vol_per_rev_3_stop(inner_diameter=.20*u.mm), 6) <Quantity(0.003116, 'milliliter / rev')> """ if color != '': inner_diameter = ID_colored_tube(color) # depends on [control=['if'], data=['color']] term1 = (R_pump * 2 * np.pi - k_nonlinear * inner_diameter) / u.rev term2 = np.pi * inner_diameter ** 2 / 4 return (term1 * term2).to(u.mL / u.rev)
def mu(self, lp, dist): """ GLM mean function, i.e. the inverse of the link function; this is useful for going from the linear prediction to mu Parameters ---------- lp : array-like of length n dist : Distribution instance Returns ------- mu : np.array of length n """ elp = np.exp(lp) return dist.levels * elp / (elp + 1)
def function[mu, parameter[self, lp, dist]]: constant[ GLM mean function, i.e. the inverse of the link function; this is useful for going from the linear prediction to mu Parameters ---------- lp : array-like of length n dist : Distribution instance Returns ------- mu : np.array of length n ] variable[elp] assign[=] call[name[np].exp, parameter[name[lp]]] return[binary_operation[binary_operation[name[dist].levels * name[elp]] / binary_operation[name[elp] + constant[1]]]]
keyword[def] identifier[mu] ( identifier[self] , identifier[lp] , identifier[dist] ): literal[string] identifier[elp] = identifier[np] . identifier[exp] ( identifier[lp] ) keyword[return] identifier[dist] . identifier[levels] * identifier[elp] /( identifier[elp] + literal[int] )
def mu(self, lp, dist):
        """
        glm mean function, ie inverse of link function
        this is useful for going from the linear prediction to mu

        Parameters
        ----------
        lp : array-like of length n
        dist : Distribution instance

        Returns
        -------
        mu : np.array of length n
        """
        elp = np.exp(lp)
        return dist.levels * elp / (elp + 1)
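A quick numeric check of the inverse link above: mu = levels * exp(lp) / (exp(lp) + 1), i.e. levels times the logistic sigmoid. FakeDist is a hypothetical stand-in for the Distribution instance the method expects.

import numpy as np

class FakeDist:
    levels = 1  # e.g. a binomial distribution with a single trial

lp = np.array([-2.0, 0.0, 2.0])  # linear predictions
elp = np.exp(lp)
print(np.round(FakeDist.levels * elp / (elp + 1), 3))  # [0.119 0.5   0.881]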
def send_output_report(self, data):
        """Send input/output/feature report ID = report_id, data should be a
        c_ubyte object that includes the required report data
        """
        assert( self.is_opened() )
        #make sure we have c_ubyte array storage
        if not ( isinstance(data, ctypes.Array) and \
                issubclass(data._type_, c_ubyte) ):
            raw_data_type = c_ubyte * len(data)
            raw_data = raw_data_type()
            for index in range( len(data) ):
                raw_data[index] = data[index]
        else:
            raw_data = data
        #
        # Adding a lock when writing (overlapped writes)
        over_write = winapi.OVERLAPPED()
        over_write.h_event = winapi.CreateEvent(None, 0, 0, None)
        if over_write.h_event:
            try:
                overlapped_write = over_write
                winapi.WriteFile(int(self.hid_handle), byref(raw_data),
                        len(raw_data), None, byref(overlapped_write)) #none overlapped
                error = ctypes.GetLastError()
                if error == winapi.ERROR_IO_PENDING:
                    # overlapped operation in progress
                    result = error
                elif error == 1167:
                    raise HIDError("Error device disconnected before write")
                else:
                    raise HIDError("Error %d when trying to write to HID "\
                            "device: %s"%(error, ctypes.FormatError(error)) )

                result = winapi.WaitForSingleObject(overlapped_write.h_event, 10000 )
                if result != winapi.WAIT_OBJECT_0:
                    # If the write times out make sure to
                    # cancel it, otherwise memory could
                    # get corrupted if the async write
                    # completes after this function returns
                    winapi.CancelIo( int(self.hid_handle) )
                    raise HIDError("Write timed out")
            finally:
                # Make sure the event is closed so resources aren't leaked
                winapi.CloseHandle(over_write.h_event)
        else:
            return winapi.WriteFile(int(self.hid_handle), byref(raw_data),
                    len(raw_data),
                    None, None) #none overlapped
        return True
def function[send_output_report, parameter[self, data]]:
    constant[Send input/output/feature report ID = report_id, data should be a
        c_ubyte object that includes the required report data
        ]
    assert[call[name[self].is_opened, parameter[]]]
    if <ast.UnaryOp object at 0x7da1b06bc5b0> begin[:]
        variable[raw_data_type] assign[=] binary_operation[name[c_ubyte] * call[name[len], parameter[name[data]]]]
        variable[raw_data] assign[=] call[name[raw_data_type], parameter[]]
        for taget[name[index]] in starred[call[name[range], parameter[call[name[len], parameter[name[data]]]]]] begin[:]
            call[name[raw_data]][name[index]] assign[=] call[name[data]][name[index]]
    variable[over_write] assign[=] call[name[winapi].OVERLAPPED, parameter[]]
    name[over_write].h_event assign[=] call[name[winapi].CreateEvent, parameter[constant[None], constant[0], constant[0], constant[None]]]
    if name[over_write].h_event begin[:]
        <ast.Try object at 0x7da1b06be230>
    return[constant[True]]
keyword[def] identifier[send_output_report] ( identifier[self] , identifier[data] ): literal[string] keyword[assert] ( identifier[self] . identifier[is_opened] ()) keyword[if] keyword[not] ( identifier[isinstance] ( identifier[data] , identifier[ctypes] . identifier[Array] ) keyword[and] identifier[issubclass] ( identifier[data] . identifier[_type_] , identifier[c_ubyte] )): identifier[raw_data_type] = identifier[c_ubyte] * identifier[len] ( identifier[data] ) identifier[raw_data] = identifier[raw_data_type] () keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[len] ( identifier[data] )): identifier[raw_data] [ identifier[index] ]= identifier[data] [ identifier[index] ] keyword[else] : identifier[raw_data] = identifier[data] identifier[over_write] = identifier[winapi] . identifier[OVERLAPPED] () identifier[over_write] . identifier[h_event] = identifier[winapi] . identifier[CreateEvent] ( keyword[None] , literal[int] , literal[int] , keyword[None] ) keyword[if] identifier[over_write] . identifier[h_event] : keyword[try] : identifier[overlapped_write] = identifier[over_write] identifier[winapi] . identifier[WriteFile] ( identifier[int] ( identifier[self] . identifier[hid_handle] ), identifier[byref] ( identifier[raw_data] ), identifier[len] ( identifier[raw_data] ), keyword[None] , identifier[byref] ( identifier[overlapped_write] )) identifier[error] = identifier[ctypes] . identifier[GetLastError] () keyword[if] identifier[error] == identifier[winapi] . identifier[ERROR_IO_PENDING] : identifier[result] = identifier[error] keyword[elif] identifier[error] == literal[int] : keyword[raise] identifier[HIDError] ( literal[string] ) keyword[else] : keyword[raise] identifier[HIDError] ( literal[string] literal[string] %( identifier[error] , identifier[ctypes] . identifier[FormatError] ( identifier[error] ))) identifier[result] = identifier[winapi] . identifier[WaitForSingleObject] ( identifier[overlapped_write] . identifier[h_event] , literal[int] ) keyword[if] identifier[result] != identifier[winapi] . identifier[WAIT_OBJECT_0] : identifier[winapi] . identifier[CancelIo] ( identifier[int] ( identifier[self] . identifier[hid_handle] )) keyword[raise] identifier[HIDError] ( literal[string] ) keyword[finally] : identifier[winapi] . identifier[CloseHandle] ( identifier[over_write] . identifier[h_event] ) keyword[else] : keyword[return] identifier[winapi] . identifier[WriteFile] ( identifier[int] ( identifier[self] . identifier[hid_handle] ), identifier[byref] ( identifier[raw_data] ), identifier[len] ( identifier[raw_data] ), keyword[None] , keyword[None] ) keyword[return] keyword[True]
def send_output_report(self, data):
    """Send input/output/feature report ID = report_id, data should be a
        c_ubyte object that includes the required report data
        """
    assert self.is_opened()
    #make sure we have c_ubyte array storage
    if not (isinstance(data, ctypes.Array) and issubclass(data._type_, c_ubyte)):
        raw_data_type = c_ubyte * len(data)
        raw_data = raw_data_type()
        for index in range(len(data)):
            raw_data[index] = data[index] # depends on [control=['for'], data=['index']] # depends on [control=['if'], data=[]]
    else:
        raw_data = data
    #
    # Adding a lock when writing (overlapped writes)
    over_write = winapi.OVERLAPPED()
    over_write.h_event = winapi.CreateEvent(None, 0, 0, None)
    if over_write.h_event:
        try:
            overlapped_write = over_write
            winapi.WriteFile(int(self.hid_handle), byref(raw_data), len(raw_data), None, byref(overlapped_write)) #none overlapped
            error = ctypes.GetLastError()
            if error == winapi.ERROR_IO_PENDING:
                # overlapped operation in progress
                result = error # depends on [control=['if'], data=['error']]
            elif error == 1167:
                raise HIDError('Error device disconnected before write') # depends on [control=['if'], data=[]]
            else:
                raise HIDError('Error %d when trying to write to HID device: %s' % (error, ctypes.FormatError(error)))
            result = winapi.WaitForSingleObject(overlapped_write.h_event, 10000)
            if result != winapi.WAIT_OBJECT_0:
                # If the write times out make sure to
                # cancel it, otherwise memory could
                # get corrupted if the async write
                # completes after this function returns
                winapi.CancelIo(int(self.hid_handle))
                raise HIDError('Write timed out') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
        finally:
            # Make sure the event is closed so resources aren't leaked
            winapi.CloseHandle(over_write.h_event) # depends on [control=['if'], data=[]]
    else:
        return winapi.WriteFile(int(self.hid_handle), byref(raw_data), len(raw_data), None, None) #none overlapped
    return True
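The buffer-preparation branch in isolation, runnable without Windows or a HID device: an arbitrary byte sequence is coerced into ctypes c_ubyte storage, which is what the method hands to WriteFile.

import ctypes
from ctypes import c_ubyte

def as_ubyte_array(data):
    # pass through if the caller already supplied a c_ubyte array
    if isinstance(data, ctypes.Array) and issubclass(data._type_, c_ubyte):
        return data
    # otherwise copy element by element into fresh c_ubyte storage
    raw = (c_ubyte * len(data))()
    for i, value in enumerate(data):
        raw[i] = value
    return raw

buf = as_ubyte_array([0x00, 0x01, 0xFF])  # e.g. a report ID byte plus payload
print(len(buf), list(buf))  # 3 [0, 1, 255]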
def deserialize(data): """ Deserialize `data` to an exception instance. If the `exc_path` value matches an exception registered as ``deserializable``, return an instance of that exception type. Otherwise, return a `RemoteError` instance describing the exception that occurred. """ key = data.get('exc_path') if key in registry: exc_args = data.get('exc_args', ()) return registry[key](*exc_args) exc_type = data.get('exc_type') value = data.get('value') return RemoteError(exc_type=exc_type, value=value)
def function[deserialize, parameter[data]]: constant[ Deserialize `data` to an exception instance. If the `exc_path` value matches an exception registered as ``deserializable``, return an instance of that exception type. Otherwise, return a `RemoteError` instance describing the exception that occurred. ] variable[key] assign[=] call[name[data].get, parameter[constant[exc_path]]] if compare[name[key] in name[registry]] begin[:] variable[exc_args] assign[=] call[name[data].get, parameter[constant[exc_args], tuple[[]]]] return[call[call[name[registry]][name[key]], parameter[<ast.Starred object at 0x7da1b184b7c0>]]] variable[exc_type] assign[=] call[name[data].get, parameter[constant[exc_type]]] variable[value] assign[=] call[name[data].get, parameter[constant[value]]] return[call[name[RemoteError], parameter[]]]
keyword[def] identifier[deserialize] ( identifier[data] ): literal[string] identifier[key] = identifier[data] . identifier[get] ( literal[string] ) keyword[if] identifier[key] keyword[in] identifier[registry] : identifier[exc_args] = identifier[data] . identifier[get] ( literal[string] ,()) keyword[return] identifier[registry] [ identifier[key] ](* identifier[exc_args] ) identifier[exc_type] = identifier[data] . identifier[get] ( literal[string] ) identifier[value] = identifier[data] . identifier[get] ( literal[string] ) keyword[return] identifier[RemoteError] ( identifier[exc_type] = identifier[exc_type] , identifier[value] = identifier[value] )
def deserialize(data): """ Deserialize `data` to an exception instance. If the `exc_path` value matches an exception registered as ``deserializable``, return an instance of that exception type. Otherwise, return a `RemoteError` instance describing the exception that occurred. """ key = data.get('exc_path') if key in registry: exc_args = data.get('exc_args', ()) return registry[key](*exc_args) # depends on [control=['if'], data=['key', 'registry']] exc_type = data.get('exc_type') value = data.get('value') return RemoteError(exc_type=exc_type, value=value)
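A self-contained round-trip sketch of the registry lookup and the RemoteError fallback. The registry entry and the RemoteError signature below are minimal stand-ins that mirror the call shapes above, not the library's real definitions.

registry = {'builtins.KeyError': KeyError}  # hypothetical registered exception

class RemoteError(Exception):
    def __init__(self, exc_type=None, value=None):
        super().__init__('{}: {}'.format(exc_type, value))

def deserialize(data):
    key = data.get('exc_path')
    if key in registry:
        return registry[key](*data.get('exc_args', ()))
    return RemoteError(exc_type=data.get('exc_type'), value=data.get('value'))

print(repr(deserialize({'exc_path': 'builtins.KeyError', 'exc_args': ('missing',)})))
print(repr(deserialize({'exc_type': 'Boom', 'value': 'not registered'})))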
def calc2dcoords(mol): """ Calculate optimal 2D coordinates of chemical structure """ topology.recognize(mol) g = set(i for i, _ in mol.atoms_iter()) # 1: get nodes in scaffolds scaffolds = [] belongs = {} for i, rkeys in enumerate(sorted(mol.scaffolds, key=len)): scf = [] for rkey in rkeys: ring = mol.rings[rkey] for r in ring: belongs[r] = i scf.append(ring) g -= set(ring) scaffolds.append(scf) # 2: traverse nodes and scaffolds # the node and scaffold graph should be a tree (no cycles) f = True # print(scaffolds) coords = {} while g: if f and scaffolds: # largest scaffold is first stack = [scaffolds[-1][0][0]] f = False else: stack = [g.pop()] pred = {} branch = {} while stack: # print("stack: {}".format(stack)) tail = stack.pop() # print("tail: {}".format(tail)) if tail in belongs: # scaffolds scf = scaffold_coords(scaffolds[belongs[tail]]) # print(scf.keys()) # rotate and translate if not coords: coords = scf else: u = coords[pred[tail]] v = scf[tail] op = [u[0] + math.cos(u[2]), u[1] + math.sin(u[2])] translate(scf, gm.vector(v[:2], op)) rotate(scf, op, gm.rad(u[2] + math.pi - v[2])) coords.update(scf) # stack nbrs of scaffold for k in scf.keys(): pred[k] = None for nbr in mol.neighbors(k): if nbr not in scf.keys(): stack.append(nbr) pred[nbr] = k else: # append linker if tail not in pred: # isolated coords[tail] = [0, 0, 0, 1] continue p = pred[tail] x, y, ang, d = coords[p] # TODO: ring configuration coords[tail] = [x + math.cos(ang), y + math.sin(ang), ang + d * math.pi / 3, d * -1] if p not in branch: coords[p][2] = gm.rad(coords[p][2] + math.pi * 2 / 3 * d) branch[p] = 1 elif branch[p] == 1: coords[p][2] = gm.rad(coords[p][2] + math.pi * d) branch[p] += 1 elif branch[p] == 2: coords[p][2] = gm.rad(coords[p][2] + math.pi * 2 / 3 * d) branch[p] += 1 for nbr in mol.neighbors(tail): if nbr not in pred: stack.append(nbr) pred[nbr] = tail g -= set(pred) resolve_overlap(coords) for i, a in mol.atoms_iter(): mol.atom(i).coords = coords[i][:2]
def function[calc2dcoords, parameter[mol]]: constant[ Calculate optimal 2D coordinates of chemical structure ] call[name[topology].recognize, parameter[name[mol]]] variable[g] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b24e2290>]] variable[scaffolds] assign[=] list[[]] variable[belongs] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b24e2860>, <ast.Name object at 0x7da1b24e25f0>]]] in starred[call[name[enumerate], parameter[call[name[sorted], parameter[name[mol].scaffolds]]]]] begin[:] variable[scf] assign[=] list[[]] for taget[name[rkey]] in starred[name[rkeys]] begin[:] variable[ring] assign[=] call[name[mol].rings][name[rkey]] for taget[name[r]] in starred[name[ring]] begin[:] call[name[belongs]][name[r]] assign[=] name[i] call[name[scf].append, parameter[name[ring]]] <ast.AugAssign object at 0x7da1b24e1d50> call[name[scaffolds].append, parameter[name[scf]]] variable[f] assign[=] constant[True] variable[coords] assign[=] dictionary[[], []] while name[g] begin[:] if <ast.BoolOp object at 0x7da1b24e2380> begin[:] variable[stack] assign[=] list[[<ast.Subscript object at 0x7da1b24e3bb0>]] variable[f] assign[=] constant[False] variable[pred] assign[=] dictionary[[], []] variable[branch] assign[=] dictionary[[], []] while name[stack] begin[:] variable[tail] assign[=] call[name[stack].pop, parameter[]] if compare[name[tail] in name[belongs]] begin[:] variable[scf] assign[=] call[name[scaffold_coords], parameter[call[name[scaffolds]][call[name[belongs]][name[tail]]]]] if <ast.UnaryOp object at 0x7da1b24ad120> begin[:] variable[coords] assign[=] name[scf] for taget[name[k]] in starred[call[name[scf].keys, parameter[]]] begin[:] call[name[pred]][name[k]] assign[=] constant[None] for taget[name[nbr]] in starred[call[name[mol].neighbors, parameter[name[k]]]] begin[:] if compare[name[nbr] <ast.NotIn object at 0x7da2590d7190> call[name[scf].keys, parameter[]]] begin[:] call[name[stack].append, parameter[name[nbr]]] call[name[pred]][name[nbr]] assign[=] name[k] <ast.AugAssign object at 0x7da1b23d53f0> call[name[resolve_overlap], parameter[name[coords]]] for taget[tuple[[<ast.Name object at 0x7da1b23d6da0>, <ast.Name object at 0x7da1b23d6a10>]]] in starred[call[name[mol].atoms_iter, parameter[]]] begin[:] call[name[mol].atom, parameter[name[i]]].coords assign[=] call[call[name[coords]][name[i]]][<ast.Slice object at 0x7da1b23d4d60>]
keyword[def] identifier[calc2dcoords] ( identifier[mol] ): literal[string] identifier[topology] . identifier[recognize] ( identifier[mol] ) identifier[g] = identifier[set] ( identifier[i] keyword[for] identifier[i] , identifier[_] keyword[in] identifier[mol] . identifier[atoms_iter] ()) identifier[scaffolds] =[] identifier[belongs] ={} keyword[for] identifier[i] , identifier[rkeys] keyword[in] identifier[enumerate] ( identifier[sorted] ( identifier[mol] . identifier[scaffolds] , identifier[key] = identifier[len] )): identifier[scf] =[] keyword[for] identifier[rkey] keyword[in] identifier[rkeys] : identifier[ring] = identifier[mol] . identifier[rings] [ identifier[rkey] ] keyword[for] identifier[r] keyword[in] identifier[ring] : identifier[belongs] [ identifier[r] ]= identifier[i] identifier[scf] . identifier[append] ( identifier[ring] ) identifier[g] -= identifier[set] ( identifier[ring] ) identifier[scaffolds] . identifier[append] ( identifier[scf] ) identifier[f] = keyword[True] identifier[coords] ={} keyword[while] identifier[g] : keyword[if] identifier[f] keyword[and] identifier[scaffolds] : identifier[stack] =[ identifier[scaffolds] [- literal[int] ][ literal[int] ][ literal[int] ]] identifier[f] = keyword[False] keyword[else] : identifier[stack] =[ identifier[g] . identifier[pop] ()] identifier[pred] ={} identifier[branch] ={} keyword[while] identifier[stack] : identifier[tail] = identifier[stack] . identifier[pop] () keyword[if] identifier[tail] keyword[in] identifier[belongs] : identifier[scf] = identifier[scaffold_coords] ( identifier[scaffolds] [ identifier[belongs] [ identifier[tail] ]]) keyword[if] keyword[not] identifier[coords] : identifier[coords] = identifier[scf] keyword[else] : identifier[u] = identifier[coords] [ identifier[pred] [ identifier[tail] ]] identifier[v] = identifier[scf] [ identifier[tail] ] identifier[op] =[ identifier[u] [ literal[int] ]+ identifier[math] . identifier[cos] ( identifier[u] [ literal[int] ]), identifier[u] [ literal[int] ]+ identifier[math] . identifier[sin] ( identifier[u] [ literal[int] ])] identifier[translate] ( identifier[scf] , identifier[gm] . identifier[vector] ( identifier[v] [: literal[int] ], identifier[op] )) identifier[rotate] ( identifier[scf] , identifier[op] , identifier[gm] . identifier[rad] ( identifier[u] [ literal[int] ]+ identifier[math] . identifier[pi] - identifier[v] [ literal[int] ])) identifier[coords] . identifier[update] ( identifier[scf] ) keyword[for] identifier[k] keyword[in] identifier[scf] . identifier[keys] (): identifier[pred] [ identifier[k] ]= keyword[None] keyword[for] identifier[nbr] keyword[in] identifier[mol] . identifier[neighbors] ( identifier[k] ): keyword[if] identifier[nbr] keyword[not] keyword[in] identifier[scf] . identifier[keys] (): identifier[stack] . identifier[append] ( identifier[nbr] ) identifier[pred] [ identifier[nbr] ]= identifier[k] keyword[else] : keyword[if] identifier[tail] keyword[not] keyword[in] identifier[pred] : identifier[coords] [ identifier[tail] ]=[ literal[int] , literal[int] , literal[int] , literal[int] ] keyword[continue] identifier[p] = identifier[pred] [ identifier[tail] ] identifier[x] , identifier[y] , identifier[ang] , identifier[d] = identifier[coords] [ identifier[p] ] identifier[coords] [ identifier[tail] ]=[ identifier[x] + identifier[math] . identifier[cos] ( identifier[ang] ), identifier[y] + identifier[math] . identifier[sin] ( identifier[ang] ), identifier[ang] + identifier[d] * identifier[math] . identifier[pi] / literal[int] , identifier[d] *- literal[int] ] keyword[if] identifier[p] keyword[not] keyword[in] identifier[branch] : identifier[coords] [ identifier[p] ][ literal[int] ]= identifier[gm] . identifier[rad] ( identifier[coords] [ identifier[p] ][ literal[int] ]+ identifier[math] . identifier[pi] * literal[int] / literal[int] * identifier[d] ) identifier[branch] [ identifier[p] ]= literal[int] keyword[elif] identifier[branch] [ identifier[p] ]== literal[int] : identifier[coords] [ identifier[p] ][ literal[int] ]= identifier[gm] . identifier[rad] ( identifier[coords] [ identifier[p] ][ literal[int] ]+ identifier[math] . identifier[pi] * identifier[d] ) identifier[branch] [ identifier[p] ]+= literal[int] keyword[elif] identifier[branch] [ identifier[p] ]== literal[int] : identifier[coords] [ identifier[p] ][ literal[int] ]= identifier[gm] . identifier[rad] ( identifier[coords] [ identifier[p] ][ literal[int] ]+ identifier[math] . identifier[pi] * literal[int] / literal[int] * identifier[d] ) identifier[branch] [ identifier[p] ]+= literal[int] keyword[for] identifier[nbr] keyword[in] identifier[mol] . identifier[neighbors] ( identifier[tail] ): keyword[if] identifier[nbr] keyword[not] keyword[in] identifier[pred] : identifier[stack] . identifier[append] ( identifier[nbr] ) identifier[pred] [ identifier[nbr] ]= identifier[tail] identifier[g] -= identifier[set] ( identifier[pred] ) identifier[resolve_overlap] ( identifier[coords] ) keyword[for] identifier[i] , identifier[a] keyword[in] identifier[mol] . identifier[atoms_iter] (): identifier[mol] . identifier[atom] ( identifier[i] ). identifier[coords] = identifier[coords] [ identifier[i] ][: literal[int] ]
def calc2dcoords(mol): """ Calculate optimal 2D coordinates of chemical structure """ topology.recognize(mol) g = set((i for (i, _) in mol.atoms_iter())) # 1: get nodes in scaffolds scaffolds = [] belongs = {} for (i, rkeys) in enumerate(sorted(mol.scaffolds, key=len)): scf = [] for rkey in rkeys: ring = mol.rings[rkey] for r in ring: belongs[r] = i # depends on [control=['for'], data=['r']] scf.append(ring) g -= set(ring) # depends on [control=['for'], data=['rkey']] scaffolds.append(scf) # depends on [control=['for'], data=[]] # 2: traverse nodes and scaffolds # the node and scaffold graph should be a tree (no cycles) f = True # print(scaffolds) coords = {} while g: if f and scaffolds: # largest scaffold is first stack = [scaffolds[-1][0][0]] f = False # depends on [control=['if'], data=[]] else: stack = [g.pop()] pred = {} branch = {} while stack: # print("stack: {}".format(stack)) tail = stack.pop() # print("tail: {}".format(tail)) if tail in belongs: # scaffolds scf = scaffold_coords(scaffolds[belongs[tail]]) # print(scf.keys()) # rotate and translate if not coords: coords = scf # depends on [control=['if'], data=[]] else: u = coords[pred[tail]] v = scf[tail] op = [u[0] + math.cos(u[2]), u[1] + math.sin(u[2])] translate(scf, gm.vector(v[:2], op)) rotate(scf, op, gm.rad(u[2] + math.pi - v[2])) coords.update(scf) # stack nbrs of scaffold for k in scf.keys(): pred[k] = None for nbr in mol.neighbors(k): if nbr not in scf.keys(): stack.append(nbr) pred[nbr] = k # depends on [control=['if'], data=['nbr']] # depends on [control=['for'], data=['nbr']] # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=['tail', 'belongs']] else: # append linker if tail not in pred: # isolated coords[tail] = [0, 0, 0, 1] continue # depends on [control=['if'], data=['tail']] p = pred[tail] (x, y, ang, d) = coords[p] # TODO: ring configuration coords[tail] = [x + math.cos(ang), y + math.sin(ang), ang + d * math.pi / 3, d * -1] if p not in branch: coords[p][2] = gm.rad(coords[p][2] + math.pi * 2 / 3 * d) branch[p] = 1 # depends on [control=['if'], data=['p', 'branch']] elif branch[p] == 1: coords[p][2] = gm.rad(coords[p][2] + math.pi * d) branch[p] += 1 # depends on [control=['if'], data=[]] elif branch[p] == 2: coords[p][2] = gm.rad(coords[p][2] + math.pi * 2 / 3 * d) branch[p] += 1 # depends on [control=['if'], data=[]] for nbr in mol.neighbors(tail): if nbr not in pred: stack.append(nbr) pred[nbr] = tail # depends on [control=['if'], data=['nbr', 'pred']] # depends on [control=['for'], data=['nbr']] # depends on [control=['while'], data=[]] g -= set(pred) # depends on [control=['while'], data=[]] resolve_overlap(coords) for (i, a) in mol.atoms_iter(): mol.atom(i).coords = coords[i][:2] # depends on [control=['for'], data=[]]
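The linker placement step from the coords[tail] update above, run on toy state: each chain atom is dropped one unit along the current heading, then the heading turns by d * 60 degrees with d flipping sign, so headings alternate between 0 and 60 degrees and trace the familiar zig-zag backbone.

import math

x, y, ang, d = 0.0, 0.0, 0.0, 1  # start at the origin, heading along +x
chain = [(x, y)]
for _ in range(5):
    x, y = x + math.cos(ang), y + math.sin(ang)
    ang, d = ang + d * math.pi / 3, d * -1  # alternate the 60 degree turn direction
    chain.append((round(x, 2), round(y, 2)))
print(chain)  # [(0.0, 0.0), (1.0, 0.0), (1.5, 0.87), (2.5, 0.87), (3.0, 1.73), (4.0, 1.73)]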
def tile_sprite(self, out_format="sprite.json", out_folder=None): """ This resource returns sprite image and metadata """ url = "{url}/resources/sprites/{f}".format(url=self._url, f=out_format) if out_folder is None: out_folder = tempfile.gettempdir() return self._get(url=url, param_dict={}, out_folder=out_folder, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_host)
def function[tile_sprite, parameter[self, out_format, out_folder]]: constant[ This resource returns sprite image and metadata ] variable[url] assign[=] call[constant[{url}/resources/sprites/{f}].format, parameter[]] if compare[name[out_folder] is constant[None]] begin[:] variable[out_folder] assign[=] call[name[tempfile].gettempdir, parameter[]] return[call[name[self]._get, parameter[]]]
keyword[def] identifier[tile_sprite] ( identifier[self] , identifier[out_format] = literal[string] , identifier[out_folder] = keyword[None] ): literal[string] identifier[url] = literal[string] . identifier[format] ( identifier[url] = identifier[self] . identifier[_url] , identifier[f] = identifier[out_format] ) keyword[if] identifier[out_folder] keyword[is] keyword[None] : identifier[out_folder] = identifier[tempfile] . identifier[gettempdir] () keyword[return] identifier[self] . identifier[_get] ( identifier[url] = identifier[url] , identifier[param_dict] ={}, identifier[out_folder] = identifier[out_folder] , identifier[securityHandler] = identifier[self] . identifier[_securityHandler] , identifier[proxy_port] = identifier[self] . identifier[_proxy_port] , identifier[proxy_url] = identifier[self] . identifier[_proxy_host] )
def tile_sprite(self, out_format='sprite.json', out_folder=None): """ This resource returns sprite image and metadata """ url = '{url}/resources/sprites/{f}'.format(url=self._url, f=out_format) if out_folder is None: out_folder = tempfile.gettempdir() # depends on [control=['if'], data=['out_folder']] return self._get(url=url, param_dict={}, out_folder=out_folder, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_host)
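The URL assembly and default-folder logic in isolation; the service root below is a placeholder, and actually downloading the sprite needs a live ArcGIS vector tile service behind self._get.

import tempfile

def sprite_request(base_url, out_format='sprite.json', out_folder=None):
    url = '{url}/resources/sprites/{f}'.format(url=base_url, f=out_format)
    if out_folder is None:
        out_folder = tempfile.gettempdir()  # same fallback as the method above
    return url, out_folder

print(sprite_request('https://example.com/arcgis/rest/services/Demo/VectorTileServer'))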
def extract(self, file_obj, extractOnly=True, handler='update/extract', **kwargs): """ POSTs a file to the Solr ExtractingRequestHandler so rich content can be processed using Apache Tika. See the Solr wiki for details: http://wiki.apache.org/solr/ExtractingRequestHandler The ExtractingRequestHandler has a very simple model: it extracts contents and metadata from the uploaded file and inserts it directly into the index. This is rarely useful as it allows no way to store additional data or otherwise customize the record. Instead, by default we'll use the extract-only mode to extract the data without indexing it so the caller has the opportunity to process it as appropriate; call with ``extractOnly=False`` if you want to insert with no additional processing. Returns None if metadata cannot be extracted; otherwise returns a dictionary containing at least two keys: :contents: Extracted full-text content, if applicable :metadata: key:value pairs of text strings """ if not hasattr(file_obj, "name"): raise ValueError("extract() requires file-like objects which have a defined name property") params = { "extractOnly": "true" if extractOnly else "false", "lowernames": "true", "wt": "json", } params.update(kwargs) filename = quote(file_obj.name.encode('utf-8')) try: # We'll provide the file using its true name as Tika may use that # as a file type hint: resp = self._send_request('post', handler, body=params, files={'file': (filename, file_obj)}) except (IOError, SolrError): self.log.exception("Failed to extract document metadata") raise try: data = json.loads(resp) except ValueError: self.log.exception("Failed to load JSON response") raise data['contents'] = data.pop(filename, None) data['metadata'] = metadata = {} raw_metadata = data.pop("%s_metadata" % filename, None) if raw_metadata: # The raw format is somewhat annoying: it's a flat list of # alternating keys and value lists while raw_metadata: metadata[raw_metadata.pop()] = raw_metadata.pop() return data
def function[extract, parameter[self, file_obj, extractOnly, handler]]: constant[ POSTs a file to the Solr ExtractingRequestHandler so rich content can be processed using Apache Tika. See the Solr wiki for details: http://wiki.apache.org/solr/ExtractingRequestHandler The ExtractingRequestHandler has a very simple model: it extracts contents and metadata from the uploaded file and inserts it directly into the index. This is rarely useful as it allows no way to store additional data or otherwise customize the record. Instead, by default we'll use the extract-only mode to extract the data without indexing it so the caller has the opportunity to process it as appropriate; call with ``extractOnly=False`` if you want to insert with no additional processing. Returns None if metadata cannot be extracted; otherwise returns a dictionary containing at least two keys: :contents: Extracted full-text content, if applicable :metadata: key:value pairs of text strings ] if <ast.UnaryOp object at 0x7da20e9b1750> begin[:] <ast.Raise object at 0x7da20e9b2ce0> variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b01c0>, <ast.Constant object at 0x7da20e9b2830>, <ast.Constant object at 0x7da20e9b1fc0>], [<ast.IfExp object at 0x7da20e9b3af0>, <ast.Constant object at 0x7da20e9b1600>, <ast.Constant object at 0x7da20e9b39a0>]] call[name[params].update, parameter[name[kwargs]]] variable[filename] assign[=] call[name[quote], parameter[call[name[file_obj].name.encode, parameter[constant[utf-8]]]]] <ast.Try object at 0x7da20e9b18d0> <ast.Try object at 0x7da1b22ade70> call[name[data]][constant[contents]] assign[=] call[name[data].pop, parameter[name[filename], constant[None]]] call[name[data]][constant[metadata]] assign[=] dictionary[[], []] variable[raw_metadata] assign[=] call[name[data].pop, parameter[binary_operation[constant[%s_metadata] <ast.Mod object at 0x7da2590d6920> name[filename]], constant[None]]] if name[raw_metadata] begin[:] while name[raw_metadata] begin[:] call[name[metadata]][call[name[raw_metadata].pop, parameter[]]] assign[=] call[name[raw_metadata].pop, parameter[]] return[name[data]]
keyword[def] identifier[extract] ( identifier[self] , identifier[file_obj] , identifier[extractOnly] = keyword[True] , identifier[handler] = literal[string] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[file_obj] , literal[string] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[params] ={ literal[string] : literal[string] keyword[if] identifier[extractOnly] keyword[else] literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , } identifier[params] . identifier[update] ( identifier[kwargs] ) identifier[filename] = identifier[quote] ( identifier[file_obj] . identifier[name] . identifier[encode] ( literal[string] )) keyword[try] : identifier[resp] = identifier[self] . identifier[_send_request] ( literal[string] , identifier[handler] , identifier[body] = identifier[params] , identifier[files] ={ literal[string] :( identifier[filename] , identifier[file_obj] )}) keyword[except] ( identifier[IOError] , identifier[SolrError] ): identifier[self] . identifier[log] . identifier[exception] ( literal[string] ) keyword[raise] keyword[try] : identifier[data] = identifier[json] . identifier[loads] ( identifier[resp] ) keyword[except] identifier[ValueError] : identifier[self] . identifier[log] . identifier[exception] ( literal[string] ) keyword[raise] identifier[data] [ literal[string] ]= identifier[data] . identifier[pop] ( identifier[filename] , keyword[None] ) identifier[data] [ literal[string] ]= identifier[metadata] ={} identifier[raw_metadata] = identifier[data] . identifier[pop] ( literal[string] % identifier[filename] , keyword[None] ) keyword[if] identifier[raw_metadata] : keyword[while] identifier[raw_metadata] : identifier[metadata] [ identifier[raw_metadata] . identifier[pop] ()]= identifier[raw_metadata] . identifier[pop] () keyword[return] identifier[data]
def extract(self, file_obj, extractOnly=True, handler='update/extract', **kwargs): """ POSTs a file to the Solr ExtractingRequestHandler so rich content can be processed using Apache Tika. See the Solr wiki for details: http://wiki.apache.org/solr/ExtractingRequestHandler The ExtractingRequestHandler has a very simple model: it extracts contents and metadata from the uploaded file and inserts it directly into the index. This is rarely useful as it allows no way to store additional data or otherwise customize the record. Instead, by default we'll use the extract-only mode to extract the data without indexing it so the caller has the opportunity to process it as appropriate; call with ``extractOnly=False`` if you want to insert with no additional processing. Returns None if metadata cannot be extracted; otherwise returns a dictionary containing at least two keys: :contents: Extracted full-text content, if applicable :metadata: key:value pairs of text strings """ if not hasattr(file_obj, 'name'): raise ValueError('extract() requires file-like objects which have a defined name property') # depends on [control=['if'], data=[]] params = {'extractOnly': 'true' if extractOnly else 'false', 'lowernames': 'true', 'wt': 'json'} params.update(kwargs) filename = quote(file_obj.name.encode('utf-8')) try: # We'll provide the file using its true name as Tika may use that # as a file type hint: resp = self._send_request('post', handler, body=params, files={'file': (filename, file_obj)}) # depends on [control=['try'], data=[]] except (IOError, SolrError): self.log.exception('Failed to extract document metadata') raise # depends on [control=['except'], data=[]] try: data = json.loads(resp) # depends on [control=['try'], data=[]] except ValueError: self.log.exception('Failed to load JSON response') raise # depends on [control=['except'], data=[]] data['contents'] = data.pop(filename, None) data['metadata'] = metadata = {} raw_metadata = data.pop('%s_metadata' % filename, None) if raw_metadata: # The raw format is somewhat annoying: it's a flat list of # alternating keys and value lists while raw_metadata: metadata[raw_metadata.pop()] = raw_metadata.pop() # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] return data
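The metadata-unflattening loop in isolation: Solr's extract handler returns metadata as a flat list alternating keys and value lists. Because Python evaluates the right-hand side of an assignment first, each pair of pop() calls takes the value list off the end before the key.

raw_metadata = ['stream_size', ['1024'], 'content_type', ['application/pdf']]
metadata = {}
while raw_metadata:
    # the RHS pop() yields the value list; the LHS pop() then yields its key
    metadata[raw_metadata.pop()] = raw_metadata.pop()
print(metadata)  # {'content_type': ['application/pdf'], 'stream_size': ['1024']}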
def dmstodd(self, dms): """ convert dms to dd""" size = len(dms) letters = 'WENS' is_annotated = False try: float(dms) except ValueError: for letter in letters: if letter in dms.upper(): is_annotated = True break if not is_annotated: raise core.RTreeError("unable to parse '%s' to decimal degrees" % dms) is_negative = False if is_annotated: dms_upper = dms.upper() if 'W' in dms_upper or 'S' in dms_upper: is_negative = True else: if dms < 0: is_negative = True if is_annotated: bletters = letters.encode(encoding='utf-8') bdms = dms.encode(encoding = 'utf-8') dms = bdms.translate(None, bletters).decode('ascii') # bletters = bytes(letters, encoding='utf-8') # bdms = bytes(dms, encoding='utf-8') # dms = bdms.translate(None, bletters).decode('ascii') # dms = dms.translate(None, letters) # Python 2.x version pieces = dms.split(".") D = 0.0 M = 0.0 S = 0.0 divisor = 3600.0 if len(pieces) == 1: S = dms[-2:] M = dms[-4:-2] D = dms[:-4] else: S = '{0:s}.{1:s}'.format (pieces[0][-2:], pieces[1]) M = pieces[0][-4:-2] D = pieces[0][:-4] DD = float(D) + float(M)/60.0 + float(S)/divisor if is_negative: DD = DD * -1.0 return DD
def function[dmstodd, parameter[self, dms]]: constant[ convert dms to dd] variable[size] assign[=] call[name[len], parameter[name[dms]]] variable[letters] assign[=] constant[WENS] variable[is_annotated] assign[=] constant[False] <ast.Try object at 0x7da1b10e7010> variable[is_negative] assign[=] constant[False] if name[is_annotated] begin[:] variable[dms_upper] assign[=] call[name[dms].upper, parameter[]] if <ast.BoolOp object at 0x7da20c76d870> begin[:] variable[is_negative] assign[=] constant[True] if name[is_annotated] begin[:] variable[bletters] assign[=] call[name[letters].encode, parameter[]] variable[bdms] assign[=] call[name[dms].encode, parameter[]] variable[dms] assign[=] call[call[name[bdms].translate, parameter[constant[None], name[bletters]]].decode, parameter[constant[ascii]]] variable[pieces] assign[=] call[name[dms].split, parameter[constant[.]]] variable[D] assign[=] constant[0.0] variable[M] assign[=] constant[0.0] variable[S] assign[=] constant[0.0] variable[divisor] assign[=] constant[3600.0] if compare[call[name[len], parameter[name[pieces]]] equal[==] constant[1]] begin[:] variable[S] assign[=] call[name[dms]][<ast.Slice object at 0x7da1b1052f20>] variable[M] assign[=] call[name[dms]][<ast.Slice object at 0x7da1b1053670>] variable[D] assign[=] call[name[dms]][<ast.Slice object at 0x7da1b10d7970>] variable[DD] assign[=] binary_operation[binary_operation[call[name[float], parameter[name[D]]] + binary_operation[call[name[float], parameter[name[M]]] / constant[60.0]]] + binary_operation[call[name[float], parameter[name[S]]] / name[divisor]]] if name[is_negative] begin[:] variable[DD] assign[=] binary_operation[name[DD] * <ast.UnaryOp object at 0x7da1b10d47c0>] return[name[DD]]
keyword[def] identifier[dmstodd] ( identifier[self] , identifier[dms] ): literal[string] identifier[size] = identifier[len] ( identifier[dms] ) identifier[letters] = literal[string] identifier[is_annotated] = keyword[False] keyword[try] : identifier[float] ( identifier[dms] ) keyword[except] identifier[ValueError] : keyword[for] identifier[letter] keyword[in] identifier[letters] : keyword[if] identifier[letter] keyword[in] identifier[dms] . identifier[upper] (): identifier[is_annotated] = keyword[True] keyword[break] keyword[if] keyword[not] identifier[is_annotated] : keyword[raise] identifier[core] . identifier[RTreeError] ( literal[string] % identifier[dms] ) identifier[is_negative] = keyword[False] keyword[if] identifier[is_annotated] : identifier[dms_upper] = identifier[dms] . identifier[upper] () keyword[if] literal[string] keyword[in] identifier[dms_upper] keyword[or] literal[string] keyword[in] identifier[dms_upper] : identifier[is_negative] = keyword[True] keyword[else] : keyword[if] identifier[dms] < literal[int] : identifier[is_negative] = keyword[True] keyword[if] identifier[is_annotated] : identifier[bletters] = identifier[letters] . identifier[encode] ( identifier[encoding] = literal[string] ) identifier[bdms] = identifier[dms] . identifier[encode] ( identifier[encoding] = literal[string] ) identifier[dms] = identifier[bdms] . identifier[translate] ( keyword[None] , identifier[bletters] ). identifier[decode] ( literal[string] ) identifier[pieces] = identifier[dms] . identifier[split] ( literal[string] ) identifier[D] = literal[int] identifier[M] = literal[int] identifier[S] = literal[int] identifier[divisor] = literal[int] keyword[if] identifier[len] ( identifier[pieces] )== literal[int] : identifier[S] = identifier[dms] [- literal[int] :] identifier[M] = identifier[dms] [- literal[int] :- literal[int] ] identifier[D] = identifier[dms] [:- literal[int] ] keyword[else] : identifier[S] = literal[string] . identifier[format] ( identifier[pieces] [ literal[int] ][- literal[int] :], identifier[pieces] [ literal[int] ]) identifier[M] = identifier[pieces] [ literal[int] ][- literal[int] :- literal[int] ] identifier[D] = identifier[pieces] [ literal[int] ][:- literal[int] ] identifier[DD] = identifier[float] ( identifier[D] )+ identifier[float] ( identifier[M] )/ literal[int] + identifier[float] ( identifier[S] )/ identifier[divisor] keyword[if] identifier[is_negative] : identifier[DD] = identifier[DD] *- literal[int] keyword[return] identifier[DD]
def dmstodd(self, dms): """ convert dms to dd""" size = len(dms) letters = 'WENS' is_annotated = False try: float(dms) # depends on [control=['try'], data=[]] except ValueError: for letter in letters: if letter in dms.upper(): is_annotated = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['letter']] if not is_annotated: raise core.RTreeError("unable to parse '%s' to decimal degrees" % dms) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] is_negative = False if is_annotated: dms_upper = dms.upper() if 'W' in dms_upper or 'S' in dms_upper: is_negative = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif dms < 0: is_negative = True # depends on [control=['if'], data=[]] if is_annotated: bletters = letters.encode(encoding='utf-8') bdms = dms.encode(encoding='utf-8') dms = bdms.translate(None, bletters).decode('ascii') # depends on [control=['if'], data=[]] # bletters = bytes(letters, encoding='utf-8') # bdms = bytes(dms, encoding='utf-8') # dms = bdms.translate(None, bletters).decode('ascii') # dms = dms.translate(None, letters) # Python 2.x version pieces = dms.split('.') D = 0.0 M = 0.0 S = 0.0 divisor = 3600.0 if len(pieces) == 1: S = dms[-2:] M = dms[-4:-2] D = dms[:-4] # depends on [control=['if'], data=[]] else: S = '{0:s}.{1:s}'.format(pieces[0][-2:], pieces[1]) M = pieces[0][-4:-2] D = pieces[0][:-4] DD = float(D) + float(M) / 60.0 + float(S) / divisor if is_negative: DD = DD * -1.0 # depends on [control=['if'], data=[]] return DD
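A worked example of the digit-slicing arithmetic for an annotated value such as '1223045.5W' (122 degrees, 30 minutes, 45.5 seconds West): strip the letter, slice seconds and minutes off the end, then combine as D + M/60 + S/3600 and negate for W/S.

dms = '1223045.5'                      # '1223045.5W' with the letter stripped
head, tail = dms.split('.')
D, M = head[:-4], head[-4:-2]          # '122', '30'
S = '{0}.{1}'.format(head[-2:], tail)  # '45.5'
dd = float(D) + float(M) / 60.0 + float(S) / 3600.0
print(round(-dd, 6))                   # -122.512639, negative because of 'W'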
def handle(self, *args, **options): """Command handle.""" verbosity = int(options['verbosity']) skip_mapping = options['skip_mapping'] if self.has_filter(options): self.filter_indices(options, verbosity, skip_mapping=skip_mapping) else: # Process all indices. index_builder.delete(skip_mapping=skip_mapping)
def function[handle, parameter[self]]: constant[Command handle.] variable[verbosity] assign[=] call[name[int], parameter[call[name[options]][constant[verbosity]]]] variable[skip_mapping] assign[=] call[name[options]][constant[skip_mapping]] if call[name[self].has_filter, parameter[name[options]]] begin[:] call[name[self].filter_indices, parameter[name[options], name[verbosity]]]
keyword[def] identifier[handle] ( identifier[self] ,* identifier[args] ,** identifier[options] ): literal[string] identifier[verbosity] = identifier[int] ( identifier[options] [ literal[string] ]) identifier[skip_mapping] = identifier[options] [ literal[string] ] keyword[if] identifier[self] . identifier[has_filter] ( identifier[options] ): identifier[self] . identifier[filter_indices] ( identifier[options] , identifier[verbosity] , identifier[skip_mapping] = identifier[skip_mapping] ) keyword[else] : identifier[index_builder] . identifier[delete] ( identifier[skip_mapping] = identifier[skip_mapping] )
def handle(self, *args, **options): """Command handle.""" verbosity = int(options['verbosity']) skip_mapping = options['skip_mapping'] if self.has_filter(options): self.filter_indices(options, verbosity, skip_mapping=skip_mapping) # depends on [control=['if'], data=[]] else: # Process all indices. index_builder.delete(skip_mapping=skip_mapping)
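A note on the int() coercion above: management commands receive parsed options as a plain dict, and in older Django versions verbosity could arrive as a string, so coercing defensively is cheap. A minimal illustration with a hand-built options dict:

options = {'verbosity': '2', 'skip_mapping': False}  # as the CLI parser might deliver
verbosity = int(options['verbosity'])
print(verbosity, options['skip_mapping'])  # 2 False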
async def listen(self): """Listen for messages on the backwards channel. This method only returns when the connection has been closed due to an error. """ retries = 0 # Number of retries attempted so far need_new_sid = True # whether a new SID is needed while retries <= self._max_retries: # After the first failed retry, back off exponentially longer after # each attempt. if retries > 0: backoff_seconds = self._retry_backoff_base ** retries logger.info('Backing off for %s seconds', backoff_seconds) await asyncio.sleep(backoff_seconds) # Request a new SID if we don't have one yet, or the previous one # became invalid. if need_new_sid: await self._fetch_channel_sid() need_new_sid = False # Clear any previous push data, since if there was an error it # could contain garbage. self._chunk_parser = ChunkParser() try: await self._longpoll_request() except ChannelSessionError as err: logger.warning('Long-polling interrupted: %s', err) need_new_sid = True except exceptions.NetworkError as err: logger.warning('Long-polling request failed: %s', err) else: # The connection closed successfully, so reset the number of # retries. retries = 0 continue retries += 1 logger.info('retry attempt count is now %s', retries) if self._is_connected: self._is_connected = False await self.on_disconnect.fire() # If the request ended with an error, the client must account for # messages being dropped during this time. logger.error('Ran out of retries for long-polling request')
<ast.AsyncFunctionDef object at 0x7da20c6c4880>
keyword[async] keyword[def] identifier[listen] ( identifier[self] ): literal[string] identifier[retries] = literal[int] identifier[need_new_sid] = keyword[True] keyword[while] identifier[retries] <= identifier[self] . identifier[_max_retries] : keyword[if] identifier[retries] > literal[int] : identifier[backoff_seconds] = identifier[self] . identifier[_retry_backoff_base] ** identifier[retries] identifier[logger] . identifier[info] ( literal[string] , identifier[backoff_seconds] ) keyword[await] identifier[asyncio] . identifier[sleep] ( identifier[backoff_seconds] ) keyword[if] identifier[need_new_sid] : keyword[await] identifier[self] . identifier[_fetch_channel_sid] () identifier[need_new_sid] = keyword[False] identifier[self] . identifier[_chunk_parser] = identifier[ChunkParser] () keyword[try] : keyword[await] identifier[self] . identifier[_longpoll_request] () keyword[except] identifier[ChannelSessionError] keyword[as] identifier[err] : identifier[logger] . identifier[warning] ( literal[string] , identifier[err] ) identifier[need_new_sid] = keyword[True] keyword[except] identifier[exceptions] . identifier[NetworkError] keyword[as] identifier[err] : identifier[logger] . identifier[warning] ( literal[string] , identifier[err] ) keyword[else] : identifier[retries] = literal[int] keyword[continue] identifier[retries] += literal[int] identifier[logger] . identifier[info] ( literal[string] , identifier[retries] ) keyword[if] identifier[self] . identifier[_is_connected] : identifier[self] . identifier[_is_connected] = keyword[False] keyword[await] identifier[self] . identifier[on_disconnect] . identifier[fire] () identifier[logger] . identifier[error] ( literal[string] )
async def listen(self): """Listen for messages on the backwards channel. This method only returns when the connection has been closed due to an error. """ retries = 0 # Number of retries attempted so far need_new_sid = True # whether a new SID is needed while retries <= self._max_retries: # After the first failed retry, back off exponentially longer after # each attempt. if retries > 0: backoff_seconds = self._retry_backoff_base ** retries logger.info('Backing off for %s seconds', backoff_seconds) await asyncio.sleep(backoff_seconds) # depends on [control=['if'], data=['retries']] # Request a new SID if we don't have one yet, or the previous one # became invalid. if need_new_sid: await self._fetch_channel_sid() need_new_sid = False # depends on [control=['if'], data=[]] # Clear any previous push data, since if there was an error it # could contain garbage. self._chunk_parser = ChunkParser() try: await self._longpoll_request() # depends on [control=['try'], data=[]] except ChannelSessionError as err: logger.warning('Long-polling interrupted: %s', err) need_new_sid = True # depends on [control=['except'], data=['err']] except exceptions.NetworkError as err: logger.warning('Long-polling request failed: %s', err) # depends on [control=['except'], data=['err']] else: # The connection closed successfully, so reset the number of # retries. retries = 0 continue retries += 1 logger.info('retry attempt count is now %s', retries) if self._is_connected: self._is_connected = False await self.on_disconnect.fire() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['retries']] # If the request ended with an error, the client must account for # messages being dropped during this time. logger.error('Ran out of retries for long-polling request')
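The backoff arithmetic in isolation: waits grow as base ** retries, so with an illustrative base of 2 seconds and 5 retries the sleeps would be 2, 4, 8, 16 and 32 seconds. The real _retry_backoff_base and _max_retries values are not shown in this record.

retry_backoff_base = 2  # assumed; the instance attribute is not shown here
max_retries = 5         # assumed
print([retry_backoff_base ** r for r in range(1, max_retries + 1)])  # [2, 4, 8, 16, 32]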
def enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type=1, nperm=1000, rs=np.random.RandomState(), single=False, scale=False):
    """This is the most important function of GSEApy. It has the same algorithm with GSEA and ssGSEA.

    :param gene_list:       The ordered gene list gene_name_list, rank_metric.index.values
    :param gene_set:        gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.
    :param weighted_score_type:  It's the same with gsea's weighted_score method. Weighting by the correlation
                            is a very reasonable choice that allows significant gene sets with less than perfect coherence.
                            options: 0(classic),1,1.5,2. default:1. if one is interested in penalizing sets for lack of
                            coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1
                            might be appropriate. On the other hand, if one uses sets with large number of genes and only
                            a small subset of those is expected to be coherent, then one could consider using p > 1.
                            Our recommendation is to use p = 1 and use other settings only if you are very experienced
                            with the method and its behavior.
    :param correl_vector:   A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in
                            the gene list. Or rankings, rank_metric.values
    :param nperm:           Only use this parameter when computing esnull for statistical testing. Set the esnull value
                            equal to the permutation number.
    :param rs:              Random state for initializing gene list shuffling. Default: np.random.RandomState(seed=None)

    :return:

    ES: Enrichment score (real number between -1 and +1)

    ESNULL: Enrichment score calculated from random permutations.

    Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set.

    RES: Numerical vector containing the running enrichment score for all locations in the gene list .

    """
    N = len(gene_list)
    # Test whether each element of a 1-D array is also present in a second array
    # It's more intuitive here than original enrichment_score source code.
    # use .astype to convert bool to integer
    tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int)  # notice that the sign is 0 (no tag) or 1 (tag)

    if weighted_score_type == 0 :
        correl_vector = np.repeat(1, N)
    else:
        correl_vector = np.abs(correl_vector)**weighted_score_type

    # get indices of tag_indicator
    hit_ind = np.flatnonzero(tag_indicator).tolist()
    # if used for compute esnull, set esnull equal to permutation number, e.g. 1000
    # else just compute enrichment scores
    # set axis to 1, because we have 2D array
    axis = 1
    tag_indicator = np.tile(tag_indicator, (nperm+1,1))
    correl_vector = np.tile(correl_vector,(nperm+1,1))
    # gene list permutation
    for i in range(nperm): rs.shuffle(tag_indicator[i])
    # np.apply_along_axis(rs.shuffle, 1, tag_indicator)

    Nhint = tag_indicator.sum(axis=axis, keepdims=True)
    sum_correl_tag = np.sum(correl_vector*tag_indicator, axis=axis, keepdims=True)
    # compute ES score, the code below is identical to gsea enrichment_score method.
    no_tag_indicator = 1 - tag_indicator
    Nmiss = N - Nhint
    norm_tag = 1.0/sum_correl_tag
    norm_no_tag = 1.0/Nmiss

    RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag, axis=axis)

    if scale: RES = RES / N
    if single:
        es_vec = RES.sum(axis=axis)
    else:
        max_ES, min_ES = RES.max(axis=axis), RES.min(axis=axis)
        es_vec = np.where(np.abs(max_ES) > np.abs(min_ES), max_ES, min_ES)
    # extract values
    es, esnull, RES = es_vec[-1], es_vec[:-1], RES[-1,:]

    return es, esnull, hit_ind, RES
def function[enrichment_score, parameter[gene_list, correl_vector, gene_set, weighted_score_type, nperm, rs, single, scale]]:
    constant[This is the most important function of GSEApy. It has the same algorithm with GSEA and ssGSEA. :param gene_list: The ordered gene list gene_name_list, rank_metric.index.values :param gene_set: gene_sets in gmt file, please use gsea_gmt_parser to get gene_set. :param weighted_score_type: It's the same with gsea's weighted_score method. Weighting by the correlation is a very reasonable choice that allows significant gene sets with less than perfect coherence. options: 0(classic),1,1.5,2. default:1. if one is interested in penalizing sets for lack of coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1 might be appropriate. On the other hand, if one uses sets with large number of genes and only a small subset of those is expected to be coherent, then one could consider using p > 1. Our recommendation is to use p = 1 and use other settings only if you are very experienced with the method and its behavior. :param correl_vector: A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in the gene list. Or rankings, rank_metric.values :param nperm: Only use this parameter when computing esnull for statistical testing. Set the esnull value equal to the permutation number. :param rs: Random state for initializing gene list shuffling. Default: np.random.RandomState(seed=None) :return: ES: Enrichment score (real number between -1 and +1) ESNULL: Enrichment score calculated from random permutations. Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set. RES: Numerical vector containing the running enrichment score for all locations in the gene list . ]
    variable[N] assign[=] call[name[len], parameter[name[gene_list]]]
    variable[tag_indicator] assign[=] call[call[name[np].in1d, parameter[name[gene_list], name[gene_set]]].astype, parameter[name[int]]]
    if compare[name[weighted_score_type] equal[==] constant[0]] begin[:]
        variable[correl_vector] assign[=] call[name[np].repeat, parameter[constant[1], name[N]]]
    variable[hit_ind] assign[=] call[call[name[np].flatnonzero, parameter[name[tag_indicator]]].tolist, parameter[]]
    variable[axis] assign[=] constant[1]
    variable[tag_indicator] assign[=] call[name[np].tile, parameter[name[tag_indicator], tuple[[<ast.BinOp object at 0x7da20c9911e0>, <ast.Constant object at 0x7da20c9903a0>]]]]
    variable[correl_vector] assign[=] call[name[np].tile, parameter[name[correl_vector], tuple[[<ast.BinOp object at 0x7da20c991b40>, <ast.Constant object at 0x7da20c9937f0>]]]]
    for taget[name[i]] in starred[call[name[range], parameter[name[nperm]]]] begin[:]
        call[name[rs].shuffle, parameter[call[name[tag_indicator]][name[i]]]]
    variable[Nhint] assign[=] call[name[tag_indicator].sum, parameter[]]
    variable[sum_correl_tag] assign[=] call[name[np].sum, parameter[binary_operation[name[correl_vector] * name[tag_indicator]]]]
    variable[no_tag_indicator] assign[=] binary_operation[constant[1] - name[tag_indicator]]
    variable[Nmiss] assign[=] binary_operation[name[N] - name[Nhint]]
    variable[norm_tag] assign[=] binary_operation[constant[1.0] / name[sum_correl_tag]]
    variable[norm_no_tag] assign[=] binary_operation[constant[1.0] / name[Nmiss]]
    variable[RES] assign[=] call[name[np].cumsum, parameter[binary_operation[binary_operation[binary_operation[name[tag_indicator] * name[correl_vector]] * name[norm_tag]] - binary_operation[name[no_tag_indicator] * name[norm_no_tag]]]]]
    if name[scale] begin[:]
        variable[RES] assign[=] binary_operation[name[RES] / name[N]]
    if name[single] begin[:]
        variable[es_vec] assign[=] call[name[RES].sum, parameter[]]
    <ast.Tuple object at 0x7da20c991ea0> assign[=] tuple[[<ast.Subscript object at 0x7da20c9921a0>, <ast.Subscript object at 0x7da20c991b10>, <ast.Subscript object at 0x7da20c990160>]]
    return[tuple[[<ast.Name object at 0x7da20c991870>, <ast.Name object at 0x7da20c990580>, <ast.Name object at 0x7da20c990e20>, <ast.Name object at 0x7da20c9904f0>]]]
keyword[def] identifier[enrichment_score] ( identifier[gene_list] , identifier[correl_vector] , identifier[gene_set] , identifier[weighted_score_type] = literal[int] , identifier[nperm] = literal[int] , identifier[rs] = identifier[np] . identifier[random] . identifier[RandomState] (), identifier[single] = keyword[False] , identifier[scale] = keyword[False] ): literal[string] identifier[N] = identifier[len] ( identifier[gene_list] ) identifier[tag_indicator] = identifier[np] . identifier[in1d] ( identifier[gene_list] , identifier[gene_set] , identifier[assume_unique] = keyword[True] ). identifier[astype] ( identifier[int] ) keyword[if] identifier[weighted_score_type] == literal[int] : identifier[correl_vector] = identifier[np] . identifier[repeat] ( literal[int] , identifier[N] ) keyword[else] : identifier[correl_vector] = identifier[np] . identifier[abs] ( identifier[correl_vector] )** identifier[weighted_score_type] identifier[hit_ind] = identifier[np] . identifier[flatnonzero] ( identifier[tag_indicator] ). identifier[tolist] () identifier[axis] = literal[int] identifier[tag_indicator] = identifier[np] . identifier[tile] ( identifier[tag_indicator] ,( identifier[nperm] + literal[int] , literal[int] )) identifier[correl_vector] = identifier[np] . identifier[tile] ( identifier[correl_vector] ,( identifier[nperm] + literal[int] , literal[int] )) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nperm] ): identifier[rs] . identifier[shuffle] ( identifier[tag_indicator] [ identifier[i] ]) identifier[Nhint] = identifier[tag_indicator] . identifier[sum] ( identifier[axis] = identifier[axis] , identifier[keepdims] = keyword[True] ) identifier[sum_correl_tag] = identifier[np] . identifier[sum] ( identifier[correl_vector] * identifier[tag_indicator] , identifier[axis] = identifier[axis] , identifier[keepdims] = keyword[True] ) identifier[no_tag_indicator] = literal[int] - identifier[tag_indicator] identifier[Nmiss] = identifier[N] - identifier[Nhint] identifier[norm_tag] = literal[int] / identifier[sum_correl_tag] identifier[norm_no_tag] = literal[int] / identifier[Nmiss] identifier[RES] = identifier[np] . identifier[cumsum] ( identifier[tag_indicator] * identifier[correl_vector] * identifier[norm_tag] - identifier[no_tag_indicator] * identifier[norm_no_tag] , identifier[axis] = identifier[axis] ) keyword[if] identifier[scale] : identifier[RES] = identifier[RES] / identifier[N] keyword[if] identifier[single] : identifier[es_vec] = identifier[RES] . identifier[sum] ( identifier[axis] = identifier[axis] ) keyword[else] : identifier[max_ES] , identifier[min_ES] = identifier[RES] . identifier[max] ( identifier[axis] = identifier[axis] ), identifier[RES] . identifier[min] ( identifier[axis] = identifier[axis] ) identifier[es_vec] = identifier[np] . identifier[where] ( identifier[np] . identifier[abs] ( identifier[max_ES] )> identifier[np] . identifier[abs] ( identifier[min_ES] ), identifier[max_ES] , identifier[min_ES] ) identifier[es] , identifier[esnull] , identifier[RES] = identifier[es_vec] [- literal[int] ], identifier[es_vec] [:- literal[int] ], identifier[RES] [- literal[int] ,:] keyword[return] identifier[es] , identifier[esnull] , identifier[hit_ind] , identifier[RES]
def enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type=1, nperm=1000,
                     rs=np.random.RandomState(), single=False, scale=False):
    """This is the most important function of GSEApy. It implements the same algorithm as GSEA and ssGSEA.

    :param gene_list:       The ordered gene list gene_name_list, rank_metric.index.values
    :param gene_set:        gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.
    :param weighted_score_type: The same as GSEA's weighted_score method. Weighting by the correlation
                            is a very reasonable choice that allows significant gene sets with less than perfect coherence.
                            options: 0(classic), 1, 1.5, 2. default: 1. If one is interested in penalizing sets for lack of
                            coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1
                            might be appropriate. On the other hand, if one uses sets with a large number of genes and only
                            a small subset of those is expected to be coherent, then one could consider using p > 1.
                            Our recommendation is to use p = 1 and use other settings only if you are very experienced
                            with the method and its behavior.
    :param correl_vector:   A vector with the correlations (e.g. signal-to-noise scores) corresponding to the genes in
                            the gene list, or rankings, rank_metric.values
    :param nperm:           Only use this parameter when computing esnull for statistical testing. Set the esnull value
                            equal to the permutation number.
    :param rs:              Random state for initializing gene list shuffling. Default: np.random.RandomState(seed=None)
    :return:
        ES: Enrichment score (real number between -1 and +1)
        ESNULL: Enrichment scores calculated from random permutations.
        Hits_Indices: Index of a gene in gene_list, if the gene is included in gene_set.
        RES: Numerical vector containing the running enrichment score for all locations in the gene list.
    """
    N = len(gene_list)
    # Test whether each element of a 1-D array is also present in a second array.
    # It's more intuitive here than the original enrichment_score source code.
    # Use .astype to convert bool to integer.
    tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int)  # notice that the sign is 0 (no tag) or 1 (tag)
    if weighted_score_type == 0:
        correl_vector = np.repeat(1, N) # depends on [control=['if'], data=[]]
    else:
        correl_vector = np.abs(correl_vector) ** weighted_score_type
    # get indices of tag_indicator
    hit_ind = np.flatnonzero(tag_indicator).tolist()
    # if used to compute esnull, set esnull equal to the permutation number, e.g. 1000;
    # else just compute enrichment scores
    # set axis to 1, because we have a 2D array
    axis = 1
    tag_indicator = np.tile(tag_indicator, (nperm + 1, 1))
    correl_vector = np.tile(correl_vector, (nperm + 1, 1))
    # gene list permutation
    for i in range(nperm):
        rs.shuffle(tag_indicator[i]) # depends on [control=['for'], data=['i']]
    # np.apply_along_axis(rs.shuffle, 1, tag_indicator)
    Nhint = tag_indicator.sum(axis=axis, keepdims=True)
    sum_correl_tag = np.sum(correl_vector * tag_indicator, axis=axis, keepdims=True)
    # compute ES score; the code below is identical to the GSEA enrichment_score method. 
no_tag_indicator = 1 - tag_indicator Nmiss = N - Nhint norm_tag = 1.0 / sum_correl_tag norm_no_tag = 1.0 / Nmiss RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag, axis=axis) if scale: RES = RES / N # depends on [control=['if'], data=[]] if single: es_vec = RES.sum(axis=axis) # depends on [control=['if'], data=[]] else: (max_ES, min_ES) = (RES.max(axis=axis), RES.min(axis=axis)) es_vec = np.where(np.abs(max_ES) > np.abs(min_ES), max_ES, min_ES) # extract values (es, esnull, RES) = (es_vec[-1], es_vec[:-1], RES[-1, :]) return (es, esnull, hit_ind, RES)
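A minimal sketch of driving enrichment_score on synthetic data, assuming the definition above (and numpy) are importable; the gene names, ranking metric, and seed are all illustrative.

import numpy as np

gene_list = np.array(["G%02d" % i for i in range(50)])   # ranked gene names, best first
correl_vector = np.linspace(2.0, -2.0, 50)               # matching ranking metric
gene_set = ["G01", "G03", "G07", "G20"]                  # toy gene set
rs = np.random.RandomState(0)                            # fixed seed for reproducibility

es, esnull, hit_ind, RES = enrichment_score(
    gene_list, correl_vector, gene_set,
    weighted_score_type=1, nperm=100, rs=rs)
print(round(es, 3), len(esnull), hit_ind)                # observed ES, 100 null scores, [1, 3, 7, 20]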
def create_kubernetes_role(self,
                           name,
                           bound_service_account_names,
                           bound_service_account_namespaces,
                           ttl="",
                           max_ttl="",
                           period="",
                           policies=None,
                           mount_point='kubernetes'):
        """POST /auth/<mount_point>/role/:name

        :param name: Name of the role.
        :type name: str.
        :param bound_service_account_names: List of service account names able to access this role. If set to "*",
            all names are allowed; this and bound_service_account_namespaces cannot both be "*".
        :type bound_service_account_names: list.
        :param bound_service_account_namespaces: List of namespaces allowed to access this role. If set to "*",
            all namespaces are allowed; this and bound_service_account_names cannot both be "*".
        :type bound_service_account_namespaces: list.
        :param ttl: The TTL of tokens issued using this role, in seconds.
        :type ttl: str.
        :param max_ttl: The maximum allowed lifetime, in seconds, of tokens issued using this role.
        :type max_ttl: str.
        :param period: If set, indicates that the token generated using this role should never expire. The token should
            be renewed within the duration specified by this value. At each renewal, the token's TTL will be set to the
            value of this parameter.
        :type period: str.
        :param policies: Policies to be set on tokens issued using this role.
        :type policies: list.
        :param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes".
        :type mount_point: str.
        :return: Will be an empty body with a 204 status code upon success.
        :rtype: requests.Response.
        """
        if bound_service_account_names == '*' and bound_service_account_namespaces == '*':
            error_message = 'bound_service_account_names and bound_service_account_namespaces can not both be set to "*"'
            raise exceptions.ParamValidationError(error_message)

        params = {
            'bound_service_account_names': bound_service_account_names,
            'bound_service_account_namespaces': bound_service_account_namespaces,
            'ttl': ttl,
            'max_ttl': max_ttl,
            'period': period,
            'policies': policies,
        }
        url = 'v1/auth/{0}/role/{1}'.format(mount_point, name)
        return self._adapter.post(url, json=params)
def function[create_kubernetes_role, parameter[self, name, bound_service_account_names, bound_service_account_namespaces, ttl, max_ttl, period, policies, mount_point]]: constant[POST /auth/<mount_point>/role/:name :param name: Name of the role. :type name: str. :param bound_service_account_names: List of service account names able to access this role. If set to "*" all names are allowed, both this and bound_service_account_namespaces can not be "*". :type bound_service_account_names: list. :param bound_service_account_namespaces: List of namespaces allowed to access this role. If set to "*" all namespaces are allowed, both this and bound_service_account_names can not be set to "*". :type bound_service_account_namespaces: list. :param ttl: The TTL period of tokens issued using this role in seconds. :type ttl: str. :param max_ttl: The maximum allowed lifetime of tokens issued in seconds using this role. :type max_ttl: str. :param period: If set, indicates that the token generated using this role should never expire. The token should be renewed within the duration specified by this value. At each renewal, the token's TTL will be set to the value of this parameter. :type period: str. :param policies: Policies to be set on tokens issued using this role :type policies: list. :param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes". :type mount_point: str. :return: Will be an empty body with a 204 status code upon success :rtype: requests.Response. ] if <ast.BoolOp object at 0x7da20c993e80> begin[:] variable[error_message] assign[=] constant[bound_service_account_names and bound_service_account_namespaces can not both be set to "*"] <ast.Raise object at 0x7da20c992c20> variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c9904c0>, <ast.Constant object at 0x7da20c9935e0>, <ast.Constant object at 0x7da20c993310>, <ast.Constant object at 0x7da20c990670>, <ast.Constant object at 0x7da20c990250>, <ast.Constant object at 0x7da20c991c60>], [<ast.Name object at 0x7da20c990e80>, <ast.Name object at 0x7da20c990e20>, <ast.Name object at 0x7da20c9912a0>, <ast.Name object at 0x7da20c993190>, <ast.Name object at 0x7da20c992170>, <ast.Name object at 0x7da20c990d90>]] variable[url] assign[=] call[constant[v1/auth/{0}/role/{1}].format, parameter[name[mount_point], name[name]]] return[call[name[self]._adapter.post, parameter[name[url]]]]
keyword[def] identifier[create_kubernetes_role] ( identifier[self] , identifier[name] , identifier[bound_service_account_names] , identifier[bound_service_account_namespaces] , identifier[ttl] = literal[string] , identifier[max_ttl] = literal[string] , identifier[period] = literal[string] , identifier[policies] = keyword[None] , identifier[mount_point] = literal[string] ): literal[string] keyword[if] identifier[bound_service_account_names] == literal[string] keyword[and] identifier[bound_service_account_namespaces] == literal[string] : identifier[error_message] = literal[string] keyword[raise] identifier[exceptions] . identifier[ParamValidationError] ( identifier[error_message] ) identifier[params] ={ literal[string] : identifier[bound_service_account_names] , literal[string] : identifier[bound_service_account_namespaces] , literal[string] : identifier[ttl] , literal[string] : identifier[max_ttl] , literal[string] : identifier[period] , literal[string] : identifier[policies] , } identifier[url] = literal[string] . identifier[format] ( identifier[mount_point] , identifier[name] ) keyword[return] identifier[self] . identifier[_adapter] . identifier[post] ( identifier[url] , identifier[json] = identifier[params] )
def create_kubernetes_role(self, name, bound_service_account_names, bound_service_account_namespaces, ttl='', max_ttl='', period='', policies=None, mount_point='kubernetes'): """POST /auth/<mount_point>/role/:name :param name: Name of the role. :type name: str. :param bound_service_account_names: List of service account names able to access this role. If set to "*" all names are allowed, both this and bound_service_account_namespaces can not be "*". :type bound_service_account_names: list. :param bound_service_account_namespaces: List of namespaces allowed to access this role. If set to "*" all namespaces are allowed, both this and bound_service_account_names can not be set to "*". :type bound_service_account_namespaces: list. :param ttl: The TTL period of tokens issued using this role in seconds. :type ttl: str. :param max_ttl: The maximum allowed lifetime of tokens issued in seconds using this role. :type max_ttl: str. :param period: If set, indicates that the token generated using this role should never expire. The token should be renewed within the duration specified by this value. At each renewal, the token's TTL will be set to the value of this parameter. :type period: str. :param policies: Policies to be set on tokens issued using this role :type policies: list. :param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes". :type mount_point: str. :return: Will be an empty body with a 204 status code upon success :rtype: requests.Response. """ if bound_service_account_names == '*' and bound_service_account_namespaces == '*': error_message = 'bound_service_account_names and bound_service_account_namespaces can not both be set to "*"' raise exceptions.ParamValidationError(error_message) # depends on [control=['if'], data=[]] params = {'bound_service_account_names': bound_service_account_names, 'bound_service_account_namespaces': bound_service_account_namespaces, 'ttl': ttl, 'max_ttl': max_ttl, 'period': period, 'policies': policies} url = 'v1/auth/{0}/role/{1}'.format(mount_point, name) return self._adapter.post(url, json=params)
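A hypothetical call against an hvac-style Vault client that exposes the method above; the client constructor, URL, token, and role values are all illustrative.

client = Client(url="https://vault.example.com:8200", token="s.xxxxxxxx")  # assumed constructor
client.create_kubernetes_role(
    name="demo-role",
    bound_service_account_names=["vault-auth"],
    bound_service_account_namespaces=["default"],     # must not both be "*"
    ttl="3600",
    policies=["demo-policy"],
)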
def device_time_str(self, resp, indent=" "):
        """Convenience method that formats a device-time response as a string."""
        time = resp.time
        uptime = resp.uptime
        downtime = resp.downtime
        time_s = datetime.datetime.utcfromtimestamp(time / 1000000000) if time is not None else None
        uptime_s = round(nanosec_to_hours(uptime), 2) if uptime is not None else None
        downtime_s = round(nanosec_to_hours(downtime), 2) if downtime is not None else None
        s = "Current Time: {} ({} UTC)\n".format(time, time_s)
        s += indent + "Uptime (ns): {} ({} hours)\n".format(uptime, uptime_s)
        s += indent + "Last Downtime Duration +/-5s (ns): {} ({} hours)\n".format(downtime, downtime_s)
        return s
def function[device_time_str, parameter[self, resp, indent]]: constant[Convenience to string method. ] variable[time] assign[=] name[resp].time variable[uptime] assign[=] name[resp].uptime variable[downtime] assign[=] name[resp].downtime variable[time_s] assign[=] <ast.IfExp object at 0x7da18f812830> variable[uptime_s] assign[=] <ast.IfExp object at 0x7da18f811510> variable[downtime_s] assign[=] <ast.IfExp object at 0x7da18f8114e0> variable[s] assign[=] call[constant[Current Time: {} ({} UTC) ].format, parameter[name[time], name[time_s]]] <ast.AugAssign object at 0x7da18f811d80> <ast.AugAssign object at 0x7da18bc71000> return[name[s]]
keyword[def] identifier[device_time_str] ( identifier[self] , identifier[resp] , identifier[indent] = literal[string] ): literal[string] identifier[time] = identifier[resp] . identifier[time] identifier[uptime] = identifier[resp] . identifier[uptime] identifier[downtime] = identifier[resp] . identifier[downtime] identifier[time_s] = identifier[datetime] . identifier[datetime] . identifier[utcfromtimestamp] ( identifier[time] / literal[int] ) keyword[if] identifier[time] != keyword[None] keyword[else] keyword[None] identifier[uptime_s] = identifier[round] ( identifier[nanosec_to_hours] ( identifier[uptime] ), literal[int] ) keyword[if] identifier[uptime] != keyword[None] keyword[else] keyword[None] identifier[downtime_s] = identifier[round] ( identifier[nanosec_to_hours] ( identifier[downtime] ), literal[int] ) keyword[if] identifier[downtime] != keyword[None] keyword[else] keyword[None] identifier[s] = literal[string] . identifier[format] ( identifier[time] , identifier[time_s] ) identifier[s] += identifier[indent] + literal[string] . identifier[format] ( identifier[uptime] , identifier[uptime_s] ) identifier[s] += identifier[indent] + literal[string] . identifier[format] ( identifier[downtime] , identifier[downtime_s] ) keyword[return] identifier[s]
def device_time_str(self, resp, indent=' '): """Convenience to string method. """ time = resp.time uptime = resp.uptime downtime = resp.downtime time_s = datetime.datetime.utcfromtimestamp(time / 1000000000) if time != None else None uptime_s = round(nanosec_to_hours(uptime), 2) if uptime != None else None downtime_s = round(nanosec_to_hours(downtime), 2) if downtime != None else None s = 'Current Time: {} ({} UTC)\n'.format(time, time_s) s += indent + 'Uptime (ns): {} ({} hours)\n'.format(uptime, uptime_s) s += indent + 'Last Downtime Duration +/-5s (ns): {} ({} hours)\n'.format(downtime, downtime_s) return s
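A self-contained sketch exercising the formatter above; nanosec_to_hours is a stand-in for the helper the method assumes to exist in its module, and the method is invoked as a free function for brevity since it never uses self.

import datetime
from types import SimpleNamespace

def nanosec_to_hours(ns):                         # stand-in for the assumed helper
    return ns / 3.6e12

resp = SimpleNamespace(time=1600000000 * 10**9,   # 2020-09-13 UTC, in nanoseconds
                       uptime=7200 * 10**9,       # 2 hours of uptime
                       downtime=None)             # downtime unknown
print(device_time_str(None, resp))                # self is unused by the formatting logic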
def add_permute(self, name, dim, input_name, output_name):
        """
        Add a permute layer. Assumes that the input has dimensions in the order [Seq, C, H, W]

        Parameters
        ----------
        name: str
            The name of this layer.
        dim: tuple
            The order in which to permute the input dimensions = [seq,C,H,W].
            Must have length 4 and be a permutation of ``[0, 1, 2, 3]``.

            examples:

            Let's say the input has shape: [seq, C, H, W].
            If ``dim`` is set to ``[0, 3, 1, 2]``,
            then the output has shape ``[W,C,H]``
            and has the same sequence length as the input.

            If ``dim`` is set to ``[3, 1, 2, 0]``,
            and the input is a sequence of data
            with length ``Seq`` and shape ``[C, 1, 1]``,
            then the output is a unit sequence of data with shape ``[C, 1, Seq]``.

            If ``dim`` is set to ``[0, 3, 2, 1]``,
            the output is a reverse of the input: ``[C, H, W] -> [W, H, C]``.

            If ``dim`` is not set, or is set to ``[0, 1, 2, 3]``,
            the output is the same as the input.
        input_name: str
            The input blob name of this layer.
        output_name: str
            The output blob name of this layer.

        See Also
        --------
        add_flatten, add_reshape
        """
        # Validate before mutating the spec, so a bad `dim` does not leave a
        # half-built layer behind.
        if len(dim) != 4:
            raise ValueError("Length of the 'dim' parameter must be equal to 4")

        spec = self.spec
        nn_spec = self.nn_spec

        # Add a new layer
        spec_layer = nn_spec.layers.add()
        spec_layer.name = name
        spec_layer.input.append(input_name)
        spec_layer.output.append(output_name)

        spec_layer_params = spec_layer.permute
        spec_layer_params.axis.extend(list(dim))
def function[add_permute, parameter[self, name, dim, input_name, output_name]]: constant[ Add a permute layer. Assumes that the input has dimensions in the order [Seq, C, H, W] Parameters ---------- name: str The name of this layer. dim: tuple The order in which to permute the input dimensions = [seq,C,H,W]. Must have length 4 and a permutation of ``[0, 1, 2, 3]``. examples: Lets say input has shape: [seq, C, H, W]. If ``dim`` is set to ``[0, 3, 1, 2]``, then the output has shape ``[W,C,H]`` and has the same sequence length that of the input. If ``dim`` is set to ``[3, 1, 2, 0]``, and the input is a sequence of data with length ``Seq`` and shape ``[C, 1, 1]``, then the output is a unit sequence of data with shape ``[C, 1, Seq]``. If ``dim`` is set to ``[0, 3, 2, 1]``, the output is a reverse of the input: ``[C, H, W] -> [W, H, C]``. If ``dim`` is not set, or is set to ``[0, 1, 2, 3]``, the output is the same as the input. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_flatten, add_reshape ] variable[spec] assign[=] name[self].spec variable[nn_spec] assign[=] name[self].nn_spec variable[spec_layer] assign[=] call[name[nn_spec].layers.add, parameter[]] name[spec_layer].name assign[=] name[name] call[name[spec_layer].input.append, parameter[name[input_name]]] call[name[spec_layer].output.append, parameter[name[output_name]]] variable[spec_layer_params] assign[=] name[spec_layer].permute call[name[spec_layer_params].axis.extend, parameter[call[name[list], parameter[name[dim]]]]] if compare[call[name[len], parameter[name[dim]]] not_equal[!=] constant[4]] begin[:] <ast.Raise object at 0x7da1b208d1b0>
keyword[def] identifier[add_permute] ( identifier[self] , identifier[name] , identifier[dim] , identifier[input_name] , identifier[output_name] ): literal[string] identifier[spec] = identifier[self] . identifier[spec] identifier[nn_spec] = identifier[self] . identifier[nn_spec] identifier[spec_layer] = identifier[nn_spec] . identifier[layers] . identifier[add] () identifier[spec_layer] . identifier[name] = identifier[name] identifier[spec_layer] . identifier[input] . identifier[append] ( identifier[input_name] ) identifier[spec_layer] . identifier[output] . identifier[append] ( identifier[output_name] ) identifier[spec_layer_params] = identifier[spec_layer] . identifier[permute] identifier[spec_layer_params] . identifier[axis] . identifier[extend] ( identifier[list] ( identifier[dim] )) keyword[if] identifier[len] ( identifier[dim] )!= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] )
def add_permute(self, name, dim, input_name, output_name): """ Add a permute layer. Assumes that the input has dimensions in the order [Seq, C, H, W] Parameters ---------- name: str The name of this layer. dim: tuple The order in which to permute the input dimensions = [seq,C,H,W]. Must have length 4 and a permutation of ``[0, 1, 2, 3]``. examples: Lets say input has shape: [seq, C, H, W]. If ``dim`` is set to ``[0, 3, 1, 2]``, then the output has shape ``[W,C,H]`` and has the same sequence length that of the input. If ``dim`` is set to ``[3, 1, 2, 0]``, and the input is a sequence of data with length ``Seq`` and shape ``[C, 1, 1]``, then the output is a unit sequence of data with shape ``[C, 1, Seq]``. If ``dim`` is set to ``[0, 3, 2, 1]``, the output is a reverse of the input: ``[C, H, W] -> [W, H, C]``. If ``dim`` is not set, or is set to ``[0, 1, 2, 3]``, the output is the same as the input. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_flatten, add_reshape """ spec = self.spec nn_spec = self.nn_spec # Add a new layer spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.permute spec_layer_params.axis.extend(list(dim)) if len(dim) != 4: raise ValueError("Length of the 'dim' parameter must be equal to 4") # depends on [control=['if'], data=[]]
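A sketch assuming a coremltools-style NeuralNetworkBuilder instance named builder; the blob names and the permutation are illustrative.

# Reorder [Seq, C, H, W] -> [Seq, W, C, H] on the blob "data".
builder.add_permute(name="permute_1",
                    dim=(0, 3, 1, 2),
                    input_name="data",
                    output_name="data_permuted")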
def dispatch_write(self, buf): """There is new stuff to write when possible""" if self.state != STATE_DEAD and self.enabled: super().dispatch_write(buf) return True return False
def function[dispatch_write, parameter[self, buf]]: constant[There is new stuff to write when possible] if <ast.BoolOp object at 0x7da20cabef50> begin[:] call[call[name[super], parameter[]].dispatch_write, parameter[name[buf]]] return[constant[True]] return[constant[False]]
keyword[def] identifier[dispatch_write] ( identifier[self] , identifier[buf] ): literal[string] keyword[if] identifier[self] . identifier[state] != identifier[STATE_DEAD] keyword[and] identifier[self] . identifier[enabled] : identifier[super] (). identifier[dispatch_write] ( identifier[buf] ) keyword[return] keyword[True] keyword[return] keyword[False]
def dispatch_write(self, buf): """There is new stuff to write when possible""" if self.state != STATE_DEAD and self.enabled: super().dispatch_write(buf) return True # depends on [control=['if'], data=[]] return False
def requireCompatibleAPI(): """If PyQt4's API should be configured to be compatible with PySide's (i.e. QString and QVariant should not be explicitly exported, cf. documentation of sip.setapi()), call this function to check that the PyQt4 was properly imported. (It will always be configured this way by this module, but it could have been imported before we got a hand on doing so.) """ if 'PyQt4.QtCore' in sys.modules: import sip for api in ('QVariant', 'QString'): if sip.getapi(api) != 2: raise RuntimeError('%s API already set to V%d, but should be 2' % (api, sip.getapi(api)))
def function[requireCompatibleAPI, parameter[]]: constant[If PyQt4's API should be configured to be compatible with PySide's (i.e. QString and QVariant should not be explicitly exported, cf. documentation of sip.setapi()), call this function to check that the PyQt4 was properly imported. (It will always be configured this way by this module, but it could have been imported before we got a hand on doing so.) ] if compare[constant[PyQt4.QtCore] in name[sys].modules] begin[:] import module[sip] for taget[name[api]] in starred[tuple[[<ast.Constant object at 0x7da18fe91ea0>, <ast.Constant object at 0x7da18fe92aa0>]]] begin[:] if compare[call[name[sip].getapi, parameter[name[api]]] not_equal[!=] constant[2]] begin[:] <ast.Raise object at 0x7da18fe91f60>
keyword[def] identifier[requireCompatibleAPI] (): literal[string] keyword[if] literal[string] keyword[in] identifier[sys] . identifier[modules] : keyword[import] identifier[sip] keyword[for] identifier[api] keyword[in] ( literal[string] , literal[string] ): keyword[if] identifier[sip] . identifier[getapi] ( identifier[api] )!= literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] %( identifier[api] , identifier[sip] . identifier[getapi] ( identifier[api] )))
def requireCompatibleAPI(): """If PyQt4's API should be configured to be compatible with PySide's (i.e. QString and QVariant should not be explicitly exported, cf. documentation of sip.setapi()), call this function to check that the PyQt4 was properly imported. (It will always be configured this way by this module, but it could have been imported before we got a hand on doing so.) """ if 'PyQt4.QtCore' in sys.modules: import sip for api in ('QVariant', 'QString'): if sip.getapi(api) != 2: raise RuntimeError('%s API already set to V%d, but should be 2' % (api, sip.getapi(api))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['api']] # depends on [control=['if'], data=[]]
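A sketch of the intended call order in a Python 2 / PyQt4 environment: select sip API version 2 before the first PyQt4 import, then verify with the check above.

import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
from PyQt4 import QtCore          # the first PyQt4 import locks the API in

requireCompatibleAPI()            # passes; would raise RuntimeError had V1 been set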
def sample_conditional(self, y, t, size=1): """ Draw samples from the predictive conditional distribution. You must call :func:`GP.compute` before this function. :param y: ``(nsamples, )`` The observations to condition the model on. :param t: ``(ntest, )`` or ``(ntest, ndim)`` The coordinates where the predictive distribution should be computed. :param size: (optional) The number of samples to draw. (default: ``1``) Returns **samples** ``(size, ntest)``, a list of predictions at coordinates given by ``t``. """ mu, cov = self.predict(y, t) return multivariate_gaussian_samples(cov, size, mean=mu)
def function[sample_conditional, parameter[self, y, t, size]]: constant[ Draw samples from the predictive conditional distribution. You must call :func:`GP.compute` before this function. :param y: ``(nsamples, )`` The observations to condition the model on. :param t: ``(ntest, )`` or ``(ntest, ndim)`` The coordinates where the predictive distribution should be computed. :param size: (optional) The number of samples to draw. (default: ``1``) Returns **samples** ``(size, ntest)``, a list of predictions at coordinates given by ``t``. ] <ast.Tuple object at 0x7da1b0721ba0> assign[=] call[name[self].predict, parameter[name[y], name[t]]] return[call[name[multivariate_gaussian_samples], parameter[name[cov], name[size]]]]
keyword[def] identifier[sample_conditional] ( identifier[self] , identifier[y] , identifier[t] , identifier[size] = literal[int] ): literal[string] identifier[mu] , identifier[cov] = identifier[self] . identifier[predict] ( identifier[y] , identifier[t] ) keyword[return] identifier[multivariate_gaussian_samples] ( identifier[cov] , identifier[size] , identifier[mean] = identifier[mu] )
def sample_conditional(self, y, t, size=1): """ Draw samples from the predictive conditional distribution. You must call :func:`GP.compute` before this function. :param y: ``(nsamples, )`` The observations to condition the model on. :param t: ``(ntest, )`` or ``(ntest, ndim)`` The coordinates where the predictive distribution should be computed. :param size: (optional) The number of samples to draw. (default: ``1``) Returns **samples** ``(size, ntest)``, a list of predictions at coordinates given by ``t``. """ (mu, cov) = self.predict(y, t) return multivariate_gaussian_samples(cov, size, mean=mu)
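A sketch assuming a george-style GP object that provides the compute/predict interface documented above; the kernel choice and data are illustrative.

import numpy as np
import george

gp = george.GP(george.kernels.ExpSquaredKernel(metric=1.0))
x = np.sort(np.random.rand(20))
y = np.sin(10 * x) + 0.01 * np.random.randn(20)
gp.compute(x, yerr=0.01)                       # must precede sample_conditional

t = np.linspace(0, 1, 50)
samples = gp.sample_conditional(y, t, size=5)  # -> array of shape (5, 50)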
def learn(self, msg, learnas): """Learn message as spam/ham or forget""" if not isinstance(learnas, types.StringTypes): raise SpamCError('The learnas option is invalid') if learnas.lower() == 'forget': resp = self.tell(msg, 'forget') else: resp = self.tell(msg, 'learn', learnas) return resp
def function[learn, parameter[self, msg, learnas]]: constant[Learn message as spam/ham or forget] if <ast.UnaryOp object at 0x7da1b0bb0610> begin[:] <ast.Raise object at 0x7da204566b60> if compare[call[name[learnas].lower, parameter[]] equal[==] constant[forget]] begin[:] variable[resp] assign[=] call[name[self].tell, parameter[name[msg], constant[forget]]] return[name[resp]]
keyword[def] identifier[learn] ( identifier[self] , identifier[msg] , identifier[learnas] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[learnas] , identifier[types] . identifier[StringTypes] ): keyword[raise] identifier[SpamCError] ( literal[string] ) keyword[if] identifier[learnas] . identifier[lower] ()== literal[string] : identifier[resp] = identifier[self] . identifier[tell] ( identifier[msg] , literal[string] ) keyword[else] : identifier[resp] = identifier[self] . identifier[tell] ( identifier[msg] , literal[string] , identifier[learnas] ) keyword[return] identifier[resp]
def learn(self, msg, learnas): """Learn message as spam/ham or forget""" if not isinstance(learnas, types.StringTypes): raise SpamCError('The learnas option is invalid') # depends on [control=['if'], data=[]] if learnas.lower() == 'forget': resp = self.tell(msg, 'forget') # depends on [control=['if'], data=[]] else: resp = self.tell(msg, 'learn', learnas) return resp
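A hypothetical session with a spamc-style client exposing the method above; the constructor, host, and message file are illustrative.

client = SpamC(host="127.0.0.1")               # assumed client constructor
with open("sample-spam.eml", "rb") as fh:
    msg = fh.read()

client.learn(msg, "spam")                      # train the message as spam
client.learn(msg, "forget")                    # undo the training again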
def time_range(self, flag=None):
        '''
        time range of the current dataset

        :keyword flag: use a flag array to know the time range of an indexed slice of the object
        '''
        if self.count == 0:
            return [[None, None], [None, None]]
        if flag is None:
            return cnes_convert([self.date.min(), self.date.max()])
        else:
            return cnes_convert([self.date.compress(flag).min(), self.date.compress(flag).max()])
def function[time_range, parameter[self, flag]]: constant[ time range of the current dataset :keyword flag: use a flag array to know the time range of an indexed slice of the object ] if compare[name[self].count equal[==] constant[0]] begin[:] return[list[[<ast.List object at 0x7da1b0812980>, <ast.List object at 0x7da1b0812890>]]] if compare[name[flag] is constant[None]] begin[:] return[call[name[cnes_convert], parameter[list[[<ast.Call object at 0x7da1b08131f0>, <ast.Call object at 0x7da1b08130a0>]]]]]
keyword[def] identifier[time_range] ( identifier[self] , identifier[flag] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[count] == literal[int] : keyword[return] [[ keyword[None] , keyword[None] ],[ keyword[None] , keyword[None] ]] keyword[if] identifier[flag] keyword[is] keyword[None] : keyword[return] identifier[cnes_convert] ([ identifier[self] . identifier[date] . identifier[min] (), identifier[self] . identifier[date] . identifier[max] ()]) keyword[else] : keyword[return] identifier[cnes_convert] ([ identifier[self] . identifier[date] . identifier[compress] ( identifier[flag] ). identifier[min] (), identifier[self] . identifier[date] . identifier[compress] ( identifier[flag] ). identifier[max] ()])
def time_range(self, flag=None): """ time range of the current dataset :keyword flag: use a flag array to know the time range of an indexed slice of the object """ if self.count == 0: return [[None, None], [None, None]] # depends on [control=['if'], data=[]] if flag is None: return cnes_convert([self.date.min(), self.date.max()]) # depends on [control=['if'], data=[]] else: return cnes_convert([self.date.compress(flag).min(), self.date.compress(flag).max()])
def pair_strings_sum_formatter(a, b): """ Formats the sum of a and b. Note ---- Both inputs are numbers already converted to strings. """ if b[:1] == "-": return "{0} - {1}".format(a, b[1:]) return "{0} + {1}".format(a, b)
def function[pair_strings_sum_formatter, parameter[a, b]]: constant[ Formats the sum of a and b. Note ---- Both inputs are numbers already converted to strings. ] if compare[call[name[b]][<ast.Slice object at 0x7da1b069a590>] equal[==] constant[-]] begin[:] return[call[constant[{0} - {1}].format, parameter[name[a], call[name[b]][<ast.Slice object at 0x7da1b0699540>]]]] return[call[constant[{0} + {1}].format, parameter[name[a], name[b]]]]
keyword[def] identifier[pair_strings_sum_formatter] ( identifier[a] , identifier[b] ): literal[string] keyword[if] identifier[b] [: literal[int] ]== literal[string] : keyword[return] literal[string] . identifier[format] ( identifier[a] , identifier[b] [ literal[int] :]) keyword[return] literal[string] . identifier[format] ( identifier[a] , identifier[b] )
def pair_strings_sum_formatter(a, b): """ Formats the sum of a and b. Note ---- Both inputs are numbers already converted to strings. """ if b[:1] == '-': return '{0} - {1}'.format(a, b[1:]) # depends on [control=['if'], data=[]] return '{0} + {1}'.format(a, b)
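Both operands arrive pre-formatted as strings; a leading minus on the right operand is folded into a subtraction:

print(pair_strings_sum_formatter("x", "3"))    # x + 3
print(pair_strings_sum_formatter("x", "-3"))   # x - 3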
def uninstall(self,
                  name: str,
                  force: bool = False,
                  noprune: bool = False
                  ) -> None:
        """
        Attempts to uninstall a given Docker image.

        Parameters:
            name: the name of the Docker image.
            force: controls behaviour when the image associated with the
                given build instructions is not installed. If `True`, a
                missing image is silently ignored; if `False`, an exception
                is raised.
            noprune: a flag indicating whether or not dangling image layers
                should also be removed.

        Raises:
            docker.errors.ImageNotFound: if the image associated with the
                given instructions can't be found and `force` is `False`.
        """
        try:
            self.__docker.images.remove(image=name,
                                        force=force,
                                        noprune=noprune)
        except docker.errors.ImageNotFound as e:
            if force:
                return
            raise e
def function[uninstall, parameter[self, name, force, noprune]]: constant[ Attempts to uninstall a given Docker image. Parameters: name: the name of the Docker image. force: a flag indicating whether or not an exception should be thrown if the image associated with the given build instructions is not installed. If `True`, no exception will be thrown; if `False`, exception will be thrown. noprune: a flag indicating whether or not dangling image layers should also be removed. Raises: docker.errors.ImageNotFound: if the image associated with the given instructions can't be found. ] <ast.Try object at 0x7da1b0e2c940>
keyword[def] identifier[uninstall] ( identifier[self] , identifier[name] : identifier[str] , identifier[force] : identifier[bool] = keyword[False] , identifier[noprune] : identifier[bool] = keyword[False] )-> keyword[None] : literal[string] keyword[try] : identifier[self] . identifier[__docker] . identifier[images] . identifier[remove] ( identifier[image] = identifier[name] , identifier[force] = identifier[force] , identifier[noprune] = identifier[noprune] ) keyword[except] identifier[docker] . identifier[errors] . identifier[ImageNotFound] keyword[as] identifier[e] : keyword[if] identifier[force] : keyword[return] keyword[raise] identifier[e]
def uninstall(self, name: str, force: bool=False, noprune: bool=False) -> None: """ Attempts to uninstall a given Docker image. Parameters: name: the name of the Docker image. force: a flag indicating whether or not an exception should be thrown if the image associated with the given build instructions is not installed. If `True`, no exception will be thrown; if `False`, exception will be thrown. noprune: a flag indicating whether or not dangling image layers should also be removed. Raises: docker.errors.ImageNotFound: if the image associated with the given instructions can't be found. """ try: self.__docker.images.remove(image=name, force=force, noprune=noprune) # depends on [control=['try'], data=[]] except docker.errors.ImageNotFound as e: if force: return # depends on [control=['if'], data=[]] raise e # depends on [control=['except'], data=['e']]
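A hypothetical call on an object wrapping docker-py as sketched above; the manager instance and image name are illustrative.

mgr.uninstall("example/app:1.0", force=True)   # silently ignores a missing image
mgr.uninstall("example/app:1.0")               # raises docker.errors.ImageNotFound if absent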
def token(self, value): """ Set the Token of the message. :type value: String :param value: the Token :raise AttributeError: if value is longer than 256 """ if value is None: self._token = value return if not isinstance(value, str): value = str(value) if len(value) > 256: raise AttributeError self._token = value
def function[token, parameter[self, value]]: constant[ Set the Token of the message. :type value: String :param value: the Token :raise AttributeError: if value is longer than 256 ] if compare[name[value] is constant[None]] begin[:] name[self]._token assign[=] name[value] return[None] if <ast.UnaryOp object at 0x7da207f03ca0> begin[:] variable[value] assign[=] call[name[str], parameter[name[value]]] if compare[call[name[len], parameter[name[value]]] greater[>] constant[256]] begin[:] <ast.Raise object at 0x7da207f01870> name[self]._token assign[=] name[value]
keyword[def] identifier[token] ( identifier[self] , identifier[value] ): literal[string] keyword[if] identifier[value] keyword[is] keyword[None] : identifier[self] . identifier[_token] = identifier[value] keyword[return] keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[str] ): identifier[value] = identifier[str] ( identifier[value] ) keyword[if] identifier[len] ( identifier[value] )> literal[int] : keyword[raise] identifier[AttributeError] identifier[self] . identifier[_token] = identifier[value]
def token(self, value): """ Set the Token of the message. :type value: String :param value: the Token :raise AttributeError: if value is longer than 256 """ if value is None: self._token = value return # depends on [control=['if'], data=['value']] if not isinstance(value, str): value = str(value) # depends on [control=['if'], data=[]] if len(value) > 256: raise AttributeError # depends on [control=['if'], data=[]] self._token = value
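A sketch assuming a CoAPthon-style Message class whose token property uses the setter above; the class and values are illustrative.

msg = Message()                # assumed message class carrying the `token` property
msg.token = 0xBEEF             # non-string values are coerced via str()
print(msg.token)               # "48879"
msg.token = "x" * 300          # longer than 256 characters -> AttributeError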
def _config_win32_domain(self, domain): """Configure a Domain registry entry.""" # we call str() on domain to convert it from unicode to ascii self.domain = dns.name.from_text(str(domain))
def function[_config_win32_domain, parameter[self, domain]]: constant[Configure a Domain registry entry.] name[self].domain assign[=] call[name[dns].name.from_text, parameter[call[name[str], parameter[name[domain]]]]]
keyword[def] identifier[_config_win32_domain] ( identifier[self] , identifier[domain] ): literal[string] identifier[self] . identifier[domain] = identifier[dns] . identifier[name] . identifier[from_text] ( identifier[str] ( identifier[domain] ))
def _config_win32_domain(self, domain): """Configure a Domain registry entry.""" # we call str() on domain to convert it from unicode to ascii self.domain = dns.name.from_text(str(domain))
def reads(text, fmt, as_version=4, **kwargs): """Read a notebook from a string""" fmt = copy(fmt) fmt = long_form_one_format(fmt) ext = fmt['extension'] if ext == '.ipynb': return nbformat.reads(text, as_version, **kwargs) format_name = read_format_from_metadata(text, ext) or fmt.get('format_name') if format_name: format_options = {} else: format_name, format_options = guess_format(text, ext) if format_name: fmt['format_name'] = format_name fmt.update(format_options) reader = TextNotebookConverter(fmt) notebook = reader.reads(text, **kwargs) rearrange_jupytext_metadata(notebook.metadata) if format_name and insert_or_test_version_number(): notebook.metadata.setdefault('jupytext', {}).setdefault('text_representation', {}).update( {'extension': ext, 'format_name': format_name}) return notebook
def function[reads, parameter[text, fmt, as_version]]: constant[Read a notebook from a string] variable[fmt] assign[=] call[name[copy], parameter[name[fmt]]] variable[fmt] assign[=] call[name[long_form_one_format], parameter[name[fmt]]] variable[ext] assign[=] call[name[fmt]][constant[extension]] if compare[name[ext] equal[==] constant[.ipynb]] begin[:] return[call[name[nbformat].reads, parameter[name[text], name[as_version]]]] variable[format_name] assign[=] <ast.BoolOp object at 0x7da18f58dcf0> if name[format_name] begin[:] variable[format_options] assign[=] dictionary[[], []] if name[format_name] begin[:] call[name[fmt]][constant[format_name]] assign[=] name[format_name] call[name[fmt].update, parameter[name[format_options]]] variable[reader] assign[=] call[name[TextNotebookConverter], parameter[name[fmt]]] variable[notebook] assign[=] call[name[reader].reads, parameter[name[text]]] call[name[rearrange_jupytext_metadata], parameter[name[notebook].metadata]] if <ast.BoolOp object at 0x7da18f58ff10> begin[:] call[call[call[name[notebook].metadata.setdefault, parameter[constant[jupytext], dictionary[[], []]]].setdefault, parameter[constant[text_representation], dictionary[[], []]]].update, parameter[dictionary[[<ast.Constant object at 0x7da18f58ed10>, <ast.Constant object at 0x7da18f58e500>], [<ast.Name object at 0x7da18f58de40>, <ast.Name object at 0x7da18f58c730>]]]] return[name[notebook]]
keyword[def] identifier[reads] ( identifier[text] , identifier[fmt] , identifier[as_version] = literal[int] ,** identifier[kwargs] ): literal[string] identifier[fmt] = identifier[copy] ( identifier[fmt] ) identifier[fmt] = identifier[long_form_one_format] ( identifier[fmt] ) identifier[ext] = identifier[fmt] [ literal[string] ] keyword[if] identifier[ext] == literal[string] : keyword[return] identifier[nbformat] . identifier[reads] ( identifier[text] , identifier[as_version] ,** identifier[kwargs] ) identifier[format_name] = identifier[read_format_from_metadata] ( identifier[text] , identifier[ext] ) keyword[or] identifier[fmt] . identifier[get] ( literal[string] ) keyword[if] identifier[format_name] : identifier[format_options] ={} keyword[else] : identifier[format_name] , identifier[format_options] = identifier[guess_format] ( identifier[text] , identifier[ext] ) keyword[if] identifier[format_name] : identifier[fmt] [ literal[string] ]= identifier[format_name] identifier[fmt] . identifier[update] ( identifier[format_options] ) identifier[reader] = identifier[TextNotebookConverter] ( identifier[fmt] ) identifier[notebook] = identifier[reader] . identifier[reads] ( identifier[text] ,** identifier[kwargs] ) identifier[rearrange_jupytext_metadata] ( identifier[notebook] . identifier[metadata] ) keyword[if] identifier[format_name] keyword[and] identifier[insert_or_test_version_number] (): identifier[notebook] . identifier[metadata] . identifier[setdefault] ( literal[string] ,{}). identifier[setdefault] ( literal[string] ,{}). identifier[update] ( { literal[string] : identifier[ext] , literal[string] : identifier[format_name] }) keyword[return] identifier[notebook]
def reads(text, fmt, as_version=4, **kwargs): """Read a notebook from a string""" fmt = copy(fmt) fmt = long_form_one_format(fmt) ext = fmt['extension'] if ext == '.ipynb': return nbformat.reads(text, as_version, **kwargs) # depends on [control=['if'], data=[]] format_name = read_format_from_metadata(text, ext) or fmt.get('format_name') if format_name: format_options = {} # depends on [control=['if'], data=[]] else: (format_name, format_options) = guess_format(text, ext) if format_name: fmt['format_name'] = format_name # depends on [control=['if'], data=[]] fmt.update(format_options) reader = TextNotebookConverter(fmt) notebook = reader.reads(text, **kwargs) rearrange_jupytext_metadata(notebook.metadata) if format_name and insert_or_test_version_number(): notebook.metadata.setdefault('jupytext', {}).setdefault('text_representation', {}).update({'extension': ext, 'format_name': format_name}) # depends on [control=['if'], data=[]] return notebook
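A sketch round-tripping a small percent-format script through the reader above, assuming it behaves like jupytext.reads:

text = (
    "# %% [markdown]\n"
    "# A short note\n"
    "\n"
    "# %%\n"
    "print('hello')\n"
)
nb = reads(text, fmt="py:percent")
print(len(nb.cells), nb.cells[0].cell_type)   # 2 cells, the first one markdown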
def set_metadata(self, metadata_dict): """ Set the metadata on a dataset **metadata_dict**: A dictionary of metadata key-vals. Transforms this dict into an array of metadata objects for storage in the DB. """ if metadata_dict is None: return existing_metadata = [] for m in self.metadata: existing_metadata.append(m.key) if m.key in metadata_dict: if m.value != metadata_dict[m.key]: m.value = metadata_dict[m.key] for k, v in metadata_dict.items(): if k not in existing_metadata: m_i = Metadata(key=str(k),value=str(v)) self.metadata.append(m_i) metadata_to_delete = set(existing_metadata).difference(set(metadata_dict.keys())) for m in self.metadata: if m.key in metadata_to_delete: get_session().delete(m)
def function[set_metadata, parameter[self, metadata_dict]]: constant[ Set the metadata on a dataset **metadata_dict**: A dictionary of metadata key-vals. Transforms this dict into an array of metadata objects for storage in the DB. ] if compare[name[metadata_dict] is constant[None]] begin[:] return[None] variable[existing_metadata] assign[=] list[[]] for taget[name[m]] in starred[name[self].metadata] begin[:] call[name[existing_metadata].append, parameter[name[m].key]] if compare[name[m].key in name[metadata_dict]] begin[:] if compare[name[m].value not_equal[!=] call[name[metadata_dict]][name[m].key]] begin[:] name[m].value assign[=] call[name[metadata_dict]][name[m].key] for taget[tuple[[<ast.Name object at 0x7da20c794070>, <ast.Name object at 0x7da20c794fa0>]]] in starred[call[name[metadata_dict].items, parameter[]]] begin[:] if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[existing_metadata]] begin[:] variable[m_i] assign[=] call[name[Metadata], parameter[]] call[name[self].metadata.append, parameter[name[m_i]]] variable[metadata_to_delete] assign[=] call[call[name[set], parameter[name[existing_metadata]]].difference, parameter[call[name[set], parameter[call[name[metadata_dict].keys, parameter[]]]]]] for taget[name[m]] in starred[name[self].metadata] begin[:] if compare[name[m].key in name[metadata_to_delete]] begin[:] call[call[name[get_session], parameter[]].delete, parameter[name[m]]]
keyword[def] identifier[set_metadata] ( identifier[self] , identifier[metadata_dict] ): literal[string] keyword[if] identifier[metadata_dict] keyword[is] keyword[None] : keyword[return] identifier[existing_metadata] =[] keyword[for] identifier[m] keyword[in] identifier[self] . identifier[metadata] : identifier[existing_metadata] . identifier[append] ( identifier[m] . identifier[key] ) keyword[if] identifier[m] . identifier[key] keyword[in] identifier[metadata_dict] : keyword[if] identifier[m] . identifier[value] != identifier[metadata_dict] [ identifier[m] . identifier[key] ]: identifier[m] . identifier[value] = identifier[metadata_dict] [ identifier[m] . identifier[key] ] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[metadata_dict] . identifier[items] (): keyword[if] identifier[k] keyword[not] keyword[in] identifier[existing_metadata] : identifier[m_i] = identifier[Metadata] ( identifier[key] = identifier[str] ( identifier[k] ), identifier[value] = identifier[str] ( identifier[v] )) identifier[self] . identifier[metadata] . identifier[append] ( identifier[m_i] ) identifier[metadata_to_delete] = identifier[set] ( identifier[existing_metadata] ). identifier[difference] ( identifier[set] ( identifier[metadata_dict] . identifier[keys] ())) keyword[for] identifier[m] keyword[in] identifier[self] . identifier[metadata] : keyword[if] identifier[m] . identifier[key] keyword[in] identifier[metadata_to_delete] : identifier[get_session] (). identifier[delete] ( identifier[m] )
def set_metadata(self, metadata_dict): """ Set the metadata on a dataset **metadata_dict**: A dictionary of metadata key-vals. Transforms this dict into an array of metadata objects for storage in the DB. """ if metadata_dict is None: return # depends on [control=['if'], data=[]] existing_metadata = [] for m in self.metadata: existing_metadata.append(m.key) if m.key in metadata_dict: if m.value != metadata_dict[m.key]: m.value = metadata_dict[m.key] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['metadata_dict']] # depends on [control=['for'], data=['m']] for (k, v) in metadata_dict.items(): if k not in existing_metadata: m_i = Metadata(key=str(k), value=str(v)) self.metadata.append(m_i) # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=[]] metadata_to_delete = set(existing_metadata).difference(set(metadata_dict.keys())) for m in self.metadata: if m.key in metadata_to_delete: get_session().delete(m) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']]
def add(self, id, obj, criteria={}, force=False, _check_id=True): """ Add a :class:`Component <cqparts.Component>` instance to the database. :param id: unique id of entry, can be anything :type id: :class:`str` :param obj: component to be serialized, then added to the catalogue :type obj: :class:`Component <cqparts.Component>` :param criteria: arbitrary search criteria for the entry :type criteria: :class:`dict` :param force: if ``True``, entry is forcefully overwritten if it already exists. Otherwise an exception is raised :type force: :class:`bool` :param _check_id: if ``False``, duplicate ``id`` is not tested :type _check_id: :class:`bool` :raises TypeError: on parameter issues :raises ValueError: if a duplicate db entry is detected (and ``force`` is not set) :return: index of new entry :rtype: :class:`int` """ # Verify component if not isinstance(obj, Component): raise TypeError("can only add(%r), component is a %r" % ( Component, type(obj) )) # Serialize object obj_data = obj.serialize() # Add to database q = tinydb.Query() if (force or _check_id) and self.items.count(q.id == id): if force: self.items.remove(q.id == id) else: raise ValueError("entry with id '%s' already exists" % (id)) index = self.items.insert({ 'id': id, # must be unique 'criteria': criteria, 'obj': obj_data, }) return index
def function[add, parameter[self, id, obj, criteria, force, _check_id]]: constant[ Add a :class:`Component <cqparts.Component>` instance to the database. :param id: unique id of entry, can be anything :type id: :class:`str` :param obj: component to be serialized, then added to the catalogue :type obj: :class:`Component <cqparts.Component>` :param criteria: arbitrary search criteria for the entry :type criteria: :class:`dict` :param force: if ``True``, entry is forcefully overwritten if it already exists. Otherwise an exception is raised :type force: :class:`bool` :param _check_id: if ``False``, duplicate ``id`` is not tested :type _check_id: :class:`bool` :raises TypeError: on parameter issues :raises ValueError: if a duplicate db entry is detected (and ``force`` is not set) :return: index of new entry :rtype: :class:`int` ] if <ast.UnaryOp object at 0x7da18f58c430> begin[:] <ast.Raise object at 0x7da18f58e9e0> variable[obj_data] assign[=] call[name[obj].serialize, parameter[]] variable[q] assign[=] call[name[tinydb].Query, parameter[]] if <ast.BoolOp object at 0x7da18f58e5c0> begin[:] if name[force] begin[:] call[name[self].items.remove, parameter[compare[name[q].id equal[==] name[id]]]] variable[index] assign[=] call[name[self].items.insert, parameter[dictionary[[<ast.Constant object at 0x7da20e9611b0>, <ast.Constant object at 0x7da20e961690>, <ast.Constant object at 0x7da20e960280>], [<ast.Name object at 0x7da20e962ef0>, <ast.Name object at 0x7da20e961d20>, <ast.Name object at 0x7da20e961060>]]]] return[name[index]]
keyword[def] identifier[add] ( identifier[self] , identifier[id] , identifier[obj] , identifier[criteria] ={}, identifier[force] = keyword[False] , identifier[_check_id] = keyword[True] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[Component] ): keyword[raise] identifier[TypeError] ( literal[string] %( identifier[Component] , identifier[type] ( identifier[obj] ) )) identifier[obj_data] = identifier[obj] . identifier[serialize] () identifier[q] = identifier[tinydb] . identifier[Query] () keyword[if] ( identifier[force] keyword[or] identifier[_check_id] ) keyword[and] identifier[self] . identifier[items] . identifier[count] ( identifier[q] . identifier[id] == identifier[id] ): keyword[if] identifier[force] : identifier[self] . identifier[items] . identifier[remove] ( identifier[q] . identifier[id] == identifier[id] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[id] )) identifier[index] = identifier[self] . identifier[items] . identifier[insert] ({ literal[string] : identifier[id] , literal[string] : identifier[criteria] , literal[string] : identifier[obj_data] , }) keyword[return] identifier[index]
def add(self, id, obj, criteria={}, force=False, _check_id=True): """ Add a :class:`Component <cqparts.Component>` instance to the database. :param id: unique id of entry, can be anything :type id: :class:`str` :param obj: component to be serialized, then added to the catalogue :type obj: :class:`Component <cqparts.Component>` :param criteria: arbitrary search criteria for the entry :type criteria: :class:`dict` :param force: if ``True``, entry is forcefully overwritten if it already exists. Otherwise an exception is raised :type force: :class:`bool` :param _check_id: if ``False``, duplicate ``id`` is not tested :type _check_id: :class:`bool` :raises TypeError: on parameter issues :raises ValueError: if a duplicate db entry is detected (and ``force`` is not set) :return: index of new entry :rtype: :class:`int` """ # Verify component if not isinstance(obj, Component): raise TypeError('can only add(%r), component is a %r' % (Component, type(obj))) # depends on [control=['if'], data=[]] # Serialize object obj_data = obj.serialize() # Add to database q = tinydb.Query() if (force or _check_id) and self.items.count(q.id == id): if force: self.items.remove(q.id == id) # depends on [control=['if'], data=[]] else: raise ValueError("entry with id '%s' already exists" % id) # depends on [control=['if'], data=[]] # must be unique index = self.items.insert({'id': id, 'criteria': criteria, 'obj': obj_data}) return index
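A hypothetical use of the catalogue method above with a cqparts-style component; the catalogue backend and part class are illustrative stand-ins.

catalogue = JSONCatalogue("parts.json")        # assumed tinydb-backed catalogue
bolt = Bolt(length=8)                          # assumed Component subclass

catalogue.add("bolt-m3x8", bolt, criteria={"size": "M3", "length": 8})
catalogue.add("bolt-m3x8", bolt, criteria={"size": "M3"}, force=True)  # overwrite in place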
def _format_param_value(key, value): """Wraps string values in quotes, and returns as 'key=value'. """ if isinstance(value, str): value = "'{}'".format(value) return "{}={}".format(key, value)
def function[_format_param_value, parameter[key, value]]: constant[Wraps string values in quotes, and returns as 'key=value'. ] if call[name[isinstance], parameter[name[value], name[str]]] begin[:] variable[value] assign[=] call[constant['{}'].format, parameter[name[value]]] return[call[constant[{}={}].format, parameter[name[key], name[value]]]]
keyword[def] identifier[_format_param_value] ( identifier[key] , identifier[value] ): literal[string] keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ): identifier[value] = literal[string] . identifier[format] ( identifier[value] ) keyword[return] literal[string] . identifier[format] ( identifier[key] , identifier[value] )
def _format_param_value(key, value): """Wraps string values in quotes, and returns as 'key=value'. """ if isinstance(value, str): value = "'{}'".format(value) # depends on [control=['if'], data=[]] return '{}={}'.format(key, value)
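Strings get single quotes; everything else is rendered verbatim:

print(_format_param_value("mode", "fast"))     # mode='fast'
print(_format_param_value("level", 3))         # level=3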
def getinputfile(self, outputfile, loadmetadata=True, client=None,requiremetadata=False): """Grabs one input file for the specified output filename (raises a KeyError exception if there is no such output, StopIteration if there are no input files for it). Shortcut for getinputfiles()""" if isinstance(outputfile, CLAMOutputFile): outputfilename = str(outputfile).replace(os.path.join(self.projectpath,'output/'),'') else: outputfilename = outputfile if outputfilename not in self: raise KeyError("No such outputfile " + outputfilename) try: return next(self.getinputfiles(outputfile,loadmetadata,client,requiremetadata)) except StopIteration: raise StopIteration("No input files for outputfile " + outputfilename)
def function[getinputfile, parameter[self, outputfile, loadmetadata, client, requiremetadata]]: constant[Grabs one input file for the specified output filename (raises a KeyError exception if there is no such output, StopIteration if there are no input files for it). Shortcut for getinputfiles()] if call[name[isinstance], parameter[name[outputfile], name[CLAMOutputFile]]] begin[:] variable[outputfilename] assign[=] call[call[name[str], parameter[name[outputfile]]].replace, parameter[call[name[os].path.join, parameter[name[self].projectpath, constant[output/]]], constant[]]] if compare[name[outputfilename] <ast.NotIn object at 0x7da2590d7190> name[self]] begin[:] <ast.Raise object at 0x7da20c6c77c0> <ast.Try object at 0x7da20c6c5a80>
keyword[def] identifier[getinputfile] ( identifier[self] , identifier[outputfile] , identifier[loadmetadata] = keyword[True] , identifier[client] = keyword[None] , identifier[requiremetadata] = keyword[False] ): literal[string] keyword[if] identifier[isinstance] ( identifier[outputfile] , identifier[CLAMOutputFile] ): identifier[outputfilename] = identifier[str] ( identifier[outputfile] ). identifier[replace] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[projectpath] , literal[string] ), literal[string] ) keyword[else] : identifier[outputfilename] = identifier[outputfile] keyword[if] identifier[outputfilename] keyword[not] keyword[in] identifier[self] : keyword[raise] identifier[KeyError] ( literal[string] + identifier[outputfilename] ) keyword[try] : keyword[return] identifier[next] ( identifier[self] . identifier[getinputfiles] ( identifier[outputfile] , identifier[loadmetadata] , identifier[client] , identifier[requiremetadata] )) keyword[except] identifier[StopIteration] : keyword[raise] identifier[StopIteration] ( literal[string] + identifier[outputfilename] )
def getinputfile(self, outputfile, loadmetadata=True, client=None, requiremetadata=False): """Grabs one input file for the specified output filename (raises a KeyError exception if there is no such output, StopIteration if there are no input files for it). Shortcut for getinputfiles()""" if isinstance(outputfile, CLAMOutputFile): outputfilename = str(outputfile).replace(os.path.join(self.projectpath, 'output/'), '') # depends on [control=['if'], data=[]] else: outputfilename = outputfile if outputfilename not in self: raise KeyError('No such outputfile ' + outputfilename) # depends on [control=['if'], data=['outputfilename']] try: return next(self.getinputfiles(outputfile, loadmetadata, client, requiremetadata)) # depends on [control=['try'], data=[]] except StopIteration: raise StopIteration('No input files for outputfile ' + outputfilename) # depends on [control=['except'], data=[]]
def switch_keystone_provider(request, keystone_provider=None, redirect_field_name=auth.REDIRECT_FIELD_NAME): """Switches the user's keystone provider using K2K Federation If keystone_provider is given then we switch the user to the keystone provider using K2K federation. Otherwise if keystone_provider is None then we switch the user back to the Identity Provider Keystone which a non federated token auth will be used. """ base_token = request.session.get('k2k_base_unscoped_token', None) k2k_auth_url = request.session.get('k2k_auth_url', None) keystone_providers = request.session.get('keystone_providers', None) recent_project = request.COOKIES.get('recent_project') if not base_token or not k2k_auth_url: msg = _('K2K Federation not setup for this session') raise exceptions.KeystoneAuthException(msg) redirect_to = request.GET.get(redirect_field_name, '') if not is_safe_url(url=redirect_to, host=request.get_host()): redirect_to = settings.LOGIN_REDIRECT_URL unscoped_auth_ref = None keystone_idp_id = getattr( settings, 'KEYSTONE_PROVIDER_IDP_ID', 'localkeystone') if keystone_provider == keystone_idp_id: current_plugin = plugin.TokenPlugin() unscoped_auth = current_plugin.get_plugin(auth_url=k2k_auth_url, token=base_token) else: # Switch to service provider using K2K federation plugins = [plugin.TokenPlugin()] current_plugin = plugin.K2KAuthPlugin() unscoped_auth = current_plugin.get_plugin( auth_url=k2k_auth_url, service_provider=keystone_provider, plugins=plugins, token=base_token, recent_project=recent_project) try: # Switch to identity provider using token auth unscoped_auth_ref = current_plugin.get_access_info(unscoped_auth) except exceptions.KeystoneAuthException as exc: msg = 'Switching to Keystone Provider %s has failed. %s' \ % (keystone_provider, (six.text_type(exc))) messages.error(request, msg) if unscoped_auth_ref: try: request.user = auth.authenticate( request=request, auth_url=unscoped_auth.auth_url, token=unscoped_auth_ref.auth_token) except exceptions.KeystoneAuthException as exc: msg = 'Keystone provider switch failed: %s' % six.text_type(exc) res = django_http.HttpResponseRedirect(settings.LOGIN_URL) res.set_cookie('logout_reason', msg, max_age=10) return res auth.login(request, request.user) auth_user.set_session_from_user(request, request.user) request.session['keystone_provider_id'] = keystone_provider request.session['keystone_providers'] = keystone_providers request.session['k2k_base_unscoped_token'] = base_token request.session['k2k_auth_url'] = k2k_auth_url message = ( _('Switch to Keystone Provider "%(keystone_provider)s" ' 'successful.') % {'keystone_provider': keystone_provider}) messages.success(request, message) response = shortcuts.redirect(redirect_to) return response
def function[switch_keystone_provider, parameter[request, keystone_provider, redirect_field_name]]:
    constant[Switches the user's keystone provider using K2K Federation. If keystone_provider is given then we switch the user to the keystone provider using K2K federation. Otherwise, if keystone_provider is None, we switch the user back to the Identity Provider Keystone, for which a non-federated token auth will be used. ]
    variable[base_token] assign[=] call[name[request].session.get, parameter[constant[k2k_base_unscoped_token], constant[None]]]
    variable[k2k_auth_url] assign[=] call[name[request].session.get, parameter[constant[k2k_auth_url], constant[None]]]
    variable[keystone_providers] assign[=] call[name[request].session.get, parameter[constant[keystone_providers], constant[None]]]
    variable[recent_project] assign[=] call[name[request].COOKIES.get, parameter[constant[recent_project]]]
    if <ast.BoolOp object at 0x7da1b1916710> begin[:]
        variable[msg] assign[=] call[name[_], parameter[constant[K2K Federation not setup for this session]]]
        <ast.Raise object at 0x7da1b1917be0>
    variable[redirect_to] assign[=] call[name[request].GET.get, parameter[name[redirect_field_name], constant[]]]
    if <ast.UnaryOp object at 0x7da1b194c2b0> begin[:]
        variable[redirect_to] assign[=] name[settings].LOGIN_REDIRECT_URL
    variable[unscoped_auth_ref] assign[=] constant[None]
    variable[keystone_idp_id] assign[=] call[name[getattr], parameter[name[settings], constant[KEYSTONE_PROVIDER_IDP_ID], constant[localkeystone]]]
    if compare[name[keystone_provider] equal[==] name[keystone_idp_id]] begin[:]
        variable[current_plugin] assign[=] call[name[plugin].TokenPlugin, parameter[]]
        variable[unscoped_auth] assign[=] call[name[current_plugin].get_plugin, parameter[]]
    <ast.Try object at 0x7da1b194e0b0>
    if name[unscoped_auth_ref] begin[:]
        <ast.Try object at 0x7da1b19853f0>
        call[name[auth].login, parameter[name[request], name[request].user]]
        call[name[auth_user].set_session_from_user, parameter[name[request], name[request].user]]
        call[name[request].session][constant[keystone_provider_id]] assign[=] name[keystone_provider]
        call[name[request].session][constant[keystone_providers]] assign[=] name[keystone_providers]
        call[name[request].session][constant[k2k_base_unscoped_token]] assign[=] name[base_token]
        call[name[request].session][constant[k2k_auth_url]] assign[=] name[k2k_auth_url]
        variable[message] assign[=] binary_operation[call[name[_], parameter[constant[Switch to Keystone Provider "%(keystone_provider)s" successful.]]] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b19873a0>], [<ast.Name object at 0x7da1b1987310>]]]
        call[name[messages].success, parameter[name[request], name[message]]]
    variable[response] assign[=] call[name[shortcuts].redirect, parameter[name[redirect_to]]]
    return[name[response]]
keyword[def] identifier[switch_keystone_provider] ( identifier[request] , identifier[keystone_provider] = keyword[None] , identifier[redirect_field_name] = identifier[auth] . identifier[REDIRECT_FIELD_NAME] ): literal[string] identifier[base_token] = identifier[request] . identifier[session] . identifier[get] ( literal[string] , keyword[None] ) identifier[k2k_auth_url] = identifier[request] . identifier[session] . identifier[get] ( literal[string] , keyword[None] ) identifier[keystone_providers] = identifier[request] . identifier[session] . identifier[get] ( literal[string] , keyword[None] ) identifier[recent_project] = identifier[request] . identifier[COOKIES] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[base_token] keyword[or] keyword[not] identifier[k2k_auth_url] : identifier[msg] = identifier[_] ( literal[string] ) keyword[raise] identifier[exceptions] . identifier[KeystoneAuthException] ( identifier[msg] ) identifier[redirect_to] = identifier[request] . identifier[GET] . identifier[get] ( identifier[redirect_field_name] , literal[string] ) keyword[if] keyword[not] identifier[is_safe_url] ( identifier[url] = identifier[redirect_to] , identifier[host] = identifier[request] . identifier[get_host] ()): identifier[redirect_to] = identifier[settings] . identifier[LOGIN_REDIRECT_URL] identifier[unscoped_auth_ref] = keyword[None] identifier[keystone_idp_id] = identifier[getattr] ( identifier[settings] , literal[string] , literal[string] ) keyword[if] identifier[keystone_provider] == identifier[keystone_idp_id] : identifier[current_plugin] = identifier[plugin] . identifier[TokenPlugin] () identifier[unscoped_auth] = identifier[current_plugin] . identifier[get_plugin] ( identifier[auth_url] = identifier[k2k_auth_url] , identifier[token] = identifier[base_token] ) keyword[else] : identifier[plugins] =[ identifier[plugin] . identifier[TokenPlugin] ()] identifier[current_plugin] = identifier[plugin] . identifier[K2KAuthPlugin] () identifier[unscoped_auth] = identifier[current_plugin] . identifier[get_plugin] ( identifier[auth_url] = identifier[k2k_auth_url] , identifier[service_provider] = identifier[keystone_provider] , identifier[plugins] = identifier[plugins] , identifier[token] = identifier[base_token] , identifier[recent_project] = identifier[recent_project] ) keyword[try] : identifier[unscoped_auth_ref] = identifier[current_plugin] . identifier[get_access_info] ( identifier[unscoped_auth] ) keyword[except] identifier[exceptions] . identifier[KeystoneAuthException] keyword[as] identifier[exc] : identifier[msg] = literal[string] %( identifier[keystone_provider] ,( identifier[six] . identifier[text_type] ( identifier[exc] ))) identifier[messages] . identifier[error] ( identifier[request] , identifier[msg] ) keyword[if] identifier[unscoped_auth_ref] : keyword[try] : identifier[request] . identifier[user] = identifier[auth] . identifier[authenticate] ( identifier[request] = identifier[request] , identifier[auth_url] = identifier[unscoped_auth] . identifier[auth_url] , identifier[token] = identifier[unscoped_auth_ref] . identifier[auth_token] ) keyword[except] identifier[exceptions] . identifier[KeystoneAuthException] keyword[as] identifier[exc] : identifier[msg] = literal[string] % identifier[six] . identifier[text_type] ( identifier[exc] ) identifier[res] = identifier[django_http] . identifier[HttpResponseRedirect] ( identifier[settings] . identifier[LOGIN_URL] ) identifier[res] . identifier[set_cookie] ( literal[string] , identifier[msg] , identifier[max_age] = literal[int] ) keyword[return] identifier[res] identifier[auth] . identifier[login] ( identifier[request] , identifier[request] . identifier[user] ) identifier[auth_user] . identifier[set_session_from_user] ( identifier[request] , identifier[request] . identifier[user] ) identifier[request] . identifier[session] [ literal[string] ]= identifier[keystone_provider] identifier[request] . identifier[session] [ literal[string] ]= identifier[keystone_providers] identifier[request] . identifier[session] [ literal[string] ]= identifier[base_token] identifier[request] . identifier[session] [ literal[string] ]= identifier[k2k_auth_url] identifier[message] =( identifier[_] ( literal[string] literal[string] )%{ literal[string] : identifier[keystone_provider] }) identifier[messages] . identifier[success] ( identifier[request] , identifier[message] ) identifier[response] = identifier[shortcuts] . identifier[redirect] ( identifier[redirect_to] ) keyword[return] identifier[response]
def switch_keystone_provider(request, keystone_provider=None, redirect_field_name=auth.REDIRECT_FIELD_NAME):
    """Switches the user's keystone provider using K2K Federation.

    If keystone_provider is given then we switch the user to the keystone
    provider using K2K federation. Otherwise, if keystone_provider is None,
    we switch the user back to the Identity Provider Keystone, for which a
    non-federated token auth will be used.
    """
    base_token = request.session.get('k2k_base_unscoped_token', None)
    k2k_auth_url = request.session.get('k2k_auth_url', None)
    keystone_providers = request.session.get('keystone_providers', None)
    recent_project = request.COOKIES.get('recent_project')
    if not base_token or not k2k_auth_url:
        msg = _('K2K Federation not setup for this session')
        raise exceptions.KeystoneAuthException(msg) # depends on [control=['if'], data=[]]
    redirect_to = request.GET.get(redirect_field_name, '')
    if not is_safe_url(url=redirect_to, host=request.get_host()):
        redirect_to = settings.LOGIN_REDIRECT_URL # depends on [control=['if'], data=[]]
    unscoped_auth_ref = None
    keystone_idp_id = getattr(settings, 'KEYSTONE_PROVIDER_IDP_ID', 'localkeystone')
    if keystone_provider == keystone_idp_id:
        current_plugin = plugin.TokenPlugin()
        unscoped_auth = current_plugin.get_plugin(auth_url=k2k_auth_url, token=base_token) # depends on [control=['if'], data=[]]
    else:
        # Switch to service provider using K2K federation
        plugins = [plugin.TokenPlugin()]
        current_plugin = plugin.K2KAuthPlugin()
        unscoped_auth = current_plugin.get_plugin(auth_url=k2k_auth_url, service_provider=keystone_provider, plugins=plugins, token=base_token, recent_project=recent_project)
    try:
        # Switch to identity provider using token auth
        unscoped_auth_ref = current_plugin.get_access_info(unscoped_auth) # depends on [control=['try'], data=[]]
    except exceptions.KeystoneAuthException as exc:
        msg = 'Switching to Keystone Provider %s has failed. %s' % (keystone_provider, six.text_type(exc))
        messages.error(request, msg) # depends on [control=['except'], data=['exc']]
    if unscoped_auth_ref:
        try:
            request.user = auth.authenticate(request=request, auth_url=unscoped_auth.auth_url, token=unscoped_auth_ref.auth_token) # depends on [control=['try'], data=[]]
        except exceptions.KeystoneAuthException as exc:
            msg = 'Keystone provider switch failed: %s' % six.text_type(exc)
            res = django_http.HttpResponseRedirect(settings.LOGIN_URL)
            res.set_cookie('logout_reason', msg, max_age=10)
            return res # depends on [control=['except'], data=['exc']]
        auth.login(request, request.user)
        auth_user.set_session_from_user(request, request.user)
        request.session['keystone_provider_id'] = keystone_provider
        request.session['keystone_providers'] = keystone_providers
        request.session['k2k_base_unscoped_token'] = base_token
        request.session['k2k_auth_url'] = k2k_auth_url
        message = _('Switch to Keystone Provider "%(keystone_provider)s" successful.') % {'keystone_provider': keystone_provider}
        messages.success(request, message) # depends on [control=['if'], data=[]]
    response = shortcuts.redirect(redirect_to)
    return response
def rating_count(obj):
    """
    Total number of users who have submitted a positive rating for this
    object.

    Usage:
        {% rating_count obj %}

    """
    count = Rating.objects.filter(
        object_id=obj.pk,
        content_type=ContentType.objects.get_for_model(obj),
    ).exclude(rating=0).count()
    return count
def function[rating_count, parameter[obj]]:
    constant[ Total number of users who have submitted a positive rating for this object. Usage: {% rating_count obj %} ]
    variable[count] assign[=] call[call[call[name[Rating].objects.filter, parameter[]].exclude, parameter[]].count, parameter[]]
    return[name[count]]
keyword[def] identifier[rating_count] ( identifier[obj] ): literal[string] identifier[count] = identifier[Rating] . identifier[objects] . identifier[filter] ( identifier[object_id] = identifier[obj] . identifier[pk] , identifier[content_type] = identifier[ContentType] . identifier[objects] . identifier[get_for_model] ( identifier[obj] ), ). identifier[exclude] ( identifier[rating] = literal[int] ). identifier[count] () keyword[return] identifier[count]
def rating_count(obj): """ Total number of users who have submitted a positive rating for this object. Usage: {% rating_count obj %} """ count = Rating.objects.filter(object_id=obj.pk, content_type=ContentType.objects.get_for_model(obj)).exclude(rating=0).count() return count
async def text(self, *, encoding: Optional[str]=None) -> str: """Like read(), but assumes that body part contains text data.""" data = await self.read(decode=True) # see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm # NOQA # and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send # NOQA encoding = encoding or self.get_charset(default='utf-8') return data.decode(encoding)
<ast.AsyncFunctionDef object at 0x7da1b1f75210>
keyword[async] keyword[def] identifier[text] ( identifier[self] ,*, identifier[encoding] : identifier[Optional] [ identifier[str] ]= keyword[None] )-> identifier[str] : literal[string] identifier[data] = keyword[await] identifier[self] . identifier[read] ( identifier[decode] = keyword[True] ) identifier[encoding] = identifier[encoding] keyword[or] identifier[self] . identifier[get_charset] ( identifier[default] = literal[string] ) keyword[return] identifier[data] . identifier[decode] ( identifier[encoding] )
async def text(self, *, encoding: Optional[str]=None) -> str: """Like read(), but assumes that body part contains text data.""" data = await self.read(decode=True) # see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm # NOQA # and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send # NOQA encoding = encoding or self.get_charset(default='utf-8') return data.decode(encoding)
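A minimal usage sketch for the method above, assuming it lives on aiohttp's BodyPartReader; the handler and response shape are illustrative:

from aiohttp import web

async def upload(request):
    # read the first part of a multipart body and decode it as text
    reader = await request.multipart()
    part = await reader.next()
    content = await part.text()  # honours the part's charset, defaulting to utf-8
    return web.Response(text=content)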
def handle_cmd(self, command, application): """Handle running a given dot command from a user. :type command: str :param command: The full dot command string, e.g. ``.edit`` or ``.profile prod``. :type application: AWSShell :param application: The application object. """ parts = command.split() cmd_name = parts[0][1:] if cmd_name not in self.HANDLER_CLASSES: self._unknown_cmd(parts, application) else: # Note we expect the class to support no-arg # instantiation. return self.HANDLER_CLASSES[cmd_name]().run(parts, application)
def function[handle_cmd, parameter[self, command, application]]: constant[Handle running a given dot command from a user. :type command: str :param command: The full dot command string, e.g. ``.edit`` or ``.profile prod``. :type application: AWSShell :param application: The application object. ] variable[parts] assign[=] call[name[command].split, parameter[]] variable[cmd_name] assign[=] call[call[name[parts]][constant[0]]][<ast.Slice object at 0x7da18f723f40>] if compare[name[cmd_name] <ast.NotIn object at 0x7da2590d7190> name[self].HANDLER_CLASSES] begin[:] call[name[self]._unknown_cmd, parameter[name[parts], name[application]]]
keyword[def] identifier[handle_cmd] ( identifier[self] , identifier[command] , identifier[application] ): literal[string] identifier[parts] = identifier[command] . identifier[split] () identifier[cmd_name] = identifier[parts] [ literal[int] ][ literal[int] :] keyword[if] identifier[cmd_name] keyword[not] keyword[in] identifier[self] . identifier[HANDLER_CLASSES] : identifier[self] . identifier[_unknown_cmd] ( identifier[parts] , identifier[application] ) keyword[else] : keyword[return] identifier[self] . identifier[HANDLER_CLASSES] [ identifier[cmd_name] ](). identifier[run] ( identifier[parts] , identifier[application] )
def handle_cmd(self, command, application): """Handle running a given dot command from a user. :type command: str :param command: The full dot command string, e.g. ``.edit`` or ``.profile prod``. :type application: AWSShell :param application: The application object. """ parts = command.split() cmd_name = parts[0][1:] if cmd_name not in self.HANDLER_CLASSES: self._unknown_cmd(parts, application) # depends on [control=['if'], data=[]] else: # Note we expect the class to support no-arg # instantiation. return self.HANDLER_CLASSES[cmd_name]().run(parts, application)
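To make the dispatch concrete, here is how the command string is decomposed before the handler lookup (pure-Python illustration, no shell state needed):

parts = '.profile prod'.split()   # ['.profile', 'prod']
cmd_name = parts[0][1:]           # 'profile', the key used in HANDLER_CLASSES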
def flush_pending(function): """Attempt to acquire any pending locks. """ s = boto3.Session() client = s.client('lambda') results = client.invoke( FunctionName=function, Payload=json.dumps({'detail-type': 'Scheduled Event'}) ) content = results.pop('Payload').read() pprint.pprint(results) pprint.pprint(json.loads(content))
def function[flush_pending, parameter[function]]: constant[Attempt to acquire any pending locks. ] variable[s] assign[=] call[name[boto3].Session, parameter[]] variable[client] assign[=] call[name[s].client, parameter[constant[lambda]]] variable[results] assign[=] call[name[client].invoke, parameter[]] variable[content] assign[=] call[call[name[results].pop, parameter[constant[Payload]]].read, parameter[]] call[name[pprint].pprint, parameter[name[results]]] call[name[pprint].pprint, parameter[call[name[json].loads, parameter[name[content]]]]]
keyword[def] identifier[flush_pending] ( identifier[function] ): literal[string] identifier[s] = identifier[boto3] . identifier[Session] () identifier[client] = identifier[s] . identifier[client] ( literal[string] ) identifier[results] = identifier[client] . identifier[invoke] ( identifier[FunctionName] = identifier[function] , identifier[Payload] = identifier[json] . identifier[dumps] ({ literal[string] : literal[string] }) ) identifier[content] = identifier[results] . identifier[pop] ( literal[string] ). identifier[read] () identifier[pprint] . identifier[pprint] ( identifier[results] ) identifier[pprint] . identifier[pprint] ( identifier[json] . identifier[loads] ( identifier[content] ))
def flush_pending(function): """Attempt to acquire any pending locks. """ s = boto3.Session() client = s.client('lambda') results = client.invoke(FunctionName=function, Payload=json.dumps({'detail-type': 'Scheduled Event'})) content = results.pop('Payload').read() pprint.pprint(results) pprint.pprint(json.loads(content))
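A usage sketch; the Lambda function name is hypothetical and valid AWS credentials are assumed in the environment:

flush_pending('lock-acquirer')  # invokes the function and pretty-prints its response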
def _iter_keys(key): """! Iterate over subkeys of a key """ for i in range(winreg.QueryInfoKey(key)[0]): yield winreg.OpenKey(key, winreg.EnumKey(key, i))
def function[_iter_keys, parameter[key]]: constant[! Iterate over subkeys of a key ] for taget[name[i]] in starred[call[name[range], parameter[call[call[name[winreg].QueryInfoKey, parameter[name[key]]]][constant[0]]]]] begin[:] <ast.Yield object at 0x7da18ede5570>
keyword[def] identifier[_iter_keys] ( identifier[key] ): literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[winreg] . identifier[QueryInfoKey] ( identifier[key] )[ literal[int] ]): keyword[yield] identifier[winreg] . identifier[OpenKey] ( identifier[key] , identifier[winreg] . identifier[EnumKey] ( identifier[key] , identifier[i] ))
def _iter_keys(key): """! Iterate over subkeys of a key """ for i in range(winreg.QueryInfoKey(key)[0]): yield winreg.OpenKey(key, winreg.EnumKey(key, i)) # depends on [control=['for'], data=['i']]
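A Windows-only sketch of the iterator in use with the stdlib winreg module; the registry path is illustrative:

import winreg

with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft') as key:
    for subkey in _iter_keys(key):
        # QueryInfoKey reports (num_subkeys, num_values, last_modified)
        print(winreg.QueryInfoKey(subkey))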
def setup_interpreter(distributions, interpreter=None):
    """Return an interpreter configured with vendored distributions as extras.

    Any distributions that are present in the vendored set will be added to the
    interpreter as extras.

    :param distributions: The names of distributions to set up the interpreter with.
    :type distributions: list of str
    :param interpreter: An optional interpreter to configure. If ``None``, the
                        current interpreter is used.
    :type interpreter: :class:`pex.interpreter.PythonInterpreter`
    :return: A bare interpreter configured with vendored extras.
    :rtype: :class:`pex.interpreter.PythonInterpreter`
    """
    from pex.interpreter import PythonInterpreter

    interpreter = interpreter or PythonInterpreter.get()
    for dist in _vendored_dists(OrderedSet(distributions)):
        interpreter = interpreter.with_extra(dist.key, dist.version, dist.location)
    return interpreter
def function[setup_interpreter, parameter[distributions, interpreter]]: constant[Return an interpreter configured with vendored distributions as extras. Any distributions that are present in the vendored set will be added to the interpreter as extras. :param distributions: The names of distributions to set up the interpreter with. :type distributions: list of str :param interpreter: An optional interpreter to configure. If ``None``, the current interpreter is used. :type interpreter: :class:`pex.interpreter.PythonInterpreter` :return: A bare interpreter configured with vendored extras. :rtype: :class:`pex.interpreter.PythonInterpreter` ] from relative_module[pex.interpreter] import module[PythonInterpreter] variable[interpreter] assign[=] <ast.BoolOp object at 0x7da204620f40> for taget[name[dist]] in starred[call[name[_vendored_dists], parameter[call[name[OrderedSet], parameter[name[distributions]]]]]] begin[:] variable[interpreter] assign[=] call[name[interpreter].with_extra, parameter[name[dist].key, name[dist].version, name[dist].location]] return[name[interpreter]]
keyword[def] identifier[setup_interpreter] ( identifier[distributions] , identifier[interpreter] = keyword[None] ): literal[string] keyword[from] identifier[pex] . identifier[interpreter] keyword[import] identifier[PythonInterpreter] identifier[interpreter] = identifier[interpreter] keyword[or] identifier[PythonInterpreter] . identifier[get] () keyword[for] identifier[dist] keyword[in] identifier[_vendored_dists] ( identifier[OrderedSet] ( identifier[distributions] )): identifier[interpreter] = identifier[interpreter] . identifier[with_extra] ( identifier[dist] . identifier[key] , identifier[dist] . identifier[version] , identifier[dist] . identifier[location] ) keyword[return] identifier[interpreter]
def setup_interpreter(distributions, interpreter=None):
    """Return an interpreter configured with vendored distributions as extras.

    Any distributions that are present in the vendored set will be added to the
    interpreter as extras.

    :param distributions: The names of distributions to set up the interpreter with.
    :type distributions: list of str
    :param interpreter: An optional interpreter to configure. If ``None``, the
                        current interpreter is used.
    :type interpreter: :class:`pex.interpreter.PythonInterpreter`
    :return: A bare interpreter configured with vendored extras.
    :rtype: :class:`pex.interpreter.PythonInterpreter`
    """
    from pex.interpreter import PythonInterpreter
    interpreter = interpreter or PythonInterpreter.get()
    for dist in _vendored_dists(OrderedSet(distributions)):
        interpreter = interpreter.with_extra(dist.key, dist.version, dist.location) # depends on [control=['for'], data=['dist']]
    return interpreter
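Usage sketch, assuming the named distributions exist in pex's vendored set:

interp = setup_interpreter(['setuptools', 'wheel'])  # current interpreter plus vendored extras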
def load():
    """Read data from a text file on disk.""" # Get the data file relative to this file's location... datadir = os.path.dirname(__file__)
    filename = os.path.join(datadir, 'angelier_data.txt')

    data = []
    with open(filename, 'r') as infile:
        for line in infile:
            # Skip comments
            if line.startswith('#'):
                continue

            # First column: strike, second: dip, third: rake.
            strike, dip, rake = line.strip().split()

            if rake[-1].isalpha():
                # If there's a directional letter on the rake column, parse it
                # normally.
                strike, dip, rake = mplstereonet.parse_rake(strike, dip, rake)
            else:
                # Otherwise, it's actually an azimuthal measurement of the
                # slickenside directions, so we need to convert it to a rake.
                strike, dip = mplstereonet.parse_strike_dip(strike, dip)
                azimuth = float(rake)
                rake = mplstereonet.azimuth2rake(strike, dip, azimuth)

            data.append([strike, dip, rake])

    # Separate the columns back out
    strike, dip, rake = zip(*data)
    return strike, dip, rake
def function[load, parameter[]]: constant[Read data from a text file on disk.] variable[datadir] assign[=] call[name[os].path.dirname, parameter[name[__file__]]] variable[filename] assign[=] call[name[os].path.join, parameter[name[datadir], constant[angelier_data.txt]]] variable[data] assign[=] list[[]] with call[name[open], parameter[name[filename], constant[r]]] begin[:] for taget[name[line]] in starred[name[infile]] begin[:] if call[name[line].startswith, parameter[constant[#]]] begin[:] continue <ast.Tuple object at 0x7da20cabece0> assign[=] call[call[name[line].strip, parameter[]].split, parameter[]] if call[call[name[rake]][<ast.UnaryOp object at 0x7da20cabd7e0>].isalpha, parameter[]] begin[:] <ast.Tuple object at 0x7da20cabdff0> assign[=] call[name[mplstereonet].parse_rake, parameter[name[strike], name[dip], name[rake]]] call[name[data].append, parameter[list[[<ast.Name object at 0x7da20cabc520>, <ast.Name object at 0x7da20cabf940>, <ast.Name object at 0x7da20cabce80>]]]] <ast.Tuple object at 0x7da18bc718d0> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da18bc73b20>]] return[tuple[[<ast.Name object at 0x7da18bc731f0>, <ast.Name object at 0x7da18bc71660>, <ast.Name object at 0x7da18bc73460>]]]
keyword[def] identifier[load] (): literal[string] identifier[datadir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ) identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[datadir] , literal[string] ) identifier[data] =[] keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[infile] : keyword[for] identifier[line] keyword[in] identifier[infile] : keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): keyword[continue] identifier[strike] , identifier[dip] , identifier[rake] = identifier[line] . identifier[strip] (). identifier[split] () keyword[if] identifier[rake] [- literal[int] ]. identifier[isalpha] (): identifier[strike] , identifier[dip] , identifier[rake] = identifier[mplstereonet] . identifier[parse_rake] ( identifier[strike] , identifier[dip] , identifier[rake] ) keyword[else] : identifier[strike] , identifier[dip] = identifier[mplstereonet] . identifier[parse_strike_dip] ( identifier[strike] , identifier[dip] ) identifier[azimuth] = identifier[float] ( identifier[rake] ) identifier[rake] = identifier[mplstereonet] . identifier[azimuth2rake] ( identifier[strike] , identifier[dip] , identifier[azimuth] ) identifier[data] . identifier[append] ([ identifier[strike] , identifier[dip] , identifier[rake] ]) identifier[strike] , identifier[dip] , identifier[rake] = identifier[zip] (* identifier[data] ) keyword[return] identifier[strike] , identifier[dip] , identifier[rake]
def load():
    """Read data from a text file on disk.""" # Get the data file relative to this file's location... datadir = os.path.dirname(__file__)
    filename = os.path.join(datadir, 'angelier_data.txt')
    data = []
    with open(filename, 'r') as infile:
        for line in infile:
            # Skip comments
            if line.startswith('#'):
                continue # depends on [control=['if'], data=[]]
            # First column: strike, second: dip, third: rake.
            (strike, dip, rake) = line.strip().split()
            if rake[-1].isalpha():
                # If there's a directional letter on the rake column, parse it
                # normally.
                (strike, dip, rake) = mplstereonet.parse_rake(strike, dip, rake) # depends on [control=['if'], data=[]]
            else:
                # Otherwise, it's actually an azimuthal measurement of the
                # slickenside directions, so we need to convert it to a rake.
                (strike, dip) = mplstereonet.parse_strike_dip(strike, dip)
                azimuth = float(rake)
                rake = mplstereonet.azimuth2rake(strike, dip, azimuth)
            data.append([strike, dip, rake]) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['infile']]
    # Separate the columns back out
    (strike, dip, rake) = zip(*data)
    return (strike, dip, rake)
def page_prev(self): """Go to the previous page.""" window_start = (self.parent.value('window_start') - self.parent.value('window_length')) if window_start < 0: return self.parent.overview.update_position(window_start)
def function[page_prev, parameter[self]]: constant[Go to the previous page.] variable[window_start] assign[=] binary_operation[call[name[self].parent.value, parameter[constant[window_start]]] - call[name[self].parent.value, parameter[constant[window_length]]]] if compare[name[window_start] less[<] constant[0]] begin[:] return[None] call[name[self].parent.overview.update_position, parameter[name[window_start]]]
keyword[def] identifier[page_prev] ( identifier[self] ): literal[string] identifier[window_start] =( identifier[self] . identifier[parent] . identifier[value] ( literal[string] )- identifier[self] . identifier[parent] . identifier[value] ( literal[string] )) keyword[if] identifier[window_start] < literal[int] : keyword[return] identifier[self] . identifier[parent] . identifier[overview] . identifier[update_position] ( identifier[window_start] )
def page_prev(self): """Go to the previous page.""" window_start = self.parent.value('window_start') - self.parent.value('window_length') if window_start < 0: return # depends on [control=['if'], data=[]] self.parent.overview.update_position(window_start)
def cut_range(string):
    """
    A custom argparse 'type' to deal with sequence ranges such as 5:500.

    Returns a 0-based slice corresponding to the selection defined by the range
    """
    value_range = string.split(':')
    if len(value_range) == 1:
        start = int(value_range[0])
        stop = start
    elif len(value_range) == 2:
        start, stop = tuple(int(i) if i else None
                            for i in value_range)
    else:
        msg = "{0} is not a valid, 1-indexed range.".format(string)
        raise argparse.ArgumentTypeError(msg)

    if start == 0 or (stop or sys.maxsize) < (start or 0):
        msg = "{0} is not a valid, 1-indexed range.".format(string)
        raise argparse.ArgumentTypeError(msg)

    # Convert from 1-indexed to 0-indexed
    if start is not None and start > 0:
        start -= 1

    return slice(start, stop)
def function[cut_range, parameter[string]]: constant[ A custom argparse 'type' to deal with sequence ranges such as 5:500. Returns a 0-based slice corresponding to the selection defined by the range ] variable[value_range] assign[=] call[name[string].split, parameter[constant[:]]] if compare[call[name[len], parameter[name[value_range]]] equal[==] constant[1]] begin[:] variable[start] assign[=] call[name[int], parameter[call[name[value_range]][constant[0]]]] variable[stop] assign[=] name[start] if <ast.BoolOp object at 0x7da1b1a28df0> begin[:] variable[msg] assign[=] call[constant[{0} is not a valid, 1-indexed range.].format, parameter[name[string]]] <ast.Raise object at 0x7da1b1a2af80> if <ast.BoolOp object at 0x7da1b1a297e0> begin[:] <ast.AugAssign object at 0x7da1b1a2afb0> return[call[name[slice], parameter[name[start], name[stop]]]]
keyword[def] identifier[cut_range] ( identifier[string] ): literal[string] identifier[value_range] = identifier[string] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[value_range] )== literal[int] : identifier[start] = identifier[int] ( identifier[value_range] [ literal[int] ]) identifier[stop] = identifier[start] keyword[elif] identifier[len] ( identifier[value_range] )== literal[int] : identifier[start] , identifier[stop] = identifier[tuple] ( identifier[int] ( identifier[i] ) keyword[if] identifier[i] keyword[else] keyword[None] keyword[for] identifier[i] keyword[in] identifier[value_range] ) keyword[else] : identifier[msg] = literal[string] . identifier[format] ( identifier[string] ) keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] ( identifier[msg] ) keyword[if] identifier[start] == literal[int] keyword[or] ( identifier[stop] keyword[or] identifier[sys] . identifier[maxsize] )<( identifier[start] keyword[or] literal[int] ): identifier[msg] = literal[string] . identifier[format] ( identifier[string] ) keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] ( identifier[msg] ) keyword[if] identifier[start] keyword[is] keyword[not] keyword[None] keyword[and] identifier[start] > literal[int] : identifier[start] -= literal[int] keyword[return] identifier[slice] ( identifier[start] , identifier[stop] )
def cut_range(string):
    """
    A custom argparse 'type' to deal with sequence ranges such as 5:500.

    Returns a 0-based slice corresponding to the selection defined by the range
    """
    value_range = string.split(':')
    if len(value_range) == 1:
        start = int(value_range[0])
        stop = start # depends on [control=['if'], data=[]]
    elif len(value_range) == 2:
        (start, stop) = tuple((int(i) if i else None for i in value_range)) # depends on [control=['if'], data=[]]
    else:
        msg = '{0} is not a valid, 1-indexed range.'.format(string)
        raise argparse.ArgumentTypeError(msg)
    if start == 0 or (stop or sys.maxsize) < (start or 0):
        msg = '{0} is not a valid, 1-indexed range.'.format(string)
        raise argparse.ArgumentTypeError(msg) # depends on [control=['if'], data=[]]
    # Convert from 1-indexed to 0-indexed
    if start is not None and start > 0:
        start -= 1 # depends on [control=['if'], data=[]]
    return slice(start, stop)
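Worked examples of the 1-indexed parsing; each call returns a 0-based slice:

cut_range('5:500')  # slice(4, 500)
cut_range('5:')     # slice(4, None)  -- open-ended stop
cut_range(':500')   # slice(None, 500)
cut_range('5')      # slice(4, 5)     -- a single position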
def node_from_ini(ini_file, nodefactory=Node, root_name='ini'): """ Convert a .ini file into a Node object. :param ini_file: a filename or a file like object in read mode """ fileobj = open(ini_file) if isinstance(ini_file, str) else ini_file cfp = configparser.RawConfigParser() cfp.read_file(fileobj) root = nodefactory(root_name) sections = cfp.sections() for section in sections: params = dict(cfp.items(section)) root.append(Node(section, params)) return root
def function[node_from_ini, parameter[ini_file, nodefactory, root_name]]: constant[ Convert a .ini file into a Node object. :param ini_file: a filename or a file like object in read mode ] variable[fileobj] assign[=] <ast.IfExp object at 0x7da207f02da0> variable[cfp] assign[=] call[name[configparser].RawConfigParser, parameter[]] call[name[cfp].read_file, parameter[name[fileobj]]] variable[root] assign[=] call[name[nodefactory], parameter[name[root_name]]] variable[sections] assign[=] call[name[cfp].sections, parameter[]] for taget[name[section]] in starred[name[sections]] begin[:] variable[params] assign[=] call[name[dict], parameter[call[name[cfp].items, parameter[name[section]]]]] call[name[root].append, parameter[call[name[Node], parameter[name[section], name[params]]]]] return[name[root]]
keyword[def] identifier[node_from_ini] ( identifier[ini_file] , identifier[nodefactory] = identifier[Node] , identifier[root_name] = literal[string] ): literal[string] identifier[fileobj] = identifier[open] ( identifier[ini_file] ) keyword[if] identifier[isinstance] ( identifier[ini_file] , identifier[str] ) keyword[else] identifier[ini_file] identifier[cfp] = identifier[configparser] . identifier[RawConfigParser] () identifier[cfp] . identifier[read_file] ( identifier[fileobj] ) identifier[root] = identifier[nodefactory] ( identifier[root_name] ) identifier[sections] = identifier[cfp] . identifier[sections] () keyword[for] identifier[section] keyword[in] identifier[sections] : identifier[params] = identifier[dict] ( identifier[cfp] . identifier[items] ( identifier[section] )) identifier[root] . identifier[append] ( identifier[Node] ( identifier[section] , identifier[params] )) keyword[return] identifier[root]
def node_from_ini(ini_file, nodefactory=Node, root_name='ini'): """ Convert a .ini file into a Node object. :param ini_file: a filename or a file like object in read mode """ fileobj = open(ini_file) if isinstance(ini_file, str) else ini_file cfp = configparser.RawConfigParser() cfp.read_file(fileobj) root = nodefactory(root_name) sections = cfp.sections() for section in sections: params = dict(cfp.items(section)) root.append(Node(section, params)) # depends on [control=['for'], data=['section']] return root
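A small sketch using an in-memory file so it runs standalone; the default nodefactory and root_name are used:

import io

ini = io.StringIO('[general]\ncalculation_mode = classical\n')
root = node_from_ini(ini)  # a Node named 'ini' with one child Node per section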
def vee_map(skew):
    """Return the vee map of a skew-symmetric matrix
    """
    vec = 1/2 * np.array([skew[2,1] - skew[1,2], skew[0,2] - skew[2,0], skew[1,0] - skew[0,1]])
    return vec
def function[vee_map, parameter[skew]]: constant[Return the vee map of a skew-symmetric matrix ] variable[vec] assign[=] binary_operation[binary_operation[constant[1] / constant[2]] * call[name[np].array, parameter[list[[<ast.BinOp object at 0x7da1b1a77c10>, <ast.BinOp object at 0x7da1b1b0d2d0>, <ast.BinOp object at 0x7da1b1b0ef20>]]]]] return[name[vec]]
keyword[def] identifier[vee_map] ( identifier[skew] ): literal[string] identifier[vec] = literal[int] / literal[int] * identifier[np] . identifier[array] ([ identifier[skew] [ literal[int] , literal[int] ]- identifier[skew] [ literal[int] , literal[int] ], identifier[skew] [ literal[int] , literal[int] ]- identifier[skew] [ literal[int] , literal[int] ], identifier[skew] [ literal[int] , literal[int] ]- identifier[skew] [ literal[int] , literal[int] ]]) keyword[return] identifier[vec]
def vee_map(skew):
    """Return the vee map of a skew-symmetric matrix
    """
    vec = 1 / 2 * np.array([skew[2, 1] - skew[1, 2], skew[0, 2] - skew[2, 0], skew[1, 0] - skew[0, 1]])
    return vec
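A quick round-trip check of the identity vee(hat(v)) == v; hat_map is a demo helper written here, not part of the source:

import numpy as np

def hat_map(v):
    # build the skew-symmetric matrix whose vee map is v
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])

v = np.array([1.0, 2.0, 3.0])
assert np.allclose(vee_map(hat_map(v)), v)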
def find_duplicates(filenames, max_size): """Find duplicates in a list of files, comparing up to `max_size` bytes. Returns a 2-tuple of two values: ``(duplicate_groups, errors)``. `duplicate_groups` is a (possibly empty) list of lists: the names of files that have at least two copies, grouped together. `errors` is a list of error messages that occurred. If empty, there were no errors. For example, assuming ``a1`` and ``a2`` are identical, ``c1`` and ``c2`` are identical, and ``b`` is different from all others:: >>> dups, errs = find_duplicates(['a1', 'a2', 'b', 'c1', 'c2'], 1024) >>> dups [['a1', 'a2'], ['c1', 'c2']] >>> errs [] Note that ``b`` is not included in the results, as it has no duplicates. """ errors = [] # shortcut: can't have duplicates if there aren't at least 2 files if len(filenames) < 2: return [], errors # shortcut: if comparing 0 bytes, they're all the same if max_size == 0: return [filenames], errors files_by_md5 = {} for filename in filenames: try: md5 = calculate_md5(filename, max_size) except EnvironmentError as e: msg = "unable to calculate MD5 for '%s': %s" % (filename, e.strerror) sys.stderr.write("%s\n" % msg) errors.append(msg) continue if md5 not in files_by_md5: # unique beginning so far; index it on its own files_by_md5[md5] = [filename] else: # found a potential duplicate (same beginning) files_by_md5[md5].append(filename) # Filter out the unique files (lists of files with the same md5 that # only contain 1 file), and create a list of the lists of duplicates. # Don't use values() because on Python 2 this creates a list of all # values (file lists), and that may be very large. duplicates = [l for l in py3compat.itervalues(files_by_md5) if len(l) >= 2] return duplicates, errors
def function[find_duplicates, parameter[filenames, max_size]]: constant[Find duplicates in a list of files, comparing up to `max_size` bytes. Returns a 2-tuple of two values: ``(duplicate_groups, errors)``. `duplicate_groups` is a (possibly empty) list of lists: the names of files that have at least two copies, grouped together. `errors` is a list of error messages that occurred. If empty, there were no errors. For example, assuming ``a1`` and ``a2`` are identical, ``c1`` and ``c2`` are identical, and ``b`` is different from all others:: >>> dups, errs = find_duplicates(['a1', 'a2', 'b', 'c1', 'c2'], 1024) >>> dups [['a1', 'a2'], ['c1', 'c2']] >>> errs [] Note that ``b`` is not included in the results, as it has no duplicates. ] variable[errors] assign[=] list[[]] if compare[call[name[len], parameter[name[filenames]]] less[<] constant[2]] begin[:] return[tuple[[<ast.List object at 0x7da1b28ae830>, <ast.Name object at 0x7da1b28af1f0>]]] if compare[name[max_size] equal[==] constant[0]] begin[:] return[tuple[[<ast.List object at 0x7da1b28af5b0>, <ast.Name object at 0x7da1b28af610>]]] variable[files_by_md5] assign[=] dictionary[[], []] for taget[name[filename]] in starred[name[filenames]] begin[:] <ast.Try object at 0x7da1b28ac550> if compare[name[md5] <ast.NotIn object at 0x7da2590d7190> name[files_by_md5]] begin[:] call[name[files_by_md5]][name[md5]] assign[=] list[[<ast.Name object at 0x7da1b28adc90>]] variable[duplicates] assign[=] <ast.ListComp object at 0x7da1b28ad8d0> return[tuple[[<ast.Name object at 0x7da1b28af9a0>, <ast.Name object at 0x7da1b28add50>]]]
keyword[def] identifier[find_duplicates] ( identifier[filenames] , identifier[max_size] ): literal[string] identifier[errors] =[] keyword[if] identifier[len] ( identifier[filenames] )< literal[int] : keyword[return] [], identifier[errors] keyword[if] identifier[max_size] == literal[int] : keyword[return] [ identifier[filenames] ], identifier[errors] identifier[files_by_md5] ={} keyword[for] identifier[filename] keyword[in] identifier[filenames] : keyword[try] : identifier[md5] = identifier[calculate_md5] ( identifier[filename] , identifier[max_size] ) keyword[except] identifier[EnvironmentError] keyword[as] identifier[e] : identifier[msg] = literal[string] %( identifier[filename] , identifier[e] . identifier[strerror] ) identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] % identifier[msg] ) identifier[errors] . identifier[append] ( identifier[msg] ) keyword[continue] keyword[if] identifier[md5] keyword[not] keyword[in] identifier[files_by_md5] : identifier[files_by_md5] [ identifier[md5] ]=[ identifier[filename] ] keyword[else] : identifier[files_by_md5] [ identifier[md5] ]. identifier[append] ( identifier[filename] ) identifier[duplicates] =[ identifier[l] keyword[for] identifier[l] keyword[in] identifier[py3compat] . identifier[itervalues] ( identifier[files_by_md5] ) keyword[if] identifier[len] ( identifier[l] )>= literal[int] ] keyword[return] identifier[duplicates] , identifier[errors]
def find_duplicates(filenames, max_size): """Find duplicates in a list of files, comparing up to `max_size` bytes. Returns a 2-tuple of two values: ``(duplicate_groups, errors)``. `duplicate_groups` is a (possibly empty) list of lists: the names of files that have at least two copies, grouped together. `errors` is a list of error messages that occurred. If empty, there were no errors. For example, assuming ``a1`` and ``a2`` are identical, ``c1`` and ``c2`` are identical, and ``b`` is different from all others:: >>> dups, errs = find_duplicates(['a1', 'a2', 'b', 'c1', 'c2'], 1024) >>> dups [['a1', 'a2'], ['c1', 'c2']] >>> errs [] Note that ``b`` is not included in the results, as it has no duplicates. """ errors = [] # shortcut: can't have duplicates if there aren't at least 2 files if len(filenames) < 2: return ([], errors) # depends on [control=['if'], data=[]] # shortcut: if comparing 0 bytes, they're all the same if max_size == 0: return ([filenames], errors) # depends on [control=['if'], data=[]] files_by_md5 = {} for filename in filenames: try: md5 = calculate_md5(filename, max_size) # depends on [control=['try'], data=[]] except EnvironmentError as e: msg = "unable to calculate MD5 for '%s': %s" % (filename, e.strerror) sys.stderr.write('%s\n' % msg) errors.append(msg) continue # depends on [control=['except'], data=['e']] if md5 not in files_by_md5: # unique beginning so far; index it on its own files_by_md5[md5] = [filename] # depends on [control=['if'], data=['md5', 'files_by_md5']] else: # found a potential duplicate (same beginning) files_by_md5[md5].append(filename) # depends on [control=['for'], data=['filename']] # Filter out the unique files (lists of files with the same md5 that # only contain 1 file), and create a list of the lists of duplicates. # Don't use values() because on Python 2 this creates a list of all # values (file lists), and that may be very large. duplicates = [l for l in py3compat.itervalues(files_by_md5) if len(l) >= 2] return (duplicates, errors)
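An end-to-end sketch with real temporary files; calculate_md5 is assumed importable alongside find_duplicates:

import os
import tempfile

d = tempfile.mkdtemp()
paths = []
for name, data in [('a1', b'same'), ('a2', b'same'), ('b', b'diff')]:
    path = os.path.join(d, name)
    with open(path, 'wb') as f:
        f.write(data)
    paths.append(path)

dups, errs = find_duplicates(paths, 1024)
# dups groups a1 with a2; b is unique and omitted; errs is empty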
def matches(self, stream): """Check if this selector matches the given stream Args: stream (DataStream): The stream to check Returns: bool: True if this selector matches the stream """ if self.match_type != stream.stream_type: return False if self.match_id is not None: return self.match_id == stream.stream_id if self.match_spec == DataStreamSelector.MatchUserOnly: return not stream.system elif self.match_spec == DataStreamSelector.MatchSystemOnly: return stream.system elif self.match_spec == DataStreamSelector.MatchUserAndBreaks: return (not stream.system) or (stream.system and (stream.stream_id in DataStream.KnownBreakStreams)) # The other case is that match_spec is MatchCombined, which matches everything # regardless of system or user flag return True
def function[matches, parameter[self, stream]]: constant[Check if this selector matches the given stream Args: stream (DataStream): The stream to check Returns: bool: True if this selector matches the stream ] if compare[name[self].match_type not_equal[!=] name[stream].stream_type] begin[:] return[constant[False]] if compare[name[self].match_id is_not constant[None]] begin[:] return[compare[name[self].match_id equal[==] name[stream].stream_id]] if compare[name[self].match_spec equal[==] name[DataStreamSelector].MatchUserOnly] begin[:] return[<ast.UnaryOp object at 0x7da20c6a8a60>] return[constant[True]]
keyword[def] identifier[matches] ( identifier[self] , identifier[stream] ): literal[string] keyword[if] identifier[self] . identifier[match_type] != identifier[stream] . identifier[stream_type] : keyword[return] keyword[False] keyword[if] identifier[self] . identifier[match_id] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[self] . identifier[match_id] == identifier[stream] . identifier[stream_id] keyword[if] identifier[self] . identifier[match_spec] == identifier[DataStreamSelector] . identifier[MatchUserOnly] : keyword[return] keyword[not] identifier[stream] . identifier[system] keyword[elif] identifier[self] . identifier[match_spec] == identifier[DataStreamSelector] . identifier[MatchSystemOnly] : keyword[return] identifier[stream] . identifier[system] keyword[elif] identifier[self] . identifier[match_spec] == identifier[DataStreamSelector] . identifier[MatchUserAndBreaks] : keyword[return] ( keyword[not] identifier[stream] . identifier[system] ) keyword[or] ( identifier[stream] . identifier[system] keyword[and] ( identifier[stream] . identifier[stream_id] keyword[in] identifier[DataStream] . identifier[KnownBreakStreams] )) keyword[return] keyword[True]
def matches(self, stream): """Check if this selector matches the given stream Args: stream (DataStream): The stream to check Returns: bool: True if this selector matches the stream """ if self.match_type != stream.stream_type: return False # depends on [control=['if'], data=[]] if self.match_id is not None: return self.match_id == stream.stream_id # depends on [control=['if'], data=[]] if self.match_spec == DataStreamSelector.MatchUserOnly: return not stream.system # depends on [control=['if'], data=[]] elif self.match_spec == DataStreamSelector.MatchSystemOnly: return stream.system # depends on [control=['if'], data=[]] elif self.match_spec == DataStreamSelector.MatchUserAndBreaks: return not stream.system or (stream.system and stream.stream_id in DataStream.KnownBreakStreams) # depends on [control=['if'], data=[]] # The other case is that match_spec is MatchCombined, which matches everything # regardless of system or user flag return True
def delay(self, methodname, *args, **kwargs):
    """Call without waiting for the result; fetch it later via the system method getresult.

    Parameters:

    methodname (str): - name of the method to call

    args (Any): - positional arguments for the method

    kwargs (Any): - keyword arguments for the method

    """
    ID = str(uuid.uuid4())
    self.send_query(ID, methodname, False, *args, **kwargs)
    return ID
def function[delay, parameter[self, methodname]]: constant[Call without waiting for the result; fetch it later via the system method getresult. Parameters: methodname (str): - name of the method to call args (Any): - positional arguments for the method kwargs (Any): - keyword arguments for the method ] variable[ID] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]] call[name[self].send_query, parameter[name[ID], name[methodname], constant[False], <ast.Starred object at 0x7da20c6abc70>]] return[name[ID]]
keyword[def] identifier[delay] ( identifier[self] , identifier[methodname] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[ID] = identifier[str] ( identifier[uuid] . identifier[uuid4] ()) identifier[self] . identifier[send_query] ( identifier[ID] , identifier[methodname] , keyword[False] ,* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[ID]
def delay(self, methodname, *args, **kwargs):
    """Call without waiting for the result; fetch it later via the system method getresult.

    Parameters:

    methodname (str): - name of the method to call

    args (Any): - positional arguments for the method

    kwargs (Any): - keyword arguments for the method

    """
    ID = str(uuid.uuid4())
    self.send_query(ID, methodname, False, *args, **kwargs)
    return ID
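A fire-and-forget usage sketch; the client object and the getresult counterpart named in the docstring are assumptions about the surrounding RPC class:

task_id = client.delay('train_model', epochs=10)  # returns a UUID immediately
result = client.getresult(task_id)                # fetch the result later by ID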
def individual_dict(self, ind_ids): """Return a dict with ind_id as key and Individual as values.""" ind_dict = {ind.ind_id: ind for ind in self.individuals(ind_ids=ind_ids)} return ind_dict
def function[individual_dict, parameter[self, ind_ids]]: constant[Return a dict with ind_id as key and Individual as values.] variable[ind_dict] assign[=] <ast.DictComp object at 0x7da2044c1a80> return[name[ind_dict]]
keyword[def] identifier[individual_dict] ( identifier[self] , identifier[ind_ids] ): literal[string] identifier[ind_dict] ={ identifier[ind] . identifier[ind_id] : identifier[ind] keyword[for] identifier[ind] keyword[in] identifier[self] . identifier[individuals] ( identifier[ind_ids] = identifier[ind_ids] )} keyword[return] identifier[ind_dict]
def individual_dict(self, ind_ids): """Return a dict with ind_id as key and Individual as values.""" ind_dict = {ind.ind_id: ind for ind in self.individuals(ind_ids=ind_ids)} return ind_dict
def is_collection(self, path, environ):
    """Return True if path maps to an existing collection resource.

    This method should only be used if no other information is queried
    for <path>. Otherwise a _DAVResource should be created first.
    """
    res = self.get_resource_inst(path, environ)
    return res and res.is_collection
def function[is_collection, parameter[self, path, environ]]: constant[Return True if path maps to an existing collection resource. This method should only be used if no other information is queried for <path>. Otherwise a _DAVResource should be created first. ] variable[res] assign[=] call[name[self].get_resource_inst, parameter[name[path], name[environ]]] return[<ast.BoolOp object at 0x7da1b0077df0>]
keyword[def] identifier[is_collection] ( identifier[self] , identifier[path] , identifier[environ] ): literal[string] identifier[res] = identifier[self] . identifier[get_resource_inst] ( identifier[path] , identifier[environ] ) keyword[return] identifier[res] keyword[and] identifier[res] . identifier[is_collection]
def is_collection(self, path, environ):
    """Return True if path maps to an existing collection resource.

    This method should only be used if no other information is queried
    for <path>. Otherwise a _DAVResource should be created first.
    """
    res = self.get_resource_inst(path, environ)
    return res and res.is_collection
def entity_type(self, entity_type): """Sets the entity_type of this SavedSearch. The Wavefront entity type over which to search # noqa: E501 :param entity_type: The entity_type of this SavedSearch. # noqa: E501 :type: str """ if entity_type is None: raise ValueError("Invalid value for `entity_type`, must not be `None`") # noqa: E501 allowed_values = ["DASHBOARD", "ALERT", "MAINTENANCE_WINDOW", "NOTIFICANT", "EVENT", "SOURCE", "EXTERNAL_LINK", "AGENT", "CLOUD_INTEGRATION", "APPLICATION", "REGISTERED_QUERY", "USER", "USER_GROUP"] # noqa: E501 if entity_type not in allowed_values: raise ValueError( "Invalid value for `entity_type` ({0}), must be one of {1}" # noqa: E501 .format(entity_type, allowed_values) ) self._entity_type = entity_type
def function[entity_type, parameter[self, entity_type]]: constant[Sets the entity_type of this SavedSearch. The Wavefront entity type over which to search # noqa: E501 :param entity_type: The entity_type of this SavedSearch. # noqa: E501 :type: str ] if compare[name[entity_type] is constant[None]] begin[:] <ast.Raise object at 0x7da18bc703a0> variable[allowed_values] assign[=] list[[<ast.Constant object at 0x7da18bc73910>, <ast.Constant object at 0x7da18bc71420>, <ast.Constant object at 0x7da18bc71750>, <ast.Constant object at 0x7da18bc73af0>, <ast.Constant object at 0x7da18bc71cc0>, <ast.Constant object at 0x7da18bc71330>, <ast.Constant object at 0x7da18bc717e0>, <ast.Constant object at 0x7da18bc71ea0>, <ast.Constant object at 0x7da18bc70040>, <ast.Constant object at 0x7da18bc73310>, <ast.Constant object at 0x7da18bc73e20>, <ast.Constant object at 0x7da18bc72a40>, <ast.Constant object at 0x7da18bc726e0>]] if compare[name[entity_type] <ast.NotIn object at 0x7da2590d7190> name[allowed_values]] begin[:] <ast.Raise object at 0x7da18bc712a0> name[self]._entity_type assign[=] name[entity_type]
keyword[def] identifier[entity_type] ( identifier[self] , identifier[entity_type] ): literal[string] keyword[if] identifier[entity_type] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[allowed_values] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] identifier[entity_type] keyword[not] keyword[in] identifier[allowed_values] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[entity_type] , identifier[allowed_values] ) ) identifier[self] . identifier[_entity_type] = identifier[entity_type]
def entity_type(self, entity_type): """Sets the entity_type of this SavedSearch. The Wavefront entity type over which to search # noqa: E501 :param entity_type: The entity_type of this SavedSearch. # noqa: E501 :type: str """ if entity_type is None: raise ValueError('Invalid value for `entity_type`, must not be `None`') # noqa: E501 # depends on [control=['if'], data=[]] allowed_values = ['DASHBOARD', 'ALERT', 'MAINTENANCE_WINDOW', 'NOTIFICANT', 'EVENT', 'SOURCE', 'EXTERNAL_LINK', 'AGENT', 'CLOUD_INTEGRATION', 'APPLICATION', 'REGISTERED_QUERY', 'USER', 'USER_GROUP'] # noqa: E501 if entity_type not in allowed_values: # noqa: E501 raise ValueError('Invalid value for `entity_type` ({0}), must be one of {1}'.format(entity_type, allowed_values)) # depends on [control=['if'], data=['entity_type', 'allowed_values']] self._entity_type = entity_type
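The setter's validation in action; constructing SavedSearch with no arguments is an assumption about the generated client:

search = SavedSearch()
search.entity_type = 'DASHBOARD'  # accepted: one of allowed_values
search.entity_type = 'WIDGET'     # raises ValueError: not a known entity type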
def reset_tip_tracking(self): """ Resets the :any:`Pipette` tip tracking, "refilling" the tip racks """ self.current_tip(None) self.tip_rack_iter = iter([]) if self.has_tip_rack(): iterables = self.tip_racks if self.channels > 1: iterables = [c for rack in self.tip_racks for c in rack.cols] else: iterables = [w for rack in self.tip_racks for w in rack] if self.starting_tip: iterables = iterables[iterables.index(self.starting_tip):] self.tip_rack_iter = itertools.chain(iterables)
def function[reset_tip_tracking, parameter[self]]: constant[ Resets the :any:`Pipette` tip tracking, "refilling" the tip racks ] call[name[self].current_tip, parameter[constant[None]]] name[self].tip_rack_iter assign[=] call[name[iter], parameter[list[[]]]] if call[name[self].has_tip_rack, parameter[]] begin[:] variable[iterables] assign[=] name[self].tip_racks if compare[name[self].channels greater[>] constant[1]] begin[:] variable[iterables] assign[=] <ast.ListComp object at 0x7da1b26afca0> if name[self].starting_tip begin[:] variable[iterables] assign[=] call[name[iterables]][<ast.Slice object at 0x7da2044c3c40>] name[self].tip_rack_iter assign[=] call[name[itertools].chain, parameter[name[iterables]]]
keyword[def] identifier[reset_tip_tracking] ( identifier[self] ): literal[string] identifier[self] . identifier[current_tip] ( keyword[None] ) identifier[self] . identifier[tip_rack_iter] = identifier[iter] ([]) keyword[if] identifier[self] . identifier[has_tip_rack] (): identifier[iterables] = identifier[self] . identifier[tip_racks] keyword[if] identifier[self] . identifier[channels] > literal[int] : identifier[iterables] =[ identifier[c] keyword[for] identifier[rack] keyword[in] identifier[self] . identifier[tip_racks] keyword[for] identifier[c] keyword[in] identifier[rack] . identifier[cols] ] keyword[else] : identifier[iterables] =[ identifier[w] keyword[for] identifier[rack] keyword[in] identifier[self] . identifier[tip_racks] keyword[for] identifier[w] keyword[in] identifier[rack] ] keyword[if] identifier[self] . identifier[starting_tip] : identifier[iterables] = identifier[iterables] [ identifier[iterables] . identifier[index] ( identifier[self] . identifier[starting_tip] ):] identifier[self] . identifier[tip_rack_iter] = identifier[itertools] . identifier[chain] ( identifier[iterables] )
def reset_tip_tracking(self): """ Resets the :any:`Pipette` tip tracking, "refilling" the tip racks """ self.current_tip(None) self.tip_rack_iter = iter([]) if self.has_tip_rack(): iterables = self.tip_racks if self.channels > 1: iterables = [c for rack in self.tip_racks for c in rack.cols] # depends on [control=['if'], data=[]] else: iterables = [w for rack in self.tip_racks for w in rack] if self.starting_tip: iterables = iterables[iterables.index(self.starting_tip):] # depends on [control=['if'], data=[]] self.tip_rack_iter = itertools.chain(iterables) # depends on [control=['if'], data=[]]
def loader(schema, validator=CerberusValidator, update=None): """Create a load function based on schema dict and Validator class. :param schema: a Cerberus schema dict. :param validator: the validator class which must be a subclass of more.cerberus.CerberusValidator which is the default. :param update: will pass the update flag to the validator, when ``True`` the ``required`` rules will not be checked. By default it will be set for PUT and PATCH requests to ``True`` and for other requests to ``False``. You can plug this ``load`` function into a json view. Returns a ``load`` function that takes a request JSON body and uses the schema to validate it. This function raises :class:`more.cerberus.ValidationError` if validation is not successful. """ if not issubclass(validator, CerberusValidator): raise TypeError( "Validator must be a subclass of more.cerberus.CerberusValidator" ) return partial(load, schema, validator, update)
def function[loader, parameter[schema, validator, update]]: constant[Create a load function based on schema dict and Validator class. :param schema: a Cerberus schema dict. :param validator: the validator class which must be a subclass of more.cerberus.CerberusValidator which is the default. :param update: will pass the update flag to the validator, when ``True`` the ``required`` rules will not be checked. By default it will be set for PUT and PATCH requests to ``True`` and for other requests to ``False``. You can plug this ``load`` function into a json view. Returns a ``load`` function that takes a request JSON body and uses the schema to validate it. This function raises :class:`more.cerberus.ValidationError` if validation is not successful. ] if <ast.UnaryOp object at 0x7da1b2529a80> begin[:] <ast.Raise object at 0x7da1b2555090> return[call[name[partial], parameter[name[load], name[schema], name[validator], name[update]]]]
keyword[def] identifier[loader] ( identifier[schema] , identifier[validator] = identifier[CerberusValidator] , identifier[update] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[issubclass] ( identifier[validator] , identifier[CerberusValidator] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[return] identifier[partial] ( identifier[load] , identifier[schema] , identifier[validator] , identifier[update] )
def loader(schema, validator=CerberusValidator, update=None): """Create a load function based on schema dict and Validator class. :param schema: a Cerberus schema dict. :param validator: the validator class which must be a subclass of more.cerberus.CerberusValidator which is the default. :param update: will pass the update flag to the validator, when ``True`` the ``required`` rules will not be checked. By default it will be set for PUT and PATCH requests to ``True`` and for other requests to ``False``. You can plug this ``load`` function into a json view. Returns a ``load`` function that takes a request JSON body and uses the schema to validate it. This function raises :class:`more.cerberus.ValidationError` if validation is not successful. """ if not issubclass(validator, CerberusValidator): raise TypeError('Validator must be a subclass of more.cerberus.CerberusValidator') # depends on [control=['if'], data=[]] return partial(load, schema, validator, update)
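A sketch of wiring the returned load function into a Morepath json view; App and User are placeholder names:

user_schema = {'name': {'type': 'string', 'required': True}}

@App.json(model=User, request_method='POST', load=loader(user_schema))
def user_post(self, request, json):
    # json has already been validated against user_schema here
    return {'name': json['name']}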