Column                       Type            Range (values / string lengths)
Unnamed: 0                   int64           0–10k
repository_name              stringlengths   7–54
func_path_in_repository      stringlengths   5–223
func_name                    stringlengths   1–134
whole_func_string            stringlengths   100–30.3k
language                     stringclasses   1 value
func_code_string             stringlengths   100–30.3k
func_code_tokens             stringlengths   138–33.2k
func_documentation_string    stringlengths   1–15k
func_documentation_tokens    stringlengths   5–5.14k
split_name                   stringclasses   1 value
func_code_url                stringlengths   91–315
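Each row in the preview below pairs one Python function with its metadata: provenance fields (repository_name, func_path_in_repository, func_name, func_code_url), the function source (whole_func_string / func_code_string) and its token list, and the extracted docstring with its tokens. As a minimal sketch of how rows with this schema might be inspected, assuming the data has been exported to a local Parquet file (the file name below is a placeholder, not part of the dataset):

import pandas as pd

# Placeholder path: the preview does not say how or where the rows are stored.
df = pd.read_parquet("code_functions_train.parquet")

row = df.iloc[0]
print(row["repository_name"], row["func_name"])  # provenance of the function
print(row["func_documentation_string"])          # natural-language docstring
print(row["func_code_string"])                    # full function source
print(row["func_code_url"])                       # link to the exact lines on GitHub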
6,500
angr/angr
angr/state_plugins/fast_memory.py
SimFastMemory._translate_cond
def _translate_cond(self, c): #pylint:disable=no-self-use
    """
    Checks whether this condition can be supported by FastMemory."
    """
    if isinstance(c, claripy.ast.Base) and not c.singlevalued:
        raise SimFastMemoryError("size not supported")
    if c is None:
        return True
    else:
        return self.state.solver.eval_upto(c, 1)[0]
python
['def', '_translate_cond', '(', 'self', ',', 'c', ')', ':', '#pylint:disable=no-self-use', 'if', 'isinstance', '(', 'c', ',', 'claripy', '.', 'ast', '.', 'Base', ')', 'and', 'not', 'c', '.', 'singlevalued', ':', 'raise', 'SimFastMemoryError', '(', '"size not supported"', ')', 'if', 'c', 'is', 'None', ':', 'return', 'True', 'else', ':', 'return', 'self', '.', 'state', '.', 'solver', '.', 'eval_upto', '(', 'c', ',', '1', ')', '[', '0', ']']
Checks whether this condition can be supported by FastMemory."
['Checks', 'whether', 'this', 'condition', 'can', 'be', 'supported', 'by', 'FastMemory', '.']
train
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/fast_memory.py#L76-L85
6,501
mdiener/grace
grace/py27/slimit/parser.py
Parser.p_iteration_statement_6
def p_iteration_statement_6(self, p):
    """
    iteration_statement \
        : FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement
    """
    p[0] = ast.ForIn(item=ast.VarDecl(identifier=p[4], initializer=p[5]),
                     iterable=p[7], statement=p[9])
python
['def', 'p_iteration_statement_6', '(', 'self', ',', 'p', ')', ':', 'p', '[', '0', ']', '=', 'ast', '.', 'ForIn', '(', 'item', '=', 'ast', '.', 'VarDecl', '(', 'identifier', '=', 'p', '[', '4', ']', ',', 'initializer', '=', 'p', '[', '5', ']', ')', ',', 'iterable', '=', 'p', '[', '7', ']', ',', 'statement', '=', 'p', '[', '9', ']', ')']
iteration_statement \ : FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement
['iteration_statement', '\\', ':', 'FOR', 'LPAREN', 'VAR', 'identifier', 'initializer_noin', 'IN', 'expr', 'RPAREN', 'statement']
train
https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/parser.py#L1017-L1023
6,502
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_event_sources
def list_event_sources(self):
    """
    Returns the existing event sources.

    :rtype: ~collections.Iterable[str]
    """
    # Server does not do pagination on listings of this resource.
    # Return an iterator anyway for similarity with other API methods
    path = '/archive/{}/events/sources'.format(self._instance)
    response = self._client.get_proto(path=path)
    message = archive_pb2.EventSourceInfo()
    message.ParseFromString(response.content)
    sources = getattr(message, 'source')
    return iter(sources)
python
['def', 'list_event_sources', '(', 'self', ')', ':', '# Server does not do pagination on listings of this resource.', '# Return an iterator anyway for similarity with other API methods', 'path', '=', "'/archive/{}/events/sources'", '.', 'format', '(', 'self', '.', '_instance', ')', 'response', '=', 'self', '.', '_client', '.', 'get_proto', '(', 'path', '=', 'path', ')', 'message', '=', 'archive_pb2', '.', 'EventSourceInfo', '(', ')', 'message', '.', 'ParseFromString', '(', 'response', '.', 'content', ')', 'sources', '=', 'getattr', '(', 'message', ',', "'source'", ')', 'return', 'iter', '(', 'sources', ')']
Returns the existing event sources. :rtype: ~collections.Iterable[str]
['Returns', 'the', 'existing', 'event', 'sources', '.']
train
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L125-L138
6,503
zhmcclient/python-zhmcclient
zhmcclient_mock/_urihandler.py
UserRoleAddPermissionHandler.post
def post(method, hmc, uri, uri_parms, body, logon_required,
         wait_for_completion):
    """Operation: Add Permission to User Role."""
    assert wait_for_completion is True  # synchronous operation
    user_role_oid = uri_parms[0]
    user_role_uri = '/api/user-roles/' + user_role_oid
    try:
        user_role = hmc.lookup_by_uri(user_role_uri)
    except KeyError:
        raise InvalidResourceError(method, uri)
    check_required_fields(method, uri, body,
                          ['permitted-object', 'permitted-object-type'])
    # Reject if User Role is system-defined:
    if user_role.properties['type'] == 'system-defined':
        raise BadRequestError(
            method, uri, reason=314,
            message="Cannot add permission to "
                    "system-defined user role: {}".format(user_role_uri))
    # Apply defaults, so our internally stored copy has all fields:
    permission = copy.deepcopy(body)
    if 'include-members' not in permission:
        permission['include-members'] = False
    if 'view-only-mode' not in permission:
        permission['view-only-mode'] = True
    # Add the permission to its store (the faked User Role object):
    if user_role.properties.get('permissions', None) is None:
        user_role.properties['permissions'] = []
    user_role.properties['permissions'].append(permission)
python
['def', 'post', '(', 'method', ',', 'hmc', ',', 'uri', ',', 'uri_parms', ',', 'body', ',', 'logon_required', ',', 'wait_for_completion', ')', ':', 'assert', 'wait_for_completion', 'is', 'True', '# synchronous operation', 'user_role_oid', '=', 'uri_parms', '[', '0', ']', 'user_role_uri', '=', "'/api/user-roles/'", '+', 'user_role_oid', 'try', ':', 'user_role', '=', 'hmc', '.', 'lookup_by_uri', '(', 'user_role_uri', ')', 'except', 'KeyError', ':', 'raise', 'InvalidResourceError', '(', 'method', ',', 'uri', ')', 'check_required_fields', '(', 'method', ',', 'uri', ',', 'body', ',', '[', "'permitted-object'", ',', "'permitted-object-type'", ']', ')', '# Reject if User Role is system-defined:', 'if', 'user_role', '.', 'properties', '[', "'type'", ']', '==', "'system-defined'", ':', 'raise', 'BadRequestError', '(', 'method', ',', 'uri', ',', 'reason', '=', '314', ',', 'message', '=', '"Cannot add permission to "', '"system-defined user role: {}"', '.', 'format', '(', 'user_role_uri', ')', ')', '# Apply defaults, so our internally stored copy has all fields:', 'permission', '=', 'copy', '.', 'deepcopy', '(', 'body', ')', 'if', "'include-members'", 'not', 'in', 'permission', ':', 'permission', '[', "'include-members'", ']', '=', 'False', 'if', "'view-only-mode'", 'not', 'in', 'permission', ':', 'permission', '[', "'view-only-mode'", ']', '=', 'True', '# Add the permission to its store (the faked User Role object):', 'if', 'user_role', '.', 'properties', '.', 'get', '(', "'permissions'", ',', 'None', ')', 'is', 'None', ':', 'user_role', '.', 'properties', '[', "'permissions'", ']', '=', '[', ']', 'user_role', '.', 'properties', '[', "'permissions'", ']', '.', 'append', '(', 'permission', ')']
Operation: Add Permission to User Role.
['Operation', ':', 'Add', 'Permission', 'to', 'User', 'Role', '.']
train
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L717-L743
6,504
base4sistemas/satcfe
satcfe/entidades.py
Entidade.documento
def documento(self, *args, **kwargs):
    """Resulta no documento XML como string, que pode ou não incluir a
    declaração XML no início do documento.
    """
    forcar_unicode = kwargs.pop('forcar_unicode', False)
    incluir_xml_decl = kwargs.pop('incluir_xml_decl', True)
    doc = ET.tostring(self._xml(*args, **kwargs),
                      encoding='utf-8').decode('utf-8')
    if forcar_unicode:
        if incluir_xml_decl:
            doc = u'{}\n{}'.format(constantes.XML_DECL_UNICODE, doc)
    else:
        if incluir_xml_decl:
            doc = '{}\n{}'.format(constantes.XML_DECL, unidecode(doc))
        else:
            doc = unidecode(doc)
    return doc
python
['def', 'documento', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'forcar_unicode', '=', 'kwargs', '.', 'pop', '(', "'forcar_unicode'", ',', 'False', ')', 'incluir_xml_decl', '=', 'kwargs', '.', 'pop', '(', "'incluir_xml_decl'", ',', 'True', ')', 'doc', '=', 'ET', '.', 'tostring', '(', 'self', '.', '_xml', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ',', 'encoding', '=', "'utf-8'", ')', '.', 'decode', '(', "'utf-8'", ')', 'if', 'forcar_unicode', ':', 'if', 'incluir_xml_decl', ':', 'doc', '=', "u'{}\\n{}'", '.', 'format', '(', 'constantes', '.', 'XML_DECL_UNICODE', ',', 'doc', ')', 'else', ':', 'if', 'incluir_xml_decl', ':', 'doc', '=', "'{}\\n{}'", '.', 'format', '(', 'constantes', '.', 'XML_DECL', ',', 'unidecode', '(', 'doc', ')', ')', 'else', ':', 'doc', '=', 'unidecode', '(', 'doc', ')', 'return', 'doc']
Resulta no documento XML como string, que pode ou não incluir a declaração XML no início do documento.
['Resulta', 'no', 'documento', 'XML', 'como', 'string', 'que', 'pode', 'ou', 'não', 'incluir', 'a', 'declaração', 'XML', 'no', 'início', 'do', 'documento', '.']
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/entidades.py#L308-L324
6,505
lextoumbourou/txstripe
txstripe/resource.py
APIResource.refresh
def refresh(self):
    """Return a deferred."""
    d = self.request('get', self.instance_url())
    return d.addCallback(self.refresh_from).addCallback(lambda _: self)
python
['def', 'refresh', '(', 'self', ')', ':', 'd', '=', 'self', '.', 'request', '(', "'get'", ',', 'self', '.', 'instance_url', '(', ')', ')', 'return', 'd', '.', 'addCallback', '(', 'self', '.', 'refresh_from', ')', '.', 'addCallback', '(', 'lambda', '_', ':', 'self', ')']
Return a deferred.
['Return', 'a', 'deferred', '.']
train
https://github.com/lextoumbourou/txstripe/blob/a69e67f524258026fd1840655a0578311bba3b89/txstripe/resource.py#L138-L141
6,506
bhmm/bhmm
bhmm/api.py
estimate_hmm
def estimate_hmm(observations, nstates, lag=1, initial_model=None, output=None,
                 reversible=True, stationary=False, p=None, accuracy=1e-3,
                 maxit=1000, maxit_P=100000, mincount_connectivity=1e-2):
    r""" Estimate maximum-likelihood HMM

    Generic maximum-likelihood estimation of HMMs

    Parameters
    ----------
    observations : list of numpy arrays representing temporal data
        `observations[i]` is a 1d numpy array corresponding to the observed
        trajectory index `i`
    nstates : int
        The number of states in the model.
    lag : int
        the lag time at which observations should be read
    initial_model : HMM, optional, default=None
        If specified, the given initial model will be used to initialize the
        BHMM. Otherwise, a heuristic scheme is used to generate an initial
        guess.
    output : str, optional, default=None
        Output model type from [None, 'gaussian', 'discrete']. If None, will
        automatically select an output model type based on the format of
        observations.
    reversible : bool, optional, default=True
        If True, a prior that enforces reversible transition matrices
        (detailed balance) is used; otherwise, a standard non-reversible prior
        is used.
    stationary : bool, optional, default=False
        If True, the initial distribution of hidden states is self-consistently
        computed as the stationary distribution of the transition matrix. If
        False, it will be estimated from the starting states. Only set this to
        true if you're sure that the observation trajectories are initiated
        from a global equilibrium distribution.
    p : ndarray (nstates), optional, default=None
        Initial or fixed stationary distribution. If given and stationary=True,
        transition matrices will be estimated with the constraint that they
        have p as their stationary distribution. If given and stationary=False,
        p is the fixed initial distribution of hidden states.
    accuracy : float
        convergence threshold for EM iteration. When two the likelihood does
        not increase by more than accuracy, the iteration is stopped
        successfully.
    maxit : int
        stopping criterion for EM iteration. When so many iterations are
        performed without reaching the requested accuracy, the iteration is
        stopped without convergence (a warning is given)

    Return
    ------
    hmm : :class:`HMM <bhmm.hmm.generic_hmm.HMM>`

    """
    # select output model type
    if output is None:
        output = _guess_output_type(observations)
    if lag > 1:
        observations = lag_observations(observations, lag)
    # construct estimator
    from bhmm.estimators.maximum_likelihood import MaximumLikelihoodEstimator as _MaximumLikelihoodEstimator
    est = _MaximumLikelihoodEstimator(observations, nstates,
                                      initial_model=initial_model, output=output,
                                      reversible=reversible, stationary=stationary,
                                      p=p, accuracy=accuracy, maxit=maxit,
                                      maxit_P=maxit_P)
    # run
    est.fit()
    # set lag time
    est.hmm._lag = lag
    # return model
    # TODO: package into specific class (DiscreteHMM, GaussianHMM)
    return est.hmm
python
['def', 'estimate_hmm', '(', 'observations', ',', 'nstates', ',', 'lag', '=', '1', ',', 'initial_model', '=', 'None', ',', 'output', '=', 'None', ',', 'reversible', '=', 'True', ',', 'stationary', '=', 'False', ',', 'p', '=', 'None', ',', 'accuracy', '=', '1e-3', ',', 'maxit', '=', '1000', ',', 'maxit_P', '=', '100000', ',', 'mincount_connectivity', '=', '1e-2', ')', ':', '# select output model type', 'if', 'output', 'is', 'None', ':', 'output', '=', '_guess_output_type', '(', 'observations', ')', 'if', 'lag', '>', '1', ':', 'observations', '=', 'lag_observations', '(', 'observations', ',', 'lag', ')', '# construct estimator', 'from', 'bhmm', '.', 'estimators', '.', 'maximum_likelihood', 'import', 'MaximumLikelihoodEstimator', 'as', '_MaximumLikelihoodEstimator', 'est', '=', '_MaximumLikelihoodEstimator', '(', 'observations', ',', 'nstates', ',', 'initial_model', '=', 'initial_model', ',', 'output', '=', 'output', ',', 'reversible', '=', 'reversible', ',', 'stationary', '=', 'stationary', ',', 'p', '=', 'p', ',', 'accuracy', '=', 'accuracy', ',', 'maxit', '=', 'maxit', ',', 'maxit_P', '=', 'maxit_P', ')', '# run', 'est', '.', 'fit', '(', ')', '# set lag time', 'est', '.', 'hmm', '.', '_lag', '=', 'lag', '# return model', '# TODO: package into specific class (DiscreteHMM, GaussianHMM)', 'return', 'est', '.', 'hmm']
r""" Estimate maximum-likelihood HMM Generic maximum-likelihood estimation of HMMs Parameters ---------- observations : list of numpy arrays representing temporal data `observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i` nstates : int The number of states in the model. lag : int the lag time at which observations should be read initial_model : HMM, optional, default=None If specified, the given initial model will be used to initialize the BHMM. Otherwise, a heuristic scheme is used to generate an initial guess. output : str, optional, default=None Output model type from [None, 'gaussian', 'discrete']. If None, will automatically select an output model type based on the format of observations. reversible : bool, optional, default=True If True, a prior that enforces reversible transition matrices (detailed balance) is used; otherwise, a standard non-reversible prior is used. stationary : bool, optional, default=False If True, the initial distribution of hidden states is self-consistently computed as the stationary distribution of the transition matrix. If False, it will be estimated from the starting states. Only set this to true if you're sure that the observation trajectories are initiated from a global equilibrium distribution. p : ndarray (nstates), optional, default=None Initial or fixed stationary distribution. If given and stationary=True, transition matrices will be estimated with the constraint that they have p as their stationary distribution. If given and stationary=False, p is the fixed initial distribution of hidden states. accuracy : float convergence threshold for EM iteration. When two the likelihood does not increase by more than accuracy, the iteration is stopped successfully. maxit : int stopping criterion for EM iteration. When so many iterations are performed without reaching the requested accuracy, the iteration is stopped without convergence (a warning is given) Return ------ hmm : :class:`HMM <bhmm.hmm.generic_hmm.HMM>`
['r', 'Estimate', 'maximum', '-', 'likelihood', 'HMM']
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/api.py#L309-L372
6,507
nerdvegas/rez
src/rezgui/objects/Config.py
Config.prepend_string_list
def prepend_string_list(self, key, value, max_length_key):
    """Prepend a fixed-length string list with a new string.

    The oldest string will be removed from the list. If the string is
    already in the list, it is shuffled to the top. Use this to implement
    things like a 'most recent files' entry.
    """
    max_len = self.get(max_length_key)
    strings = self.get_string_list(key)
    strings = [value] + [x for x in strings if x != value]
    strings = strings[:max_len]

    self.beginWriteArray(key)
    for i in range(len(strings)):
        self.setArrayIndex(i)
        self.setValue("entry", strings[i])
    self.endArray()
python
['def', 'prepend_string_list', '(', 'self', ',', 'key', ',', 'value', ',', 'max_length_key', ')', ':', 'max_len', '=', 'self', '.', 'get', '(', 'max_length_key', ')', 'strings', '=', 'self', '.', 'get_string_list', '(', 'key', ')', 'strings', '=', '[', 'value', ']', '+', '[', 'x', 'for', 'x', 'in', 'strings', 'if', 'x', '!=', 'value', ']', 'strings', '=', 'strings', '[', ':', 'max_len', ']', 'self', '.', 'beginWriteArray', '(', 'key', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'strings', ')', ')', ':', 'self', '.', 'setArrayIndex', '(', 'i', ')', 'self', '.', 'setValue', '(', '"entry"', ',', 'strings', '[', 'i', ']', ')', 'self', '.', 'endArray', '(', ')']
Prepend a fixed-length string list with a new string. The oldest string will be removed from the list. If the string is already in the list, it is shuffled to the top. Use this to implement things like a 'most recent files' entry.
['Prepend', 'a', 'fixed', '-', 'length', 'string', 'list', 'with', 'a', 'new', 'string', '.']
train
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/objects/Config.py#L49-L65
6,508
panzarino/mlbgame
mlbgame/stats.py
player_stats
def player_stats(game_id):
    """Return dictionary of individual stats of a game with matching id.

    The additional pitching/batting is mostly the same stats, except it
    contains some useful stats such as groundouts/flyouts per pitcher
    (go/ao). MLB decided to have two box score files, thus we return the
    data from both.
    """
    # get data from data module
    box_score = mlbgame.data.get_box_score(game_id)
    box_score_tree = etree.parse(box_score).getroot()
    # get pitching and batting info
    pitching = box_score_tree.findall('pitching')
    batting = box_score_tree.findall('batting')
    # get parsed stats
    pitching_info = __player_stats_info(pitching, 'pitcher')
    batting_info = __player_stats_info(batting, 'batter')
    # rawboxscore not available after 2018
    try:
        raw_box_score = mlbgame.data.get_raw_box_score(game_id)
        raw_box_score_tree = etree.parse(raw_box_score).getroot()
        additional_stats = __raw_player_stats_info(raw_box_score_tree)
        addl_home_pitching = additional_stats[0]['pitchers']
        addl_home_batting = additional_stats[0]['batters']
        addl_away_pitching = additional_stats[1]['pitchers']
        addl_away_batting = additional_stats[1]['batters']
        output = {
            'home_pitching': pitching_info[0],
            'away_pitching': pitching_info[1],
            'home_batting': batting_info[0],
            'away_batting': batting_info[1],
            'home_additional_pitching': addl_home_pitching,
            'away_additional_pitching': addl_away_pitching,
            'home_additional_batting': addl_home_batting,
            'away_additional_batting': addl_away_batting
        }
    except etree.XMLSyntaxError:
        output = {
            'home_pitching': pitching_info[0],
            'away_pitching': pitching_info[1],
            'home_batting': batting_info[0],
            'away_batting': batting_info[1],
        }
    return output
python
['def', 'player_stats', '(', 'game_id', ')', ':', '# get data from data module', 'box_score', '=', 'mlbgame', '.', 'data', '.', 'get_box_score', '(', 'game_id', ')', 'box_score_tree', '=', 'etree', '.', 'parse', '(', 'box_score', ')', '.', 'getroot', '(', ')', '# get pitching and batting info', 'pitching', '=', 'box_score_tree', '.', 'findall', '(', "'pitching'", ')', 'batting', '=', 'box_score_tree', '.', 'findall', '(', "'batting'", ')', '# get parsed stats', 'pitching_info', '=', '__player_stats_info', '(', 'pitching', ',', "'pitcher'", ')', 'batting_info', '=', '__player_stats_info', '(', 'batting', ',', "'batter'", ')', '# rawboxscore not available after 2018', 'try', ':', 'raw_box_score', '=', 'mlbgame', '.', 'data', '.', 'get_raw_box_score', '(', 'game_id', ')', 'raw_box_score_tree', '=', 'etree', '.', 'parse', '(', 'raw_box_score', ')', '.', 'getroot', '(', ')', 'additional_stats', '=', '__raw_player_stats_info', '(', 'raw_box_score_tree', ')', 'addl_home_pitching', '=', 'additional_stats', '[', '0', ']', '[', "'pitchers'", ']', 'addl_home_batting', '=', 'additional_stats', '[', '0', ']', '[', "'batters'", ']', 'addl_away_pitching', '=', 'additional_stats', '[', '1', ']', '[', "'pitchers'", ']', 'addl_away_batting', '=', 'additional_stats', '[', '1', ']', '[', "'batters'", ']', 'output', '=', '{', "'home_pitching'", ':', 'pitching_info', '[', '0', ']', ',', "'away_pitching'", ':', 'pitching_info', '[', '1', ']', ',', "'home_batting'", ':', 'batting_info', '[', '0', ']', ',', "'away_batting'", ':', 'batting_info', '[', '1', ']', ',', "'home_additional_pitching'", ':', 'addl_home_pitching', ',', "'away_additional_pitching'", ':', 'addl_away_pitching', ',', "'home_additional_batting'", ':', 'addl_home_batting', ',', "'away_additional_batting'", ':', 'addl_away_batting', '}', 'except', 'etree', '.', 'XMLSyntaxError', ':', 'output', '=', '{', "'home_pitching'", ':', 'pitching_info', '[', '0', ']', ',', "'away_pitching'", ':', 'pitching_info', '[', '1', ']', ',', "'home_batting'", ':', 'batting_info', '[', '0', ']', ',', "'away_batting'", ':', 'batting_info', '[', '1', ']', ',', '}', 'return', 'output']
Return dictionary of individual stats of a game with matching id. The additional pitching/batting is mostly the same stats, except it contains some useful stats such as groundouts/flyouts per pitcher (go/ao). MLB decided to have two box score files, thus we return the data from both.
['Return', 'dictionary', 'of', 'individual', 'stats', 'of', 'a', 'game', 'with', 'matching', 'id', '.']
train
https://github.com/panzarino/mlbgame/blob/0a2d10540de793fdc3b8476aa18f5cf3b53d0b54/mlbgame/stats.py#L62-L106
6,509
pysathq/pysat
pysat/solvers.py
Glucose3.delete
def delete(self):
    """
        Destructor.
    """

    if self.glucose:
        pysolvers.glucose3_del(self.glucose)
        self.glucose = None

        if self.prfile:
            self.prfile.close()
python
['def', 'delete', '(', 'self', ')', ':', 'if', 'self', '.', 'glucose', ':', 'pysolvers', '.', 'glucose3_del', '(', 'self', '.', 'glucose', ')', 'self', '.', 'glucose', '=', 'None', 'if', 'self', '.', 'prfile', ':', 'self', '.', 'prfile', '.', 'close', '(', ')']
Destructor.
['Destructor', '.']
train
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L987-L997
6,510
lago-project/lago
lago/utils.py
load_virt_stream
def load_virt_stream(virt_fd):
    """
    Loads the given conf stream into a dict, trying different formats if
    needed

    Args:
        virt_fd (str): file like objcect with the virt config to load

    Returns:
        dict: Loaded virt config
    """
    try:
        virt_conf = json.load(virt_fd)
    except ValueError:
        virt_fd.seek(0)
        virt_conf = yaml.load(virt_fd)

    return deepcopy(virt_conf)
python
['def', 'load_virt_stream', '(', 'virt_fd', ')', ':', 'try', ':', 'virt_conf', '=', 'json', '.', 'load', '(', 'virt_fd', ')', 'except', 'ValueError', ':', 'virt_fd', '.', 'seek', '(', '0', ')', 'virt_conf', '=', 'yaml', '.', 'load', '(', 'virt_fd', ')', 'return', 'deepcopy', '(', 'virt_conf', ')']
Loads the given conf stream into a dict, trying different formats if needed Args: virt_fd (str): file like objcect with the virt config to load Returns: dict: Loaded virt config
['Loads', 'the', 'given', 'conf', 'stream', 'into', 'a', 'dict', 'trying', 'different', 'formats', 'if', 'needed']
train
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/utils.py#L513-L530
6,511
PmagPy/PmagPy
pmagpy/pmag.py
s_l
def s_l(l, alpha):
    """
    get sigma as a function of degree l from Constable and Parker (1988)
    """
    a2 = alpha**2
    c_a = 0.547
    s_l = np.sqrt(old_div(((c_a**(2. * l)) * a2), ((l + 1.) * (2. * l + 1.))))
    return s_l
python
['def', 's_l', '(', 'l', ',', 'alpha', ')', ':', 'a2', '=', 'alpha', '**', '2', 'c_a', '=', '0.547', 's_l', '=', 'np', '.', 'sqrt', '(', 'old_div', '(', '(', '(', 'c_a', '**', '(', '2.', '*', 'l', ')', ')', '*', 'a2', ')', ',', '(', '(', 'l', '+', '1.', ')', '*', '(', '2.', '*', 'l', '+', '1.', ')', ')', ')', ')', 'return', 's_l']
get sigma as a function of degree l from Constable and Parker (1988)
['get', 'sigma', 'as', 'a', 'function', 'of', 'degree', 'l', 'from', 'Constable', 'and', 'Parker', '(', '1988', ')']
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L9264-L9271
6,512
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/PharLapCommon.py
getPharLapPath
def getPharLapPath():
    """Reads the registry to find the installed path of the Phar Lap ETS
    development kit.

    Raises UserError if no installed version of Phar Lap can be found."""

    if not SCons.Util.can_read_reg:
        raise SCons.Errors.InternalError("No Windows registry module was found")
    try:
        k=SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
                                  'SOFTWARE\\Pharlap\\ETS')
        val, type = SCons.Util.RegQueryValueEx(k, 'BaseDir')

        # The following is a hack...there is (not surprisingly)
        # an odd issue in the Phar Lap plug in that inserts
        # a bunch of junk data after the phar lap path in the
        # registry. We must trim it.
        idx=val.find('\0')
        if idx >= 0:
            val = val[:idx]

        return os.path.normpath(val)
    except SCons.Util.RegError:
        raise SCons.Errors.UserError("Cannot find Phar Lap ETS path in the registry. Is it installed properly?")
python
['def', 'getPharLapPath', '(', ')', ':', 'if', 'not', 'SCons', '.', 'Util', '.', 'can_read_reg', ':', 'raise', 'SCons', '.', 'Errors', '.', 'InternalError', '(', '"No Windows registry module was found"', ')', 'try', ':', 'k', '=', 'SCons', '.', 'Util', '.', 'RegOpenKeyEx', '(', 'SCons', '.', 'Util', '.', 'HKEY_LOCAL_MACHINE', ',', "'SOFTWARE\\\\Pharlap\\\\ETS'", ')', 'val', ',', 'type', '=', 'SCons', '.', 'Util', '.', 'RegQueryValueEx', '(', 'k', ',', "'BaseDir'", ')', '# The following is a hack...there is (not surprisingly)', '# an odd issue in the Phar Lap plug in that inserts', '# a bunch of junk data after the phar lap path in the', '# registry. We must trim it.', 'idx', '=', 'val', '.', 'find', '(', "'\\0'", ')', 'if', 'idx', '>=', '0', ':', 'val', '=', 'val', '[', ':', 'idx', ']', 'return', 'os', '.', 'path', '.', 'normpath', '(', 'val', ')', 'except', 'SCons', '.', 'Util', '.', 'RegError', ':', 'raise', 'SCons', '.', 'Errors', '.', 'UserError', '(', '"Cannot find Phar Lap ETS path in the registry. Is it installed properly?"', ')']
Reads the registry to find the installed path of the Phar Lap ETS development kit. Raises UserError if no installed version of Phar Lap can be found.
['Reads', 'the', 'registry', 'to', 'find', 'the', 'installed', 'path', 'of', 'the', 'Phar', 'Lap', 'ETS', 'development', 'kit', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/PharLapCommon.py#L40-L64
6,513
wonambi-python/wonambi
wonambi/ioeeg/moberg.py
Moberg.return_dat
def return_dat(self, chan, begsam, endsam):
    """Return the data as 2D numpy.ndarray.

    Parameters
    ----------
    chan : int or list
        index (indices) of the channels to read
    begsam : int
        index of the first sample
    endsam : int
        index of the last sample

    Returns
    -------
    numpy.ndarray
        A 2d matrix, with dimension chan X samples
    """
    if begsam < 0:
        begpad = -1 * begsam
        begsam = 0
    else:
        begpad = 0

    if endsam > self.n_smp:
        endpad = endsam - self.n_smp
        endsam = self.n_smp
    else:
        endpad = 0

    first_sam = DATA_PRECISION * self.n_chan * begsam
    toread_sam = DATA_PRECISION * self.n_chan * (endsam - begsam)

    with open(join(self.filename, EEG_FILE), 'rb') as f:
        f.seek(first_sam)
        x = f.read(toread_sam)

    dat = _read_dat(x)
    dat = reshape(dat, (self.n_chan, -1), 'F')
    dat = self.convertion(dat[chan, :])
    dat = pad(dat, ((0, 0), (begpad, endpad)), mode='constant',
              constant_values=NaN)

    return dat
python
['def', 'return_dat', '(', 'self', ',', 'chan', ',', 'begsam', ',', 'endsam', ')', ':', 'if', 'begsam', '<', '0', ':', 'begpad', '=', '-', '1', '*', 'begsam', 'begsam', '=', '0', 'else', ':', 'begpad', '=', '0', 'if', 'endsam', '>', 'self', '.', 'n_smp', ':', 'endpad', '=', 'endsam', '-', 'self', '.', 'n_smp', 'endsam', '=', 'self', '.', 'n_smp', 'else', ':', 'endpad', '=', '0', 'first_sam', '=', 'DATA_PRECISION', '*', 'self', '.', 'n_chan', '*', 'begsam', 'toread_sam', '=', 'DATA_PRECISION', '*', 'self', '.', 'n_chan', '*', '(', 'endsam', '-', 'begsam', ')', 'with', 'open', '(', 'join', '(', 'self', '.', 'filename', ',', 'EEG_FILE', ')', ',', "'rb'", ')', 'as', 'f', ':', 'f', '.', 'seek', '(', 'first_sam', ')', 'x', '=', 'f', '.', 'read', '(', 'toread_sam', ')', 'dat', '=', '_read_dat', '(', 'x', ')', 'dat', '=', 'reshape', '(', 'dat', ',', '(', 'self', '.', 'n_chan', ',', '-', '1', ')', ',', "'F'", ')', 'dat', '=', 'self', '.', 'convertion', '(', 'dat', '[', 'chan', ',', ':', ']', ')', 'dat', '=', 'pad', '(', 'dat', ',', '(', '(', '0', ',', '0', ')', ',', '(', 'begpad', ',', 'endpad', ')', ')', ',', 'mode', '=', "'constant'", ',', 'constant_values', '=', 'NaN', ')', 'return', 'dat']
Return the data as 2D numpy.ndarray. Parameters ---------- chan : int or list index (indices) of the channels to read begsam : int index of the first sample endsam : int index of the last sample Returns ------- numpy.ndarray A 2d matrix, with dimension chan X samples
['Return', 'the', 'data', 'as', '2D', 'numpy', '.', 'ndarray', '.']
train
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/moberg.py#L99-L141
6,514
pandas-dev/pandas
pandas/core/resample.py
DatetimeIndexResampler._upsample
def _upsample(self, method, limit=None, fill_value=None):
    """
    Parameters
    ----------
    method : string {'backfill', 'bfill', 'pad',
        'ffill', 'asfreq'} method for upsampling
    limit : int, default None
        Maximum size gap to fill when reindexing
    fill_value : scalar, default None
        Value to use for missing values

    See Also
    --------
    .fillna
    """
    self._set_binner()
    if self.axis:
        raise AssertionError('axis must be 0')
    if self._from_selection:
        raise ValueError("Upsampling from level= or on= selection"
                         " is not supported, use .set_index(...)"
                         " to explicitly set index to"
                         " datetime-like")

    ax = self.ax
    obj = self._selected_obj
    binner = self.binner
    res_index = self._adjust_binner_for_upsample(binner)

    # if we have the same frequency as our axis, then we are equal sampling
    if limit is None and to_offset(ax.inferred_freq) == self.freq:
        result = obj.copy()
        result.index = res_index
    else:
        result = obj.reindex(res_index, method=method,
                             limit=limit, fill_value=fill_value)

    result = self._apply_loffset(result)
    return self._wrap_result(result)
python
['def', '_upsample', '(', 'self', ',', 'method', ',', 'limit', '=', 'None', ',', 'fill_value', '=', 'None', ')', ':', 'self', '.', '_set_binner', '(', ')', 'if', 'self', '.', 'axis', ':', 'raise', 'AssertionError', '(', "'axis must be 0'", ')', 'if', 'self', '.', '_from_selection', ':', 'raise', 'ValueError', '(', '"Upsampling from level= or on= selection"', '" is not supported, use .set_index(...)"', '" to explicitly set index to"', '" datetime-like"', ')', 'ax', '=', 'self', '.', 'ax', 'obj', '=', 'self', '.', '_selected_obj', 'binner', '=', 'self', '.', 'binner', 'res_index', '=', 'self', '.', '_adjust_binner_for_upsample', '(', 'binner', ')', '# if we have the same frequency as our axis, then we are equal sampling', 'if', 'limit', 'is', 'None', 'and', 'to_offset', '(', 'ax', '.', 'inferred_freq', ')', '==', 'self', '.', 'freq', ':', 'result', '=', 'obj', '.', 'copy', '(', ')', 'result', '.', 'index', '=', 'res_index', 'else', ':', 'result', '=', 'obj', '.', 'reindex', '(', 'res_index', ',', 'method', '=', 'method', ',', 'limit', '=', 'limit', ',', 'fill_value', '=', 'fill_value', ')', 'result', '=', 'self', '.', '_apply_loffset', '(', 'result', ')', 'return', 'self', '.', '_wrap_result', '(', 'result', ')']
Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna
['Parameters', '----------', 'method', ':', 'string', '{', 'backfill', 'bfill', 'pad', 'ffill', 'asfreq', '}', 'method', 'for', 'upsampling', 'limit', ':', 'int', 'default', 'None', 'Maximum', 'size', 'gap', 'to', 'fill', 'when', 'reindexing', 'fill_value', ':', 'scalar', 'default', 'None', 'Value', 'to', 'use', 'for', 'missing', 'values']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1030-L1069
6,515
praekeltfoundation/molo
molo/core/api/importers.py
list_of_objects_from_api
def list_of_objects_from_api(url):
    '''
    API only serves 20 pages by default
    This fetches info on all of items and return them as a list

    Assumption: limit of API is not less than 20
    '''
    response = requests.get(url)
    content = json.loads(response.content)
    count = content["meta"]["total_count"]
    if count <= 20:
        return content["items"]
    else:
        items = [] + content["items"]
        num_requests = int(math.ceil(count // 20))
        for i in range(1, num_requests + 1):
            paginated_url = "{}?limit=20&offset={}".format(
                url, str(i * 20))
            paginated_response = requests.get(paginated_url)
            items = items + json.loads(paginated_response.content)["items"]
        return items
python
['def', 'list_of_objects_from_api', '(', 'url', ')', ':', 'response', '=', 'requests', '.', 'get', '(', 'url', ')', 'content', '=', 'json', '.', 'loads', '(', 'response', '.', 'content', ')', 'count', '=', 'content', '[', '"meta"', ']', '[', '"total_count"', ']', 'if', 'count', '<=', '20', ':', 'return', 'content', '[', '"items"', ']', 'else', ':', 'items', '=', '[', ']', '+', 'content', '[', '"items"', ']', 'num_requests', '=', 'int', '(', 'math', '.', 'ceil', '(', 'count', '//', '20', ')', ')', 'for', 'i', 'in', 'range', '(', '1', ',', 'num_requests', '+', '1', ')', ':', 'paginated_url', '=', '"{}?limit=20&offset={}"', '.', 'format', '(', 'url', ',', 'str', '(', 'i', '*', '20', ')', ')', 'paginated_response', '=', 'requests', '.', 'get', '(', 'paginated_url', ')', 'items', '=', 'items', '+', 'json', '.', 'loads', '(', 'paginated_response', '.', 'content', ')', '[', '"items"', ']', 'return', 'items']
API only serves 20 pages by default This fetches info on all of items and return them as a list Assumption: limit of API is not less than 20
['API', 'only', 'serves', '20', 'pages', 'by', 'default', 'This', 'fetches', 'info', 'on', 'all', 'of', 'items', 'and', 'return', 'them', 'as', 'a', 'list']
train
https://github.com/praekeltfoundation/molo/blob/57702fda4fab261d67591415f7d46bc98fa38525/molo/core/api/importers.py#L53-L76
6,516
digidotcom/python-devicecloud
devicecloud/monitor_tcp.py
TCPClientManager.stop
def stop(self):
    """Stops all session activity.

    Blocks until io and writer thread dies
    """
    if self._io_thread is not None:
        self.log.info("Waiting for I/O thread to stop...")
        self.closed = True
        self._io_thread.join()

    if self._writer_thread is not None:
        self.log.info("Waiting for Writer Thread to stop...")
        self.closed = True
        self._writer_thread.join()

    self.log.info("All worker threads stopped.")
python
['def', 'stop', '(', 'self', ')', ':', 'if', 'self', '.', '_io_thread', 'is', 'not', 'None', ':', 'self', '.', 'log', '.', 'info', '(', '"Waiting for I/O thread to stop..."', ')', 'self', '.', 'closed', '=', 'True', 'self', '.', '_io_thread', '.', 'join', '(', ')', 'if', 'self', '.', '_writer_thread', 'is', 'not', 'None', ':', 'self', '.', 'log', '.', 'info', '(', '"Waiting for Writer Thread to stop..."', ')', 'self', '.', 'closed', '=', 'True', 'self', '.', '_writer_thread', '.', 'join', '(', ')', 'self', '.', 'log', '.', 'info', '(', '"All worker threads stopped."', ')']
Stops all session activity. Blocks until io and writer thread dies
['Stops', 'all', 'session', 'activity', '.']
train
https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/monitor_tcp.py#L564-L579
6,517
mitsei/dlkit
dlkit/aws_adapter/repository/managers.py
RepositoryProxyManager.get_asset_admin_session_for_repository
def get_asset_admin_session_for_repository(self, repository_id=None, proxy=None):
    """Gets an asset administration session for the given repository.

    arg:    repository_id (osid.id.Id): the ``Id`` of the repository
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.AssetAdminSession) - an
            ``AssetAdminSession``
    raise:  NotFound - ``repository_id`` not found
    raise:  NullArgument - ``repository_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_asset_admin()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_asset_admin()`` and ``supports_visible_federation()``
    are ``true``.*

    """
    asset_lookup_session = self._provider_manager.get_asset_lookup_session_for_repository(
        repository_id, proxy)
    return AssetAdminSession(
        self._provider_manager.get_asset_admin_session_for_repository(repository_id, proxy),
        self._config_map,
        asset_lookup_session)
python
['def', 'get_asset_admin_session_for_repository', '(', 'self', ',', 'repository_id', '=', 'None', ',', 'proxy', '=', 'None', ')', ':', 'asset_lookup_session', '=', 'self', '.', '_provider_manager', '.', 'get_asset_lookup_session_for_repository', '(', 'repository_id', ',', 'proxy', ')', 'return', 'AssetAdminSession', '(', 'self', '.', '_provider_manager', '.', 'get_asset_admin_session_for_repository', '(', 'repository_id', ',', 'proxy', ')', ',', 'self', '.', '_config_map', ',', 'asset_lookup_session', ')']
Gets an asset administration session for the given repository. arg: repository_id (osid.id.Id): the ``Id`` of the repository arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetAdminSession) - an ``AssetAdminSession`` raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_asset_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_admin()`` and ``supports_visible_federation()`` are ``true``.*
['Gets', 'an', 'asset', 'administration', 'session', 'for', 'the', 'given', 'repository', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/aws_adapter/repository/managers.py#L1865-L1889
6,518
radjkarl/imgProcessor
imgProcessor/camera/LensDistortion.py
LensDistortion.standardUncertainties
def standardUncertainties(self, sharpness=0.5):
    '''
    sharpness -> image sharpness // std of Gaussian PSF [px]

    returns a list of standard uncertainties for the x and y component:
    (1x,2x), (1y, 2y), (intensity:None)

    1. px-size-changes(due to deflection)
    2. reprojection error
    '''
    height, width = self.coeffs['shape']
    fx, fy = self.getDeflection(width, height)
    # is RMSE of imgPoint-projectedPoints
    r = self.coeffs['reprojectionError']
    t = (sharpness**2 + r**2)**0.5
    return fx * t, fy * t
python
['def', 'standardUncertainties', '(', 'self', ',', 'sharpness', '=', '0.5', ')', ':', 'height', ',', 'width', '=', 'self', '.', 'coeffs', '[', "'shape'", ']', 'fx', ',', 'fy', '=', 'self', '.', 'getDeflection', '(', 'width', ',', 'height', ')', '# is RMSE of imgPoint-projectedPoints\r', 'r', '=', 'self', '.', 'coeffs', '[', "'reprojectionError'", ']', 't', '=', '(', 'sharpness', '**', '2', '+', 'r', '**', '2', ')', '**', '0.5', 'return', 'fx', '*', 't', ',', 'fy', '*', 't']
sharpness -> image sharpness // std of Gaussian PSF [px] returns a list of standard uncertainties for the x and y component: (1x,2x), (1y, 2y), (intensity:None) 1. px-size-changes(due to deflection) 2. reprojection error
['sharpness', '-', '>', 'image', 'sharpness', '//', 'std', 'of', 'Gaussian', 'PSF', '[', 'px', ']', 'returns', 'a', 'list', 'of', 'standard', 'uncertainties', 'for', 'the', 'x', 'and', 'y', 'component', ':', '(', '1x', '2x', ')', '(', '1y', '2y', ')', '(', 'intensity', ':', 'None', ')', '1', '.', 'px', '-', 'size', '-', 'changes', '(', 'due', 'to', 'deflection', ')', '2', '.', 'reprojection', 'error']
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L404-L418
6,519
alpacahq/pylivetrader
pylivetrader/loader.py
translate
def translate(script):
    '''translate zipline script into pylivetrader script.
    '''
    tree = ast.parse(script)

    ZiplineImportVisitor().visit(tree)

    return astor.to_source(tree)
python
['def', 'translate', '(', 'script', ')', ':', 'tree', '=', 'ast', '.', 'parse', '(', 'script', ')', 'ZiplineImportVisitor', '(', ')', '.', 'visit', '(', 'tree', ')', 'return', 'astor', '.', 'to_source', '(', 'tree', ')']
translate zipline script into pylivetrader script.
['translate', 'zipline', 'script', 'into', 'pylivetrader', 'script', '.']
train
https://github.com/alpacahq/pylivetrader/blob/fd328b6595428c0789d9f218df34623f83a02b8b/pylivetrader/loader.py#L72-L79
6,520
sorgerlab/indra
indra/preassembler/hierarchy_manager.py
HierarchyManager.load_from_rdf_file
def load_from_rdf_file(self, rdf_file):
    """Initialize given an RDF input file representing the hierarchy."

    Parameters
    ----------
    rdf_file : str
        Path to an RDF file.
    """
    self.graph = rdflib.Graph()
    self.graph.parse(os.path.abspath(rdf_file), format='nt')
    self.initialize()
python
['def', 'load_from_rdf_file', '(', 'self', ',', 'rdf_file', ')', ':', 'self', '.', 'graph', '=', 'rdflib', '.', 'Graph', '(', ')', 'self', '.', 'graph', '.', 'parse', '(', 'os', '.', 'path', '.', 'abspath', '(', 'rdf_file', ')', ',', 'format', '=', "'nt'", ')', 'self', '.', 'initialize', '(', ')']
Initialize given an RDF input file representing the hierarchy." Parameters ---------- rdf_file : str Path to an RDF file.
['Initialize', 'given', 'an', 'RDF', 'input', 'file', 'representing', 'the', 'hierarchy', '.']
train
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/hierarchy_manager.py#L62-L72
6,521
piotr-rusin/spam-lists
spam_lists/validation.py
is_valid_url
def is_valid_url(value): """Check if given value is a valid URL string. :param value: a value to test :returns: True if the value is valid """ match = URL_REGEX.match(value) host_str = urlparse(value).hostname return match and is_valid_host(host_str)
python
def is_valid_url(value): """Check if given value is a valid URL string. :param value: a value to test :returns: True if the value is valid """ match = URL_REGEX.match(value) host_str = urlparse(value).hostname return match and is_valid_host(host_str)
['def', 'is_valid_url', '(', 'value', ')', ':', 'match', '=', 'URL_REGEX', '.', 'match', '(', 'value', ')', 'host_str', '=', 'urlparse', '(', 'value', ')', '.', 'hostname', 'return', 'match', 'and', 'is_valid_host', '(', 'host_str', ')']
Check if given value is a valid URL string. :param value: a value to test :returns: True if the value is valid
['Check', 'if', 'given', 'value', 'is', 'a', 'valid', 'URL', 'string', '.']
train
https://github.com/piotr-rusin/spam-lists/blob/fd616e8761b28f3eaa503fee5e45f7748e8f88f2/spam_lists/validation.py#L33-L41
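URL_REGEX and is_valid_host are internal to spam_lists and are not part of this record, so the snippet below only illustrates the hostname-extraction half of the check using the standard library.

from urllib.parse import urlparse

def looks_like_http_url(value):
    # simplified stand-in: a recognized scheme plus a parseable hostname
    parts = urlparse(value)
    return parts.scheme in ("http", "https") and bool(parts.hostname)

print(looks_like_http_url("http://example.com/path"))   # True
print(looks_like_http_url("not a url"))                 # False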
6,522
jreese/aiosqlite
aiosqlite/core.py
Connection.execute_insert
async def execute_insert( self, sql: str, parameters: Iterable[Any] = None ) -> Optional[sqlite3.Row]: """Helper to insert and get the last_insert_rowid.""" if parameters is None: parameters = [] return await self._execute(self._execute_insert, sql, parameters)
python
async def execute_insert( self, sql: str, parameters: Iterable[Any] = None ) -> Optional[sqlite3.Row]: """Helper to insert and get the last_insert_rowid.""" if parameters is None: parameters = [] return await self._execute(self._execute_insert, sql, parameters)
['async', 'def', 'execute_insert', '(', 'self', ',', 'sql', ':', 'str', ',', 'parameters', ':', 'Iterable', '[', 'Any', ']', '=', 'None', ')', '->', 'Optional', '[', 'sqlite3', '.', 'Row', ']', ':', 'if', 'parameters', 'is', 'None', ':', 'parameters', '=', '[', ']', 'return', 'await', 'self', '.', '_execute', '(', 'self', '.', '_execute_insert', ',', 'sql', ',', 'parameters', ')']
Helper to insert and get the last_insert_rowid.
['Helper', 'to', 'insert', 'and', 'get', 'the', 'last_insert_rowid', '.']
train
https://github.com/jreese/aiosqlite/blob/3f548b568b8db9a57022b6e2c9627f5cdefb983f/aiosqlite/core.py#L213-L219
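A hedged usage sketch for the helper above, assuming the aiosqlite package is installed; the table name and column are hypothetical.

import asyncio
import aiosqlite

async def main():
    async with aiosqlite.connect(":memory:") as db:
        await db.execute("CREATE TABLE items (name TEXT)")
        row = await db.execute_insert("INSERT INTO items (name) VALUES (?)", ("widget",))
        await db.commit()
        print(row)   # the last_insert_rowid() row, e.g. (1,)

asyncio.run(main())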
6,523
dchaplinsky/LT2OpenCorpora
lt2opencorpora/convert.py
TagSet._get_group_no
def _get_group_no(self, tag_name): """ Takes tag name and returns the number of the group to which tag belongs """ if tag_name in self.full: return self.groups.index(self.full[tag_name]["parent"]) else: return len(self.groups)
python
def _get_group_no(self, tag_name): """ Takes tag name and returns the number of the group to which tag belongs """ if tag_name in self.full: return self.groups.index(self.full[tag_name]["parent"]) else: return len(self.groups)
['def', '_get_group_no', '(', 'self', ',', 'tag_name', ')', ':', 'if', 'tag_name', 'in', 'self', '.', 'full', ':', 'return', 'self', '.', 'groups', '.', 'index', '(', 'self', '.', 'full', '[', 'tag_name', ']', '[', '"parent"', ']', ')', 'else', ':', 'return', 'len', '(', 'self', '.', 'groups', ')']
Takes tag name and returns the number of the group to which tag belongs
['Takes', 'tag', 'name', 'and', 'returns', 'the', 'number', 'of', 'the', 'group', 'to', 'which', 'tag', 'belongs']
train
https://github.com/dchaplinsky/LT2OpenCorpora/blob/7bf48098ec2db4c8955a660fd0c1b80a16e43054/lt2opencorpora/convert.py#L90-L98
6,524
abseil/abseil-py
absl/flags/_argument_parser.py
BaseListParser.parse
def parse(self, argument): """See base class.""" if isinstance(argument, list): return argument elif not argument: return [] else: return [s.strip() for s in argument.split(self._token)]
python
def parse(self, argument): """See base class.""" if isinstance(argument, list): return argument elif not argument: return [] else: return [s.strip() for s in argument.split(self._token)]
['def', 'parse', '(', 'self', ',', 'argument', ')', ':', 'if', 'isinstance', '(', 'argument', ',', 'list', ')', ':', 'return', 'argument', 'elif', 'not', 'argument', ':', 'return', '[', ']', 'else', ':', 'return', '[', 's', '.', 'strip', '(', ')', 'for', 's', 'in', 'argument', '.', 'split', '(', 'self', '.', '_token', ')', ']']
See base class.
['See', 'base', 'class', '.']
train
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_argument_parser.py#L474-L481
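A standalone function mirroring the three branches above (already a list, empty input, delimited string); it is not the real absl class, just a way to see the behavior without the flags machinery.

def parse_list(argument, token=","):
    if isinstance(argument, list):
        return argument
    if not argument:
        return []
    return [s.strip() for s in argument.split(token)]

print(parse_list("a, b , c"))    # ['a', 'b', 'c']
print(parse_list(""))            # []
print(parse_list(["x", "y"]))    # ['x', 'y'] returned unchanged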
6,525
senaite/senaite.jsonapi
src/senaite/jsonapi/api.py
deactivate_object
def deactivate_object(brain_or_object): """Deactivate the given object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Nothing :rtype: None """ obj = get_object(brain_or_object) # we do not want to delete the site root! if is_root(obj): fail(401, "Deactivating the Portal is not allowed") try: do_transition_for(brain_or_object, "deactivate") except Unauthorized: fail(401, "Not allowed to deactivate object '%s'" % obj.getId())
python
def deactivate_object(brain_or_object): """Deactivate the given object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Nothing :rtype: None """ obj = get_object(brain_or_object) # we do not want to delete the site root! if is_root(obj): fail(401, "Deactivating the Portal is not allowed") try: do_transition_for(brain_or_object, "deactivate") except Unauthorized: fail(401, "Not allowed to deactivate object '%s'" % obj.getId())
['def', 'deactivate_object', '(', 'brain_or_object', ')', ':', 'obj', '=', 'get_object', '(', 'brain_or_object', ')', '# we do not want to delete the site root!', 'if', 'is_root', '(', 'obj', ')', ':', 'fail', '(', '401', ',', '"Deactivating the Portal is not allowed"', ')', 'try', ':', 'do_transition_for', '(', 'brain_or_object', ',', '"deactivate"', ')', 'except', 'Unauthorized', ':', 'fail', '(', '401', ',', '"Not allowed to deactivate object \'%s\'"', '%', 'obj', '.', 'getId', '(', ')', ')']
Deactivate the given object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Nothing :rtype: None
['Deactivate', 'the', 'given', 'object']
train
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/api.py#L1466-L1481
6,526
dwavesystems/dimod
dimod/binary_quadratic_model.py
BinaryQuadraticModel.energies
def energies(self, samples_like, dtype=np.float): """Determine the energies of the given samples. Args: samples_like (samples_like): A collection of raw samples. `samples_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. dtype (:class:`numpy.dtype`): The data type of the returned energies. Returns: :obj:`numpy.ndarray`: The energies. """ samples, labels = as_samples(samples_like) if all(v == idx for idx, v in enumerate(labels)): ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(dtype=dtype) else: ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(variable_order=labels, dtype=dtype) energies = samples.dot(ldata) + (samples[:, irow]*samples[:, icol]).dot(qdata) + offset return np.asarray(energies, dtype=dtype)
python
def energies(self, samples_like, dtype=np.float): """Determine the energies of the given samples. Args: samples_like (samples_like): A collection of raw samples. `samples_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. dtype (:class:`numpy.dtype`): The data type of the returned energies. Returns: :obj:`numpy.ndarray`: The energies. """ samples, labels = as_samples(samples_like) if all(v == idx for idx, v in enumerate(labels)): ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(dtype=dtype) else: ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(variable_order=labels, dtype=dtype) energies = samples.dot(ldata) + (samples[:, irow]*samples[:, icol]).dot(qdata) + offset return np.asarray(energies, dtype=dtype)
['def', 'energies', '(', 'self', ',', 'samples_like', ',', 'dtype', '=', 'np', '.', 'float', ')', ':', 'samples', ',', 'labels', '=', 'as_samples', '(', 'samples_like', ')', 'if', 'all', '(', 'v', '==', 'idx', 'for', 'idx', ',', 'v', 'in', 'enumerate', '(', 'labels', ')', ')', ':', 'ldata', ',', '(', 'irow', ',', 'icol', ',', 'qdata', ')', ',', 'offset', '=', 'self', '.', 'to_numpy_vectors', '(', 'dtype', '=', 'dtype', ')', 'else', ':', 'ldata', ',', '(', 'irow', ',', 'icol', ',', 'qdata', ')', ',', 'offset', '=', 'self', '.', 'to_numpy_vectors', '(', 'variable_order', '=', 'labels', ',', 'dtype', '=', 'dtype', ')', 'energies', '=', 'samples', '.', 'dot', '(', 'ldata', ')', '+', '(', 'samples', '[', ':', ',', 'irow', ']', '*', 'samples', '[', ':', ',', 'icol', ']', ')', '.', 'dot', '(', 'qdata', ')', '+', 'offset', 'return', 'np', '.', 'asarray', '(', 'energies', ',', 'dtype', '=', 'dtype', ')']
Determine the energies of the given samples. Args: samples_like (samples_like): A collection of raw samples. `samples_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. dtype (:class:`numpy.dtype`): The data type of the returned energies. Returns: :obj:`numpy.ndarray`: The energies.
['Determine', 'the', 'energies', 'of', 'the', 'given', 'samples', '.']
train
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1497-L1520
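A NumPy-only illustration of the vectorized energy formula above, using tiny hand-made linear and quadratic vectors rather than dimod's real to_numpy_vectors output.

import numpy as np

samples = np.array([[1, -1], [1, 1]])        # two spin samples over two variables
ldata = np.array([0.5, -0.5])                # linear biases
irow, icol = np.array([0]), np.array([1])    # one quadratic term on (0, 1)
qdata = np.array([1.0])
offset = 0.0

energies = samples.dot(ldata) + (samples[:, irow] * samples[:, icol]).dot(qdata) + offset
print(energies)   # [0. 1.]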
6,527
rongcloud/server-sdk-python
rongcloud/sms.py
SMS.sendCode
def sendCode(self, mobile, templateId, region, verifyId=None, verifyCode=None): """ 发送短信验证码方法。 方法 @param mobile:接收短信验证码的目标手机号,每分钟同一手机号只能发送一次短信验证码,同一手机号 1 小时内最多发送 3 次。(必传) @param templateId:短信模板 Id,在开发者后台->短信服务->服务设置->短信模版中获取。(必传) @param region:手机号码所属国家区号,目前只支持中图区号 86) @param verifyId:图片验证标识 Id ,开启图片验证功能后此参数必传,否则可以不传。在获取图片验证码方法返回值中获取。 @param verifyCode:图片验证码,开启图片验证功能后此参数必传,否则可以不传。 @return code:返回码,200 为正常。 @return sessionId:短信验证码唯一标识。 @return errorMessage:错误信息。 """ desc = { "name": "SMSSendCodeReslut", "desc": " SMSSendCodeReslut 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "sessionId", "type": "String", "desc": "短信验证码唯一标识。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('SMS', 'POST', 'application/x-www-form-urlencoded'), action='/sendCode.json', params={ "mobile": mobile, "templateId": templateId, "region": region, "verifyId": verifyId, "verifyCode": verifyCode }) return Response(r, desc)
python
def sendCode(self, mobile, templateId, region, verifyId=None, verifyCode=None): """ 发送短信验证码方法。 方法 @param mobile:接收短信验证码的目标手机号,每分钟同一手机号只能发送一次短信验证码,同一手机号 1 小时内最多发送 3 次。(必传) @param templateId:短信模板 Id,在开发者后台->短信服务->服务设置->短信模版中获取。(必传) @param region:手机号码所属国家区号,目前只支持中图区号 86) @param verifyId:图片验证标识 Id ,开启图片验证功能后此参数必传,否则可以不传。在获取图片验证码方法返回值中获取。 @param verifyCode:图片验证码,开启图片验证功能后此参数必传,否则可以不传。 @return code:返回码,200 为正常。 @return sessionId:短信验证码唯一标识。 @return errorMessage:错误信息。 """ desc = { "name": "SMSSendCodeReslut", "desc": " SMSSendCodeReslut 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "sessionId", "type": "String", "desc": "短信验证码唯一标识。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('SMS', 'POST', 'application/x-www-form-urlencoded'), action='/sendCode.json', params={ "mobile": mobile, "templateId": templateId, "region": region, "verifyId": verifyId, "verifyCode": verifyCode }) return Response(r, desc)
['def', 'sendCode', '(', 'self', ',', 'mobile', ',', 'templateId', ',', 'region', ',', 'verifyId', '=', 'None', ',', 'verifyCode', '=', 'None', ')', ':', 'desc', '=', '{', '"name"', ':', '"SMSSendCodeReslut"', ',', '"desc"', ':', '" SMSSendCodeReslut 成功返回结果",', '', '"fields"', ':', '[', '{', '"name"', ':', '"code"', ',', '"type"', ':', '"Integer"', ',', '"desc"', ':', '"返回码,200 为正常。"', '}', ',', '{', '"name"', ':', '"sessionId"', ',', '"type"', ':', '"String"', ',', '"desc"', ':', '"短信验证码唯一标识。"', '}', ',', '{', '"name"', ':', '"errorMessage"', ',', '"type"', ':', '"String"', ',', '"desc"', ':', '"错误信息。"', '}', ']', '}', 'r', '=', 'self', '.', 'call_api', '(', 'method', '=', '(', "'SMS'", ',', "'POST'", ',', "'application/x-www-form-urlencoded'", ')', ',', 'action', '=', "'/sendCode.json'", ',', 'params', '=', '{', '"mobile"', ':', 'mobile', ',', '"templateId"', ':', 'templateId', ',', '"region"', ':', 'region', ',', '"verifyId"', ':', 'verifyId', ',', '"verifyCode"', ':', 'verifyCode', '}', ')', 'return', 'Response', '(', 'r', ',', 'desc', ')']
发送短信验证码方法。 方法 @param mobile:接收短信验证码的目标手机号,每分钟同一手机号只能发送一次短信验证码,同一手机号 1 小时内最多发送 3 次。(必传) @param templateId:短信模板 Id,在开发者后台->短信服务->服务设置->短信模版中获取。(必传) @param region:手机号码所属国家区号,目前只支持中图区号 86) @param verifyId:图片验证标识 Id ,开启图片验证功能后此参数必传,否则可以不传。在获取图片验证码方法返回值中获取。 @param verifyCode:图片验证码,开启图片验证功能后此参数必传,否则可以不传。 @return code:返回码,200 为正常。 @return sessionId:短信验证码唯一标识。 @return errorMessage:错误信息。
['发送短信验证码方法。', '方法']
train
https://github.com/rongcloud/server-sdk-python/blob/3daadd8b67c84cc5d2a9419e8d45fd69c9baf976/rongcloud/sms.py#L47-L93
6,528
zhmcclient/python-zhmcclient
zhmcclient/_manager.py
_NameUriCache.get
def get(self, name): """ Get the resource URI for a specified resource name. If an entry for the specified resource name does not exist in the Name-URI cache, the cache is refreshed from the HMC with all resources of the manager holding this cache. If an entry for the specified resource name still does not exist after that, ``NotFound`` is raised. """ self.auto_invalidate() try: return self._uris[name] except KeyError: self.refresh() try: return self._uris[name] except KeyError: raise NotFound({self._manager._name_prop: name}, self._manager)
python
def get(self, name): """ Get the resource URI for a specified resource name. If an entry for the specified resource name does not exist in the Name-URI cache, the cache is refreshed from the HMC with all resources of the manager holding this cache. If an entry for the specified resource name still does not exist after that, ``NotFound`` is raised. """ self.auto_invalidate() try: return self._uris[name] except KeyError: self.refresh() try: return self._uris[name] except KeyError: raise NotFound({self._manager._name_prop: name}, self._manager)
['def', 'get', '(', 'self', ',', 'name', ')', ':', 'self', '.', 'auto_invalidate', '(', ')', 'try', ':', 'return', 'self', '.', '_uris', '[', 'name', ']', 'except', 'KeyError', ':', 'self', '.', 'refresh', '(', ')', 'try', ':', 'return', 'self', '.', '_uris', '[', 'name', ']', 'except', 'KeyError', ':', 'raise', 'NotFound', '(', '{', 'self', '.', '_manager', '.', '_name_prop', ':', 'name', '}', ',', 'self', '.', '_manager', ')']
Get the resource URI for a specified resource name. If an entry for the specified resource name does not exist in the Name-URI cache, the cache is refreshed from the HMC with all resources of the manager holding this cache. If an entry for the specified resource name still does not exist after that, ``NotFound`` is raised.
['Get', 'the', 'resource', 'URI', 'for', 'a', 'specified', 'resource', 'name', '.']
train
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_manager.py#L75-L94
6,529
consbio/ncdjango
ncdjango/interfaces/arcgis/views.py
GetImageView.get_render_configurations
def get_render_configurations(self, request, **kwargs): """Render image interface""" data = self.process_form_data(self._get_form_defaults(), kwargs) variable_set = self.get_variable_set(self.service.variable_set.order_by('index'), data) base_config = ImageConfiguration( extent=data['bbox'], size=data['size'], image_format=data['image_format'], background_color=TRANSPARENT_BACKGROUND_COLOR if data.get('transparent') else DEFAULT_BACKGROUND_COLOR ) return base_config, self.apply_time_to_configurations([RenderConfiguration(v) for v in variable_set], data)
python
def get_render_configurations(self, request, **kwargs): """Render image interface""" data = self.process_form_data(self._get_form_defaults(), kwargs) variable_set = self.get_variable_set(self.service.variable_set.order_by('index'), data) base_config = ImageConfiguration( extent=data['bbox'], size=data['size'], image_format=data['image_format'], background_color=TRANSPARENT_BACKGROUND_COLOR if data.get('transparent') else DEFAULT_BACKGROUND_COLOR ) return base_config, self.apply_time_to_configurations([RenderConfiguration(v) for v in variable_set], data)
['def', 'get_render_configurations', '(', 'self', ',', 'request', ',', '*', '*', 'kwargs', ')', ':', 'data', '=', 'self', '.', 'process_form_data', '(', 'self', '.', '_get_form_defaults', '(', ')', ',', 'kwargs', ')', 'variable_set', '=', 'self', '.', 'get_variable_set', '(', 'self', '.', 'service', '.', 'variable_set', '.', 'order_by', '(', "'index'", ')', ',', 'data', ')', 'base_config', '=', 'ImageConfiguration', '(', 'extent', '=', 'data', '[', "'bbox'", ']', ',', 'size', '=', 'data', '[', "'size'", ']', ',', 'image_format', '=', 'data', '[', "'image_format'", ']', ',', 'background_color', '=', 'TRANSPARENT_BACKGROUND_COLOR', 'if', 'data', '.', 'get', '(', "'transparent'", ')', 'else', 'DEFAULT_BACKGROUND_COLOR', ')', 'return', 'base_config', ',', 'self', '.', 'apply_time_to_configurations', '(', '[', 'RenderConfiguration', '(', 'v', ')', 'for', 'v', 'in', 'variable_set', ']', ',', 'data', ')']
Render image interface
['Render', 'image', 'interface']
train
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L284-L297
6,530
cltk/cltk
cltk/phonology/utils.py
Consonant.is_equal
def is_equal(self, other_consonnant): """ >>> v_consonant = Consonant(Place.labio_dental, Manner.fricative, True, "v", False) >>> f_consonant = Consonant(Place.labio_dental, Manner.fricative, False, "f", False) >>> v_consonant.is_equal(f_consonant) False :param other_consonnant: :return: """ return self.place == other_consonnant.place and self.manner == other_consonnant.manner and \ self.voiced == other_consonnant.voiced and self.geminate == other_consonnant.geminate
python
def is_equal(self, other_consonnant): """ >>> v_consonant = Consonant(Place.labio_dental, Manner.fricative, True, "v", False) >>> f_consonant = Consonant(Place.labio_dental, Manner.fricative, False, "f", False) >>> v_consonant.is_equal(f_consonant) False :param other_consonnant: :return: """ return self.place == other_consonnant.place and self.manner == other_consonnant.manner and \ self.voiced == other_consonnant.voiced and self.geminate == other_consonnant.geminate
['def', 'is_equal', '(', 'self', ',', 'other_consonnant', ')', ':', 'return', 'self', '.', 'place', '==', 'other_consonnant', '.', 'place', 'and', 'self', '.', 'manner', '==', 'other_consonnant', '.', 'manner', 'and', 'self', '.', 'voiced', '==', 'other_consonnant', '.', 'voiced', 'and', 'self', '.', 'geminate', '==', 'other_consonnant', '.', 'geminate']
>>> v_consonant = Consonant(Place.labio_dental, Manner.fricative, True, "v", False) >>> f_consonant = Consonant(Place.labio_dental, Manner.fricative, False, "f", False) >>> v_consonant.is_equal(f_consonant) False :param other_consonnant: :return:
['>>>', 'v_consonant', '=', 'Consonant', '(', 'Place', '.', 'labio_dental', 'Manner', '.', 'fricative', 'True', 'v', 'False', ')', '>>>', 'f_consonant', '=', 'Consonant', '(', 'Place', '.', 'labio_dental', 'Manner', '.', 'fricative', 'False', 'f', 'False', ')', '>>>', 'v_consonant', '.', 'is_equal', '(', 'f_consonant', ')', 'False']
train
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/phonology/utils.py#L148-L159
6,531
wmayner/pyphi
pyphi/macro.py
MacroSubsystem.potential_purviews
def potential_purviews(self, direction, mechanism, purviews=False): """Override Subsystem implementation using Network-level indices.""" all_purviews = utils.powerset(self.node_indices) return irreducible_purviews( self.cm, direction, mechanism, all_purviews)
python
def potential_purviews(self, direction, mechanism, purviews=False): """Override Subsystem implementation using Network-level indices.""" all_purviews = utils.powerset(self.node_indices) return irreducible_purviews( self.cm, direction, mechanism, all_purviews)
['def', 'potential_purviews', '(', 'self', ',', 'direction', ',', 'mechanism', ',', 'purviews', '=', 'False', ')', ':', 'all_purviews', '=', 'utils', '.', 'powerset', '(', 'self', '.', 'node_indices', ')', 'return', 'irreducible_purviews', '(', 'self', '.', 'cm', ',', 'direction', ',', 'mechanism', ',', 'all_purviews', ')']
Override Subsystem implementation using Network-level indices.
['Override', 'Subsystem', 'implementation', 'using', 'Network', '-', 'level', 'indices', '.']
train
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/macro.py#L354-L358
6,532
AtomHash/evernode
evernode/classes/paginate.py
Paginate.json_paginate
def json_paginate(self, base_url, page_number): """ Return a dict for a JSON paginate """ data = self.page(page_number) first_id = None last_id = None if data: first_id = data[0].id last_id = data[-1].id return { 'meta': { 'total_pages': self.max_pages, 'first_id': first_id, 'last_id': last_id, 'current_page': page_number }, 'data': self.page(page_number), 'links': self.links(base_url, page_number) }
python
def json_paginate(self, base_url, page_number): """ Return a dict for a JSON paginate """ data = self.page(page_number) first_id = None last_id = None if data: first_id = data[0].id last_id = data[-1].id return { 'meta': { 'total_pages': self.max_pages, 'first_id': first_id, 'last_id': last_id, 'current_page': page_number }, 'data': self.page(page_number), 'links': self.links(base_url, page_number) }
['def', 'json_paginate', '(', 'self', ',', 'base_url', ',', 'page_number', ')', ':', 'data', '=', 'self', '.', 'page', '(', 'page_number', ')', 'first_id', '=', 'None', 'last_id', '=', 'None', 'if', 'data', ':', 'first_id', '=', 'data', '[', '0', ']', '.', 'id', 'last_id', '=', 'data', '[', '-', '1', ']', '.', 'id', 'return', '{', "'meta'", ':', '{', "'total_pages'", ':', 'self', '.', 'max_pages', ',', "'first_id'", ':', 'first_id', ',', "'last_id'", ':', 'last_id', ',', "'current_page'", ':', 'page_number', '}', ',', "'data'", ':', 'self', '.', 'page', '(', 'page_number', ')', ',', "'links'", ':', 'self', '.', 'links', '(', 'base_url', ',', 'page_number', ')', '}']
Return a dict for a JSON paginate
['Return', 'a', 'dict', 'for', 'a', 'JSON', 'paginate']
train
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/paginate.py#L105-L122
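The envelope returned above has a fixed shape; here it is rebuilt by hand with hypothetical values, since the surrounding Paginate class (page, links, max_pages) is not included in this record.

envelope = {
    "meta": {"total_pages": 5, "first_id": 101, "last_id": 120, "current_page": 1},
    "data": [],     # the page's records would go here
    "links": {},    # e.g. next/prev URLs derived from base_url
}
print(envelope["meta"])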
6,533
jsvine/spectra
spectra/core.py
Color.to
def to(self, space): """ Convert color to a different color space. :param str space: Name of the color space. :rtype: Color :returns: A new spectra.Color in the given color space. """ if space == self.space: return self new_color = convert_color(self.color_object, COLOR_SPACES[space]) return self.__class__(space, *new_color.get_value_tuple())
python
def to(self, space): """ Convert color to a different color space. :param str space: Name of the color space. :rtype: Color :returns: A new spectra.Color in the given color space. """ if space == self.space: return self new_color = convert_color(self.color_object, COLOR_SPACES[space]) return self.__class__(space, *new_color.get_value_tuple())
['def', 'to', '(', 'self', ',', 'space', ')', ':', 'if', 'space', '==', 'self', '.', 'space', ':', 'return', 'self', 'new_color', '=', 'convert_color', '(', 'self', '.', 'color_object', ',', 'COLOR_SPACES', '[', 'space', ']', ')', 'return', 'self', '.', '__class__', '(', 'space', ',', '*', 'new_color', '.', 'get_value_tuple', '(', ')', ')']
Convert color to a different color space. :param str space: Name of the color space. :rtype: Color :returns: A new spectra.Color in the given color space.
['Convert', 'color', 'to', 'a', 'different', 'color', 'space', '.']
train
https://github.com/jsvine/spectra/blob/2269a0ae9b5923154b15bd661fb81179608f7ec2/spectra/core.py#L45-L56
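A hedged usage sketch, assuming the spectra package is installed; the spectra.html constructor and the 'lab' space name are taken from spectra's documentation and may differ between versions.

import spectra

red = spectra.html("#ff0000")
lab = red.to("lab")        # same color expressed in another color space
print(lab.values)          # component tuple in the target space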
6,534
Sanji-IO/sanji
sanji/model/__init__.py
Model.update
def update(self, id, newObj): """Update a object Args: id (int): Target Object ID newObj (object): New object will be merged into original object Returns: Object: Updated object None: If specified object id is not found MultipleInvalid: If input object is invaild """ newObj = self.validation(newObj) for obj in self.model.db: if obj["id"] != id: continue newObj.pop("id", None) obj.update(newObj) obj = self._cast_model(obj) if not self._batch.enable.is_set(): self.model.save_db() return obj return None
python
def update(self, id, newObj): """Update a object Args: id (int): Target Object ID newObj (object): New object will be merged into original object Returns: Object: Updated object None: If specified object id is not found MultipleInvalid: If input object is invaild """ newObj = self.validation(newObj) for obj in self.model.db: if obj["id"] != id: continue newObj.pop("id", None) obj.update(newObj) obj = self._cast_model(obj) if not self._batch.enable.is_set(): self.model.save_db() return obj return None
['def', 'update', '(', 'self', ',', 'id', ',', 'newObj', ')', ':', 'newObj', '=', 'self', '.', 'validation', '(', 'newObj', ')', 'for', 'obj', 'in', 'self', '.', 'model', '.', 'db', ':', 'if', 'obj', '[', '"id"', ']', '!=', 'id', ':', 'continue', 'newObj', '.', 'pop', '(', '"id"', ',', 'None', ')', 'obj', '.', 'update', '(', 'newObj', ')', 'obj', '=', 'self', '.', '_cast_model', '(', 'obj', ')', 'if', 'not', 'self', '.', '_batch', '.', 'enable', '.', 'is_set', '(', ')', ':', 'self', '.', 'model', '.', 'save_db', '(', ')', 'return', 'obj', 'return', 'None']
Update a object Args: id (int): Target Object ID newObj (object): New object will be merged into original object Returns: Object: Updated object None: If specified object id is not found MultipleInvalid: If input object is invaild
['Update', 'a', 'object', 'Args', ':', 'id', '(', 'int', ')', ':', 'Target', 'Object', 'ID', 'newObj', '(', 'object', ')', ':', 'New', 'object', 'will', 'be', 'merged', 'into', 'original', 'object', 'Returns', ':', 'Object', ':', 'Updated', 'object', 'None', ':', 'If', 'specified', 'object', 'id', 'is', 'not', 'found', 'MultipleInvalid', ':', 'If', 'input', 'object', 'is', 'invaild']
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/model/__init__.py#L129-L151
6,535
hollenstein/maspy
maspy/proteindb.py
ProteinDatabase._calculateCoverageMasks
def _calculateCoverageMasks(proteindb, peptidedb): """Calcualte the sequence coverage masks for all proteindb elements. Private method used by :class:`ProteinDatabase`. A coverage mask is a numpy boolean array with the length of the protein sequence. Each protein position that has been covered in at least one peptide is set to True. Coverage masks are calculated for unique and for shared peptides. Peptides are matched to proteins according to positions derived by the digestion of the FASTA file. Alternatively peptides could also be matched to proteins just by sequence as it is done in :func:`pyteomics.parser.coverage`, but this is not the case here. :param proteindb: a dictionary containing :class:`ProteinSequence` entries, for example ``ProteinDatabase.proteins`` :param proteindb: a dictionary containing :class:`PeptideSequence` entries, for example ``ProteinDatabase.peptides`` Sets two attributes for each ``ProteinSequence`` entry: ``.coverageMaskUnique`` = coverage mask of unique peptides ``.coverageMaskShared`` = coverage mask of shared peptides """ for proteinId, proteinEntry in viewitems(proteindb): coverageMaskUnique = numpy.zeros(proteinEntry.length(), dtype='bool') for peptide in proteinEntry.uniquePeptides: startPos, endPos = peptidedb[peptide].proteinPositions[proteinId] coverageMaskUnique[startPos-1:endPos] = True coverageMaskShared = numpy.zeros(proteinEntry.length(), dtype='bool') for peptide in proteinEntry.sharedPeptides: startPos, endPos = peptidedb[peptide].proteinPositions[proteinId] coverageMaskShared[startPos-1:endPos] = True setattr(proteinEntry, 'coverageMaskUnique', coverageMaskUnique) setattr(proteinEntry, 'coverageMaskShared', coverageMaskShared)
python
def _calculateCoverageMasks(proteindb, peptidedb): """Calcualte the sequence coverage masks for all proteindb elements. Private method used by :class:`ProteinDatabase`. A coverage mask is a numpy boolean array with the length of the protein sequence. Each protein position that has been covered in at least one peptide is set to True. Coverage masks are calculated for unique and for shared peptides. Peptides are matched to proteins according to positions derived by the digestion of the FASTA file. Alternatively peptides could also be matched to proteins just by sequence as it is done in :func:`pyteomics.parser.coverage`, but this is not the case here. :param proteindb: a dictionary containing :class:`ProteinSequence` entries, for example ``ProteinDatabase.proteins`` :param proteindb: a dictionary containing :class:`PeptideSequence` entries, for example ``ProteinDatabase.peptides`` Sets two attributes for each ``ProteinSequence`` entry: ``.coverageMaskUnique`` = coverage mask of unique peptides ``.coverageMaskShared`` = coverage mask of shared peptides """ for proteinId, proteinEntry in viewitems(proteindb): coverageMaskUnique = numpy.zeros(proteinEntry.length(), dtype='bool') for peptide in proteinEntry.uniquePeptides: startPos, endPos = peptidedb[peptide].proteinPositions[proteinId] coverageMaskUnique[startPos-1:endPos] = True coverageMaskShared = numpy.zeros(proteinEntry.length(), dtype='bool') for peptide in proteinEntry.sharedPeptides: startPos, endPos = peptidedb[peptide].proteinPositions[proteinId] coverageMaskShared[startPos-1:endPos] = True setattr(proteinEntry, 'coverageMaskUnique', coverageMaskUnique) setattr(proteinEntry, 'coverageMaskShared', coverageMaskShared)
['def', '_calculateCoverageMasks', '(', 'proteindb', ',', 'peptidedb', ')', ':', 'for', 'proteinId', ',', 'proteinEntry', 'in', 'viewitems', '(', 'proteindb', ')', ':', 'coverageMaskUnique', '=', 'numpy', '.', 'zeros', '(', 'proteinEntry', '.', 'length', '(', ')', ',', 'dtype', '=', "'bool'", ')', 'for', 'peptide', 'in', 'proteinEntry', '.', 'uniquePeptides', ':', 'startPos', ',', 'endPos', '=', 'peptidedb', '[', 'peptide', ']', '.', 'proteinPositions', '[', 'proteinId', ']', 'coverageMaskUnique', '[', 'startPos', '-', '1', ':', 'endPos', ']', '=', 'True', 'coverageMaskShared', '=', 'numpy', '.', 'zeros', '(', 'proteinEntry', '.', 'length', '(', ')', ',', 'dtype', '=', "'bool'", ')', 'for', 'peptide', 'in', 'proteinEntry', '.', 'sharedPeptides', ':', 'startPos', ',', 'endPos', '=', 'peptidedb', '[', 'peptide', ']', '.', 'proteinPositions', '[', 'proteinId', ']', 'coverageMaskShared', '[', 'startPos', '-', '1', ':', 'endPos', ']', '=', 'True', 'setattr', '(', 'proteinEntry', ',', "'coverageMaskUnique'", ',', 'coverageMaskUnique', ')', 'setattr', '(', 'proteinEntry', ',', "'coverageMaskShared'", ',', 'coverageMaskShared', ')']
Calcualte the sequence coverage masks for all proteindb elements. Private method used by :class:`ProteinDatabase`. A coverage mask is a numpy boolean array with the length of the protein sequence. Each protein position that has been covered in at least one peptide is set to True. Coverage masks are calculated for unique and for shared peptides. Peptides are matched to proteins according to positions derived by the digestion of the FASTA file. Alternatively peptides could also be matched to proteins just by sequence as it is done in :func:`pyteomics.parser.coverage`, but this is not the case here. :param proteindb: a dictionary containing :class:`ProteinSequence` entries, for example ``ProteinDatabase.proteins`` :param proteindb: a dictionary containing :class:`PeptideSequence` entries, for example ``ProteinDatabase.peptides`` Sets two attributes for each ``ProteinSequence`` entry: ``.coverageMaskUnique`` = coverage mask of unique peptides ``.coverageMaskShared`` = coverage mask of shared peptides
['Calcualte', 'the', 'sequence', 'coverage', 'masks', 'for', 'all', 'proteindb', 'elements', '.', 'Private', 'method', 'used', 'by', ':', 'class', ':', 'ProteinDatabase', '.']
train
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/proteindb.py#L334-L367
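A toy version of one coverage mask from the record above: a boolean array over a hypothetical protein length, with 1-based peptide positions flipped to True.

import numpy as np

protein_length = 12
peptide_positions = [(1, 4), (7, 9)]     # hypothetical 1-based (start, end) pairs

mask = np.zeros(protein_length, dtype=bool)
for start, end in peptide_positions:
    mask[start - 1:end] = True
print(int(mask.sum()), "of", protein_length, "residues covered")   # 7 of 12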
6,536
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py
brocade_threshold_monitor.threshold_monitor_hidden_threshold_monitor_Memory_retry
def threshold_monitor_hidden_threshold_monitor_Memory_retry(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") Memory = ET.SubElement(threshold_monitor, "Memory") retry = ET.SubElement(Memory, "retry") retry.text = kwargs.pop('retry') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def threshold_monitor_hidden_threshold_monitor_Memory_retry(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") Memory = ET.SubElement(threshold_monitor, "Memory") retry = ET.SubElement(Memory, "retry") retry.text = kwargs.pop('retry') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'threshold_monitor_hidden_threshold_monitor_Memory_retry', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'threshold_monitor_hidden', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"threshold-monitor-hidden"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-threshold-monitor"', ')', 'threshold_monitor', '=', 'ET', '.', 'SubElement', '(', 'threshold_monitor_hidden', ',', '"threshold-monitor"', ')', 'Memory', '=', 'ET', '.', 'SubElement', '(', 'threshold_monitor', ',', '"Memory"', ')', 'retry', '=', 'ET', '.', 'SubElement', '(', 'Memory', ',', '"retry"', ')', 'retry', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'retry'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py#L487-L498
6,537
onnx/onnxmltools
onnxutils/onnxconverter_common/optimizer.py
Solution.delete_node_1ton
def delete_node_1ton(node_list, begin, node, end): # type: ([],LinkedNode, LinkedNode, LinkedNode)->[] """ delete the node which has 1-input and n-output """ if end is None: assert end is not None end = node.successor elif not isinstance(end, list): end = [end] if any(e_.in_or_out for e_ in end): # if the end is output node, the output name will be kept to avoid the model output name updating. begin.out_redirect(node.single_input, node.single_output) else: for ne_ in end: target_var_name = node.single_input # since the output info never be updated, except the final. assert target_var_name in begin.output.values() ne_.in_redirect(node.single_output, target_var_name) begin.successor = [v_ for v_ in begin.successor if v_ != node] + node.successor for ne_ in end: ne_.precedence = [begin if v_ == node else v_ for v_ in ne_.precedence] node_list.remove(node) return node_list
python
def delete_node_1ton(node_list, begin, node, end): # type: ([],LinkedNode, LinkedNode, LinkedNode)->[] """ delete the node which has 1-input and n-output """ if end is None: assert end is not None end = node.successor elif not isinstance(end, list): end = [end] if any(e_.in_or_out for e_ in end): # if the end is output node, the output name will be kept to avoid the model output name updating. begin.out_redirect(node.single_input, node.single_output) else: for ne_ in end: target_var_name = node.single_input # since the output info never be updated, except the final. assert target_var_name in begin.output.values() ne_.in_redirect(node.single_output, target_var_name) begin.successor = [v_ for v_ in begin.successor if v_ != node] + node.successor for ne_ in end: ne_.precedence = [begin if v_ == node else v_ for v_ in ne_.precedence] node_list.remove(node) return node_list
['def', 'delete_node_1ton', '(', 'node_list', ',', 'begin', ',', 'node', ',', 'end', ')', ':', '# type: ([],LinkedNode, LinkedNode, LinkedNode)->[]', 'if', 'end', 'is', 'None', ':', 'assert', 'end', 'is', 'not', 'None', 'end', '=', 'node', '.', 'successor', 'elif', 'not', 'isinstance', '(', 'end', ',', 'list', ')', ':', 'end', '=', '[', 'end', ']', 'if', 'any', '(', 'e_', '.', 'in_or_out', 'for', 'e_', 'in', 'end', ')', ':', '# if the end is output node, the output name will be kept to avoid the model output name updating.', 'begin', '.', 'out_redirect', '(', 'node', '.', 'single_input', ',', 'node', '.', 'single_output', ')', 'else', ':', 'for', 'ne_', 'in', 'end', ':', 'target_var_name', '=', 'node', '.', 'single_input', '# since the output info never be updated, except the final.', 'assert', 'target_var_name', 'in', 'begin', '.', 'output', '.', 'values', '(', ')', 'ne_', '.', 'in_redirect', '(', 'node', '.', 'single_output', ',', 'target_var_name', ')', 'begin', '.', 'successor', '=', '[', 'v_', 'for', 'v_', 'in', 'begin', '.', 'successor', 'if', 'v_', '!=', 'node', ']', '+', 'node', '.', 'successor', 'for', 'ne_', 'in', 'end', ':', 'ne_', '.', 'precedence', '=', '[', 'begin', 'if', 'v_', '==', 'node', 'else', 'v_', 'for', 'v_', 'in', 'ne_', '.', 'precedence', ']', 'node_list', '.', 'remove', '(', 'node', ')', 'return', 'node_list']
delete the node which has 1-input and n-output
['delete', 'the', 'node', 'which', 'has', '1', '-', 'input', 'and', 'n', '-', 'output']
train
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxutils/onnxconverter_common/optimizer.py#L290-L315
6,538
yangl1996/libpagure
libpagure/libpagure.py
Pagure.create_basic_url
def create_basic_url(self): """ Create URL prefix for API calls based on type of repo. Repo may be forked and may be in namespace. That makes total 4 different types of URL. :return: """ if self.username is None: if self.namespace is None: request_url = "{}/api/0/{}/".format( self.instance, self.repo) else: request_url = "{}/api/0/{}/{}/".format( self.instance, self.namespace, self.repo) else: if self.namespace is None: request_url = "{}/api/0/fork/{}/{}/".format( self.instance, self.username, self.repo) else: request_url = "{}/api/0/fork/{}/{}/{}/".format( self.instance, self.username, self.namespace, self.repo) return request_url
python
def create_basic_url(self): """ Create URL prefix for API calls based on type of repo. Repo may be forked and may be in namespace. That makes total 4 different types of URL. :return: """ if self.username is None: if self.namespace is None: request_url = "{}/api/0/{}/".format( self.instance, self.repo) else: request_url = "{}/api/0/{}/{}/".format( self.instance, self.namespace, self.repo) else: if self.namespace is None: request_url = "{}/api/0/fork/{}/{}/".format( self.instance, self.username, self.repo) else: request_url = "{}/api/0/fork/{}/{}/{}/".format( self.instance, self.username, self.namespace, self.repo) return request_url
['def', 'create_basic_url', '(', 'self', ')', ':', 'if', 'self', '.', 'username', 'is', 'None', ':', 'if', 'self', '.', 'namespace', 'is', 'None', ':', 'request_url', '=', '"{}/api/0/{}/"', '.', 'format', '(', 'self', '.', 'instance', ',', 'self', '.', 'repo', ')', 'else', ':', 'request_url', '=', '"{}/api/0/{}/{}/"', '.', 'format', '(', 'self', '.', 'instance', ',', 'self', '.', 'namespace', ',', 'self', '.', 'repo', ')', 'else', ':', 'if', 'self', '.', 'namespace', 'is', 'None', ':', 'request_url', '=', '"{}/api/0/fork/{}/{}/"', '.', 'format', '(', 'self', '.', 'instance', ',', 'self', '.', 'username', ',', 'self', '.', 'repo', ')', 'else', ':', 'request_url', '=', '"{}/api/0/fork/{}/{}/{}/"', '.', 'format', '(', 'self', '.', 'instance', ',', 'self', '.', 'username', ',', 'self', '.', 'namespace', ',', 'self', '.', 'repo', ')', 'return', 'request_url']
Create URL prefix for API calls based on type of repo. Repo may be forked and may be in namespace. That makes total 4 different types of URL. :return:
['Create', 'URL', 'prefix', 'for', 'API', 'calls', 'based', 'on', 'type', 'of', 'repo', '.']
train
https://github.com/yangl1996/libpagure/blob/dd96ed29142407463790c66ed321984a6ea7465a/libpagure/libpagure.py#L92-L114
6,539
NoviceLive/intellicoder
intellicoder/sources.py
make_c_header
def make_c_header(name, front, body): """ Build a C header from the front and body. """ return """ {0} # ifndef _GU_ZHENGXIONG_{1}_H # define _GU_ZHENGXIONG_{1}_H {2} # endif /* {3}.h */ """.strip().format(front, name.upper(), body, name) + '\n'
python
def make_c_header(name, front, body): """ Build a C header from the front and body. """ return """ {0} # ifndef _GU_ZHENGXIONG_{1}_H # define _GU_ZHENGXIONG_{1}_H {2} # endif /* {3}.h */ """.strip().format(front, name.upper(), body, name) + '\n'
['def', 'make_c_header', '(', 'name', ',', 'front', ',', 'body', ')', ':', 'return', '"""\n{0}\n\n\n# ifndef _GU_ZHENGXIONG_{1}_H\n# define _GU_ZHENGXIONG_{1}_H\n\n\n{2}\n\n\n# endif /* {3}.h */\n """', '.', 'strip', '(', ')', '.', 'format', '(', 'front', ',', 'name', '.', 'upper', '(', ')', ',', 'body', ',', 'name', ')', '+', "'\\n'"]
Build a C header from the front and body.
['Build', 'a', 'C', 'header', 'from', 'the', 'front', 'and', 'body', '.']
train
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/sources.py#L77-L93
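Calling the helper above with hypothetical arguments makes the generated layout easier to see; the function body is inlined here only so the call runs without the intellicoder package.

def make_c_header(name, front, body):
    return """
{0}


# ifndef _GU_ZHENGXIONG_{1}_H
# define _GU_ZHENGXIONG_{1}_H


{2}


# endif /* {3}.h */
""".strip().format(front, name.upper(), body, name) + '\n'

print(make_c_header("demo", "/* front matter (hypothetical) */", "int demo(void);"))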
6,540
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewpanel.py
XViewPanelBar.setTabText
def setTabText(self, index, text): """ Returns the text for the tab at the inputed index. :param index | <int> :return <str> """ try: self.items()[index].setText(text) except IndexError: pass
python
def setTabText(self, index, text): """ Returns the text for the tab at the inputed index. :param index | <int> :return <str> """ try: self.items()[index].setText(text) except IndexError: pass
['def', 'setTabText', '(', 'self', ',', 'index', ',', 'text', ')', ':', 'try', ':', 'self', '.', 'items', '(', ')', '[', 'index', ']', '.', 'setText', '(', 'text', ')', 'except', 'IndexError', ':', 'pass']
Returns the text for the tab at the inputed index. :param index | <int> :return <str>
['Returns', 'the', 'text', 'for', 'the', 'tab', 'at', 'the', 'inputed', 'index', '.']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewpanel.py#L626-L637
6,541
urbn/Caesium
caesium/document.py
AsyncRevisionStackManager.__get_pending_revisions
def __get_pending_revisions(self): """ Get all the pending revisions after the current time :return: A list of revisions :rtype: list """ dttime = time.mktime(datetime.datetime.now().timetuple()) changes = yield self.revisions.find({ "toa" : { "$lt" : dttime, }, "processed": False, "inProcess": None }) if len(changes) > 0: yield self.set_all_revisions_to_in_process([change.get("id") for change in changes]) raise Return(changes)
python
def __get_pending_revisions(self): """ Get all the pending revisions after the current time :return: A list of revisions :rtype: list """ dttime = time.mktime(datetime.datetime.now().timetuple()) changes = yield self.revisions.find({ "toa" : { "$lt" : dttime, }, "processed": False, "inProcess": None }) if len(changes) > 0: yield self.set_all_revisions_to_in_process([change.get("id") for change in changes]) raise Return(changes)
['def', '__get_pending_revisions', '(', 'self', ')', ':', 'dttime', '=', 'time', '.', 'mktime', '(', 'datetime', '.', 'datetime', '.', 'now', '(', ')', '.', 'timetuple', '(', ')', ')', 'changes', '=', 'yield', 'self', '.', 'revisions', '.', 'find', '(', '{', '"toa"', ':', '{', '"$lt"', ':', 'dttime', ',', '}', ',', '"processed"', ':', 'False', ',', '"inProcess"', ':', 'None', '}', ')', 'if', 'len', '(', 'changes', ')', '>', '0', ':', 'yield', 'self', '.', 'set_all_revisions_to_in_process', '(', '[', 'change', '.', 'get', '(', '"id"', ')', 'for', 'change', 'in', 'changes', ']', ')', 'raise', 'Return', '(', 'changes', ')']
Get all the pending revisions after the current time :return: A list of revisions :rtype: list
['Get', 'all', 'the', 'pending', 'revisions', 'after', 'the', 'current', 'time']
train
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L67-L86
6,542
itamarst/eliot
eliot/_action.py
WrittenAction.status
def status(self): """ One of C{STARTED_STATUS}, C{SUCCEEDED_STATUS}, C{FAILED_STATUS} or C{None}. """ message = self.end_message if self.end_message else self.start_message if message: return message.contents[ACTION_STATUS_FIELD] else: return None
python
def status(self): """ One of C{STARTED_STATUS}, C{SUCCEEDED_STATUS}, C{FAILED_STATUS} or C{None}. """ message = self.end_message if self.end_message else self.start_message if message: return message.contents[ACTION_STATUS_FIELD] else: return None
['def', 'status', '(', 'self', ')', ':', 'message', '=', 'self', '.', 'end_message', 'if', 'self', '.', 'end_message', 'else', 'self', '.', 'start_message', 'if', 'message', ':', 'return', 'message', '.', 'contents', '[', 'ACTION_STATUS_FIELD', ']', 'else', ':', 'return', 'None']
One of C{STARTED_STATUS}, C{SUCCEEDED_STATUS}, C{FAILED_STATUS} or C{None}.
['One', 'of', 'C', '{', 'STARTED_STATUS', '}', 'C', '{', 'SUCCEEDED_STATUS', '}', 'C', '{', 'FAILED_STATUS', '}', 'or', 'C', '{', 'None', '}', '.']
train
https://github.com/itamarst/eliot/blob/c03c96520c5492fadfc438b4b0f6336e2785ba2d/eliot/_action.py#L594-L603
6,543
akaszynski/pymeshfix
pymeshfix/examples/fix.py
with_vtk
def with_vtk(plot=True): """ Tests VTK interface and mesh repair of Stanford Bunny Mesh """ mesh = vtki.PolyData(bunny_scan) meshfix = pymeshfix.MeshFix(mesh) if plot: print('Plotting input mesh') meshfix.plot() meshfix.repair() if plot: print('Plotting repaired mesh') meshfix.plot() return meshfix.mesh
python
def with_vtk(plot=True): """ Tests VTK interface and mesh repair of Stanford Bunny Mesh """ mesh = vtki.PolyData(bunny_scan) meshfix = pymeshfix.MeshFix(mesh) if plot: print('Plotting input mesh') meshfix.plot() meshfix.repair() if plot: print('Plotting repaired mesh') meshfix.plot() return meshfix.mesh
['def', 'with_vtk', '(', 'plot', '=', 'True', ')', ':', 'mesh', '=', 'vtki', '.', 'PolyData', '(', 'bunny_scan', ')', 'meshfix', '=', 'pymeshfix', '.', 'MeshFix', '(', 'mesh', ')', 'if', 'plot', ':', 'print', '(', "'Plotting input mesh'", ')', 'meshfix', '.', 'plot', '(', ')', 'meshfix', '.', 'repair', '(', ')', 'if', 'plot', ':', 'print', '(', "'Plotting repaired mesh'", ')', 'meshfix', '.', 'plot', '(', ')', 'return', 'meshfix', '.', 'mesh']
Tests VTK interface and mesh repair of Stanford Bunny Mesh
['Tests', 'VTK', 'interface', 'and', 'mesh', 'repair', 'of', 'Stanford', 'Bunny', 'Mesh']
train
https://github.com/akaszynski/pymeshfix/blob/51873b25d8d46168479989a528db8456af6748f4/pymeshfix/examples/fix.py#L15-L27
6,544
fhs/pyhdf
pyhdf/SD.py
SDS.setrange
def setrange(self, min, max): """Set the dataset min and max values. Args:: min dataset minimum value (attribute 'valid_range') max dataset maximum value (attribute 'valid_range') Returns:: None The data range is part of the so-called "standard" SDS attributes. Calling method 'setrange' is equivalent to setting the following attribute with a 2-element [min,max] array:: valid_range C library equivalent: SDsetrange """ # Obtain SDS data type. try: sds_name, rank, dim_sizes, data_type, n_attrs = self.info() except HDF4Error: raise HDF4Error('setrange : cannot execute') n_values = 1 if data_type == SDC.CHAR8: buf1 = _C.array_byte(n_values) buf2 = _C.array_byte(n_values) elif data_type in [SDC.UCHAR8, SDC.UINT8]: buf1 = _C.array_byte(n_values) buf2 = _C.array_byte(n_values) elif data_type == SDC.INT8: buf1 = _C.array_int8(n_values) buf2 = _C.array_int8(n_values) elif data_type == SDC.INT16: buf1 = _C.array_int16(n_values) buf2 = _C.array_int16(n_values) elif data_type == SDC.UINT16: buf1 = _C.array_uint16(n_values) buf2 = _C.array_uint16(n_values) elif data_type == SDC.INT32: buf1 = _C.array_int32(n_values) buf2 = _C.array_int32(n_values) elif data_type == SDC.UINT32: buf1 = _C.array_uint32(n_values) buf2 = _C.array_uint32(n_values) elif data_type == SDC.FLOAT32: buf1 = _C.array_float32(n_values) buf2 = _C.array_float32(n_values) elif data_type == SDC.FLOAT64: buf1 = _C.array_float64(n_values) buf2 = _C.array_float64(n_values) else: raise HDF4Error("SDsetrange: SDS has an illegal or " \ "unsupported type %d" % data_type) buf1[0] = max buf2[0] = min status = _C.SDsetrange(self._id, buf1, buf2) _checkErr('setrange', status, 'cannot execute')
python
def setrange(self, min, max): """Set the dataset min and max values. Args:: min dataset minimum value (attribute 'valid_range') max dataset maximum value (attribute 'valid_range') Returns:: None The data range is part of the so-called "standard" SDS attributes. Calling method 'setrange' is equivalent to setting the following attribute with a 2-element [min,max] array:: valid_range C library equivalent: SDsetrange """ # Obtain SDS data type. try: sds_name, rank, dim_sizes, data_type, n_attrs = self.info() except HDF4Error: raise HDF4Error('setrange : cannot execute') n_values = 1 if data_type == SDC.CHAR8: buf1 = _C.array_byte(n_values) buf2 = _C.array_byte(n_values) elif data_type in [SDC.UCHAR8, SDC.UINT8]: buf1 = _C.array_byte(n_values) buf2 = _C.array_byte(n_values) elif data_type == SDC.INT8: buf1 = _C.array_int8(n_values) buf2 = _C.array_int8(n_values) elif data_type == SDC.INT16: buf1 = _C.array_int16(n_values) buf2 = _C.array_int16(n_values) elif data_type == SDC.UINT16: buf1 = _C.array_uint16(n_values) buf2 = _C.array_uint16(n_values) elif data_type == SDC.INT32: buf1 = _C.array_int32(n_values) buf2 = _C.array_int32(n_values) elif data_type == SDC.UINT32: buf1 = _C.array_uint32(n_values) buf2 = _C.array_uint32(n_values) elif data_type == SDC.FLOAT32: buf1 = _C.array_float32(n_values) buf2 = _C.array_float32(n_values) elif data_type == SDC.FLOAT64: buf1 = _C.array_float64(n_values) buf2 = _C.array_float64(n_values) else: raise HDF4Error("SDsetrange: SDS has an illegal or " \ "unsupported type %d" % data_type) buf1[0] = max buf2[0] = min status = _C.SDsetrange(self._id, buf1, buf2) _checkErr('setrange', status, 'cannot execute')
['def', 'setrange', '(', 'self', ',', 'min', ',', 'max', ')', ':', '# Obtain SDS data type.', 'try', ':', 'sds_name', ',', 'rank', ',', 'dim_sizes', ',', 'data_type', ',', 'n_attrs', '=', 'self', '.', 'info', '(', ')', 'except', 'HDF4Error', ':', 'raise', 'HDF4Error', '(', "'setrange : cannot execute'", ')', 'n_values', '=', '1', 'if', 'data_type', '==', 'SDC', '.', 'CHAR8', ':', 'buf1', '=', '_C', '.', 'array_byte', '(', 'n_values', ')', 'buf2', '=', '_C', '.', 'array_byte', '(', 'n_values', ')', 'elif', 'data_type', 'in', '[', 'SDC', '.', 'UCHAR8', ',', 'SDC', '.', 'UINT8', ']', ':', 'buf1', '=', '_C', '.', 'array_byte', '(', 'n_values', ')', 'buf2', '=', '_C', '.', 'array_byte', '(', 'n_values', ')', 'elif', 'data_type', '==', 'SDC', '.', 'INT8', ':', 'buf1', '=', '_C', '.', 'array_int8', '(', 'n_values', ')', 'buf2', '=', '_C', '.', 'array_int8', '(', 'n_values', ')', 'elif', 'data_type', '==', 'SDC', '.', 'INT16', ':', 'buf1', '=', '_C', '.', 'array_int16', '(', 'n_values', ')', 'buf2', '=', '_C', '.', 'array_int16', '(', 'n_values', ')', 'elif', 'data_type', '==', 'SDC', '.', 'UINT16', ':', 'buf1', '=', '_C', '.', 'array_uint16', '(', 'n_values', ')', 'buf2', '=', '_C', '.', 'array_uint16', '(', 'n_values', ')', 'elif', 'data_type', '==', 'SDC', '.', 'INT32', ':', 'buf1', '=', '_C', '.', 'array_int32', '(', 'n_values', ')', 'buf2', '=', '_C', '.', 'array_int32', '(', 'n_values', ')', 'elif', 'data_type', '==', 'SDC', '.', 'UINT32', ':', 'buf1', '=', '_C', '.', 'array_uint32', '(', 'n_values', ')', 'buf2', '=', '_C', '.', 'array_uint32', '(', 'n_values', ')', 'elif', 'data_type', '==', 'SDC', '.', 'FLOAT32', ':', 'buf1', '=', '_C', '.', 'array_float32', '(', 'n_values', ')', 'buf2', '=', '_C', '.', 'array_float32', '(', 'n_values', ')', 'elif', 'data_type', '==', 'SDC', '.', 'FLOAT64', ':', 'buf1', '=', '_C', '.', 'array_float64', '(', 'n_values', ')', 'buf2', '=', '_C', '.', 'array_float64', '(', 'n_values', ')', 'else', ':', 'raise', 'HDF4Error', '(', '"SDsetrange: SDS has an illegal or "', '"unsupported type %d"', '%', 'data_type', ')', 'buf1', '[', '0', ']', '=', 'max', 'buf2', '[', '0', ']', '=', 'min', 'status', '=', '_C', '.', 'SDsetrange', '(', 'self', '.', '_id', ',', 'buf1', ',', 'buf2', ')', '_checkErr', '(', "'setrange'", ',', 'status', ',', "'cannot execute'", ')']
Set the dataset min and max values. Args:: min dataset minimum value (attribute 'valid_range') max dataset maximum value (attribute 'valid_range') Returns:: None The data range is part of the so-called "standard" SDS attributes. Calling method 'setrange' is equivalent to setting the following attribute with a 2-element [min,max] array:: valid_range C library equivalent: SDsetrange
['Set', 'the', 'dataset', 'min', 'and', 'max', 'values', '.']
train
https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2555-L2629
6,545
sphinx-gallery/sphinx-gallery
sphinx_gallery/docs_resolv.py
SphinxDocLinkResolver._get_link
def _get_link(self, cobj): """Get a valid link, False if not found""" fullname = cobj['module_short'] + '.' + cobj['name'] try: value = self._searchindex['objects'][cobj['module_short']] match = value[cobj['name']] except KeyError: link = False else: fname_idx = match[0] objname_idx = str(match[1]) anchor = match[3] fname = self._searchindex['filenames'][fname_idx] # In 1.5+ Sphinx seems to have changed from .rst.html to only # .html extension in converted files. Find this from the options. ext = self._docopts.get('FILE_SUFFIX', '.rst.html') fname = os.path.splitext(fname)[0] + ext if self._is_windows: fname = fname.replace('/', '\\') link = os.path.join(self.doc_url, fname) else: link = posixpath.join(self.doc_url, fname) if anchor == '': anchor = fullname elif anchor == '-': anchor = (self._searchindex['objnames'][objname_idx][1] + '-' + fullname) link = link + '#' + anchor return link
python
def _get_link(self, cobj): """Get a valid link, False if not found""" fullname = cobj['module_short'] + '.' + cobj['name'] try: value = self._searchindex['objects'][cobj['module_short']] match = value[cobj['name']] except KeyError: link = False else: fname_idx = match[0] objname_idx = str(match[1]) anchor = match[3] fname = self._searchindex['filenames'][fname_idx] # In 1.5+ Sphinx seems to have changed from .rst.html to only # .html extension in converted files. Find this from the options. ext = self._docopts.get('FILE_SUFFIX', '.rst.html') fname = os.path.splitext(fname)[0] + ext if self._is_windows: fname = fname.replace('/', '\\') link = os.path.join(self.doc_url, fname) else: link = posixpath.join(self.doc_url, fname) if anchor == '': anchor = fullname elif anchor == '-': anchor = (self._searchindex['objnames'][objname_idx][1] + '-' + fullname) link = link + '#' + anchor return link
['def', '_get_link', '(', 'self', ',', 'cobj', ')', ':', 'fullname', '=', 'cobj', '[', "'module_short'", ']', '+', "'.'", '+', 'cobj', '[', "'name'", ']', 'try', ':', 'value', '=', 'self', '.', '_searchindex', '[', "'objects'", ']', '[', 'cobj', '[', "'module_short'", ']', ']', 'match', '=', 'value', '[', 'cobj', '[', "'name'", ']', ']', 'except', 'KeyError', ':', 'link', '=', 'False', 'else', ':', 'fname_idx', '=', 'match', '[', '0', ']', 'objname_idx', '=', 'str', '(', 'match', '[', '1', ']', ')', 'anchor', '=', 'match', '[', '3', ']', 'fname', '=', 'self', '.', '_searchindex', '[', "'filenames'", ']', '[', 'fname_idx', ']', '# In 1.5+ Sphinx seems to have changed from .rst.html to only', '# .html extension in converted files. Find this from the options.', 'ext', '=', 'self', '.', '_docopts', '.', 'get', '(', "'FILE_SUFFIX'", ',', "'.rst.html'", ')', 'fname', '=', 'os', '.', 'path', '.', 'splitext', '(', 'fname', ')', '[', '0', ']', '+', 'ext', 'if', 'self', '.', '_is_windows', ':', 'fname', '=', 'fname', '.', 'replace', '(', "'/'", ',', "'\\\\'", ')', 'link', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'doc_url', ',', 'fname', ')', 'else', ':', 'link', '=', 'posixpath', '.', 'join', '(', 'self', '.', 'doc_url', ',', 'fname', ')', 'if', 'anchor', '==', "''", ':', 'anchor', '=', 'fullname', 'elif', 'anchor', '==', "'-'", ':', 'anchor', '=', '(', 'self', '.', '_searchindex', '[', "'objnames'", ']', '[', 'objname_idx', ']', '[', '1', ']', '+', "'-'", '+', 'fullname', ')', 'link', '=', 'link', '+', "'#'", '+', 'anchor', 'return', 'link']
Get a valid link, False if not found
['Get', 'a', 'valid', 'link', 'False', 'if', 'not', 'found']
train
https://github.com/sphinx-gallery/sphinx-gallery/blob/b0c1f6701bf3f4cef238757e1105cf3686b5e674/sphinx_gallery/docs_resolv.py#L191-L224
6,546
bjmorgan/lattice_mc
lattice_mc/lattice.py
Lattice.site_occupation_statistics
def site_occupation_statistics( self ): """ Average site occupation for each site type Args: None Returns: (Dict(Str:Float)): Dictionary of occupation statistics, e.g.:: { 'A' : 2.5, 'B' : 25.3 } """ if self.time == 0.0: return None occupation_stats = { label : 0.0 for label in self.site_labels } for site in self.sites: occupation_stats[ site.label ] += site.time_occupied for label in self.site_labels: occupation_stats[ label ] /= self.time return occupation_stats
python
def site_occupation_statistics( self ): """ Average site occupation for each site type Args: None Returns: (Dict(Str:Float)): Dictionary of occupation statistics, e.g.:: { 'A' : 2.5, 'B' : 25.3 } """ if self.time == 0.0: return None occupation_stats = { label : 0.0 for label in self.site_labels } for site in self.sites: occupation_stats[ site.label ] += site.time_occupied for label in self.site_labels: occupation_stats[ label ] /= self.time return occupation_stats
['def', 'site_occupation_statistics', '(', 'self', ')', ':', 'if', 'self', '.', 'time', '==', '0.0', ':', 'return', 'None', 'occupation_stats', '=', '{', 'label', ':', '0.0', 'for', 'label', 'in', 'self', '.', 'site_labels', '}', 'for', 'site', 'in', 'self', '.', 'sites', ':', 'occupation_stats', '[', 'site', '.', 'label', ']', '+=', 'site', '.', 'time_occupied', 'for', 'label', 'in', 'self', '.', 'site_labels', ':', 'occupation_stats', '[', 'label', ']', '/=', 'self', '.', 'time', 'return', 'occupation_stats']
Average site occupation for each site type Args: None Returns: (Dict(Str:Float)): Dictionary of occupation statistics, e.g.:: { 'A' : 2.5, 'B' : 25.3 }
['Average', 'site', 'occupation', 'for', 'each', 'site', 'type']
train
https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/lattice.py#L250-L269
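The statistic in record 6,546 is a time average: each site label's accumulated occupied time divided by the total simulated time. A minimal stand-alone sketch of the same bookkeeping, using plain (label, time_occupied) pairs instead of lattice_mc Site objects (the site data below is made up for illustration):

    def site_occupation_statistics(sites, total_time):
        # sites: iterable of (label, time_occupied) pairs; total_time: elapsed simulation time
        if total_time == 0.0:
            return None
        stats = {}
        for label, time_occupied in sites:
            stats[label] = stats.get(label, 0.0) + time_occupied
        return {label: t / total_time for label, t in stats.items()}

    print(site_occupation_statistics([('A', 2.0), ('A', 3.0), ('B', 1.0)], total_time=2.0))
    # {'A': 2.5, 'B': 0.5}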
6,547
duniter/duniter-python-api
duniterpy/documents/peer.py
Peer.raw
def raw(self) -> str: """ Return a raw format string of the Peer document :return: """ doc = """Version: {0} Type: Peer Currency: {1} PublicKey: {2} Block: {3} Endpoints: """.format(self.version, self.currency, self.pubkey, self.blockUID) for _endpoint in self.endpoints: doc += "{0}\n".format(_endpoint.inline()) return doc
python
def raw(self) -> str: """ Return a raw format string of the Peer document :return: """ doc = """Version: {0} Type: Peer Currency: {1} PublicKey: {2} Block: {3} Endpoints: """.format(self.version, self.currency, self.pubkey, self.blockUID) for _endpoint in self.endpoints: doc += "{0}\n".format(_endpoint.inline()) return doc
['def', 'raw', '(', 'self', ')', '->', 'str', ':', 'doc', '=', '"""Version: {0}\nType: Peer\nCurrency: {1}\nPublicKey: {2}\nBlock: {3}\nEndpoints:\n"""', '.', 'format', '(', 'self', '.', 'version', ',', 'self', '.', 'currency', ',', 'self', '.', 'pubkey', ',', 'self', '.', 'blockUID', ')', 'for', '_endpoint', 'in', 'self', '.', 'endpoints', ':', 'doc', '+=', '"{0}\\n"', '.', 'format', '(', '_endpoint', '.', 'inline', '(', ')', ')', 'return', 'doc']
Return a raw format string of the Peer document :return:
['Return', 'a', 'raw', 'format', 'string', 'of', 'the', 'Peer', 'document']
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/peer.py#L101-L118
6,548
vowatchka/palindromus
palindromus/__init__.py
checktext
def checktext(sometext, interchange = ALL): """ Checks that some text is palindrome. Checking performs case-insensitive :param str sometext: It is some string that will be checked for palindrome as text. What is the text see at help(palindromus.istext) The text can be multiline. :keyword dict interchange: It is dictionary of interchangeable letters :except TypeError: If the checked text is not a string :return bool: """ # check invalid data types OnlyStringsCanBeChecked(sometext) if istext(sometext): return checkstring(sometext, interchange = interchange) else: return False
python
def checktext(sometext, interchange = ALL): """ Checks that some text is palindrome. Checking performs case-insensitive :param str sometext: It is some string that will be checked for palindrome as text. What is the text see at help(palindromus.istext) The text can be multiline. :keyword dict interchange: It is dictionary of interchangeable letters :except TypeError: If the checked text is not a string :return bool: """ # check invalid data types OnlyStringsCanBeChecked(sometext) if istext(sometext): return checkstring(sometext, interchange = interchange) else: return False
['def', 'checktext', '(', 'sometext', ',', 'interchange', '=', 'ALL', ')', ':', '# check invalid data types', 'OnlyStringsCanBeChecked', '(', 'sometext', ')', 'if', 'istext', '(', 'sometext', ')', ':', 'return', 'checkstring', '(', 'sometext', ',', 'interchange', '=', 'interchange', ')', 'else', ':', 'return', 'False']
Checks that some text is palindrome. Checking performs case-insensitive :param str sometext: It is some string that will be checked for palindrome as text. What is the text see at help(palindromus.istext) The text can be multiline. :keyword dict interchange: It is dictionary of interchangeable letters :except TypeError: If the checked text is not a string :return bool:
['Checks', 'that', 'some', 'text', 'is', 'palindrome', '.', 'Checking', 'performs', 'case', '-', 'insensitive', ':', 'param', 'str', 'sometext', ':', 'It', 'is', 'some', 'string', 'that', 'will', 'be', 'checked', 'for', 'palindrome', 'as', 'text', '.', 'What', 'is', 'the', 'text', 'see', 'at', 'help', '(', 'palindromus', '.', 'istext', ')', 'The', 'text', 'can', 'be', 'multiline', '.', ':', 'keyword', 'dict', 'interchange', ':', 'It', 'is', 'dictionary', 'of', 'interchangeable', 'letters', ':', 'except', 'TypeError', ':', 'If', 'the', 'checked', 'text', 'is', 'not', 'a', 'string', ':', 'return', 'bool', ':']
train
https://github.com/vowatchka/palindromus/blob/2fdac9259d7ba515d27cfde48c6d2be721594d66/palindromus/__init__.py#L291-L316
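The check in record 6,548 validates the input type and then delegates to checkstring. A minimal, self-contained sketch of the underlying idea (case-insensitive, ignoring everything except letters and digits); the real package additionally honours an interchange map of letters treated as equivalent, which this sketch omits:

    def is_palindrome_text(sometext):
        if not isinstance(sometext, str):
            raise TypeError("only strings can be checked")
        chars = [c.lower() for c in sometext if c.isalnum()]
        return chars == chars[::-1]

    print(is_palindrome_text("A man, a plan, a canal: Panama"))  # True
    print(is_palindrome_text("Not a palindrome"))                # False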
6,549
django-extensions/django-extensions
django_extensions/templatetags/widont.py
widont
def widont(value, count=1): """ Add an HTML non-breaking space between the final two words of the string to avoid "widowed" words. Examples: >>> print(widont('Test me out')) Test me&nbsp;out >>> print("'",widont('It works with trailing spaces too '), "'") ' It works with trailing spaces&nbsp;too ' >>> print(widont('NoEffect')) NoEffect """ def replace(matchobj): return force_text('&nbsp;%s' % matchobj.group(1)) for i in range(count): value = re_widont.sub(replace, force_text(value)) return value
python
def widont(value, count=1): """ Add an HTML non-breaking space between the final two words of the string to avoid "widowed" words. Examples: >>> print(widont('Test me out')) Test me&nbsp;out >>> print("'",widont('It works with trailing spaces too '), "'") ' It works with trailing spaces&nbsp;too ' >>> print(widont('NoEffect')) NoEffect """ def replace(matchobj): return force_text('&nbsp;%s' % matchobj.group(1)) for i in range(count): value = re_widont.sub(replace, force_text(value)) return value
['def', 'widont', '(', 'value', ',', 'count', '=', '1', ')', ':', 'def', 'replace', '(', 'matchobj', ')', ':', 'return', 'force_text', '(', "'&nbsp;%s'", '%', 'matchobj', '.', 'group', '(', '1', ')', ')', 'for', 'i', 'in', 'range', '(', 'count', ')', ':', 'value', '=', 're_widont', '.', 'sub', '(', 'replace', ',', 'force_text', '(', 'value', ')', ')', 'return', 'value']
Add an HTML non-breaking space between the final two words of the string to avoid "widowed" words. Examples: >>> print(widont('Test me out')) Test me&nbsp;out >>> print("'",widont('It works with trailing spaces too '), "'") ' It works with trailing spaces&nbsp;too ' >>> print(widont('NoEffect')) NoEffect
['Add', 'an', 'HTML', 'non', '-', 'breaking', 'space', 'between', 'the', 'final', 'two', 'words', 'of', 'the', 'string', 'to', 'avoid', 'widowed', 'words', '.']
train
https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/templatetags/widont.py#L15-L35
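The widont function in record 6,549 relies on a module-level regex (re_widont) and Django's force_text, neither of which appears in the record. A rough stand-alone equivalent with a guessed pattern that captures the final whitespace-plus-word (the actual pattern in django-extensions may differ) and plain str in place of force_text:

    import re

    re_widont = re.compile(r'\s+(\S+\s*)$')  # assumed pattern, not copied from the source

    def widont(value, count=1):
        for _ in range(count):
            value = re_widont.sub(lambda m: '&nbsp;%s' % m.group(1), str(value))
        return value

    print(widont('Test me out'))  # Test me&nbsp;out
    print(widont('NoEffect'))     # NoEffect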
6,550
dacut/python-aws-sig
awssig/sigv4.py
AWSSigV4Verifier.canonical_request
def canonical_request(self): """ The AWS SigV4 canonical request given parameters from an HTTP request. This process is outlined here: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html The canonical request is: request_method + '\n' + canonical_uri_path + '\n' + canonical_query_string + '\n' + signed_headers + '\n' + sha256(body).hexdigest() """ signed_headers = self.signed_headers header_lines = "".join( ["%s:%s\n" % item for item in iteritems(signed_headers)]) header_keys = ";".join([key for key in iterkeys(self.signed_headers)]) return (self.request_method + "\n" + self.canonical_uri_path + "\n" + self.canonical_query_string + "\n" + header_lines + "\n" + header_keys + "\n" + sha256(self.body).hexdigest())
python
def canonical_request(self): """ The AWS SigV4 canonical request given parameters from an HTTP request. This process is outlined here: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html The canonical request is: request_method + '\n' + canonical_uri_path + '\n' + canonical_query_string + '\n' + signed_headers + '\n' + sha256(body).hexdigest() """ signed_headers = self.signed_headers header_lines = "".join( ["%s:%s\n" % item for item in iteritems(signed_headers)]) header_keys = ";".join([key for key in iterkeys(self.signed_headers)]) return (self.request_method + "\n" + self.canonical_uri_path + "\n" + self.canonical_query_string + "\n" + header_lines + "\n" + header_keys + "\n" + sha256(self.body).hexdigest())
['def', 'canonical_request', '(', 'self', ')', ':', 'signed_headers', '=', 'self', '.', 'signed_headers', 'header_lines', '=', '""', '.', 'join', '(', '[', '"%s:%s\\n"', '%', 'item', 'for', 'item', 'in', 'iteritems', '(', 'signed_headers', ')', ']', ')', 'header_keys', '=', '";"', '.', 'join', '(', '[', 'key', 'for', 'key', 'in', 'iterkeys', '(', 'self', '.', 'signed_headers', ')', ']', ')', 'return', '(', 'self', '.', 'request_method', '+', '"\\n"', '+', 'self', '.', 'canonical_uri_path', '+', '"\\n"', '+', 'self', '.', 'canonical_query_string', '+', '"\\n"', '+', 'header_lines', '+', '"\\n"', '+', 'header_keys', '+', '"\\n"', '+', 'sha256', '(', 'self', '.', 'body', ')', '.', 'hexdigest', '(', ')', ')']
The AWS SigV4 canonical request given parameters from an HTTP request. This process is outlined here: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html The canonical request is: request_method + '\n' + canonical_uri_path + '\n' + canonical_query_string + '\n' + signed_headers + '\n' + sha256(body).hexdigest()
['The', 'AWS', 'SigV4', 'canonical', 'request', 'given', 'parameters', 'from', 'an', 'HTTP', 'request', '.', 'This', 'process', 'is', 'outlined', 'here', ':', 'http', ':', '//', 'docs', '.', 'aws', '.', 'amazon', '.', 'com', '/', 'general', '/', 'latest', '/', 'gr', '/', 'sigv4', '-', 'create', '-', 'canonical', '-', 'request', '.', 'html']
train
https://github.com/dacut/python-aws-sig/blob/7f6054dca4b32e67ca3d39db31c1b4be5efe54bd/awssig/sigv4.py#L302-L325
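To make the canonical-request layout in record 6,550 concrete, here is a hand-built example for a trivial GET with no body, independent of the awssig class (header values are illustrative; the class derives them from the request and lower-cases and sorts them via its signed_headers property). Note the blank line that appears because the canonical headers block itself ends with a newline:

    from hashlib import sha256

    method, uri, query = "GET", "/", ""
    headers = {"host": "example.amazonaws.com", "x-amz-date": "20150830T123600Z"}

    header_lines = "".join("%s:%s\n" % kv for kv in sorted(headers.items()))
    header_keys = ";".join(sorted(headers))
    payload_hash = sha256(b"").hexdigest()  # hash of the empty body

    canonical_request = "\n".join([method, uri, query, header_lines, header_keys, payload_hash])
    print(canonical_request)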
6,551
wtsi-hgi/gitlab-build-variables
gitlabbuildvariables/update/_single_project_updaters.py
FileBasedProjectVariablesUpdater._resolve_group_location
def _resolve_group_location(self, group: str) -> str: """ Resolves the location of a setting file based on the given identifier. :param group: the identifier for the group's settings file (~its location) :return: the absolute path of the settings location """ if os.path.isabs(group): possible_paths = [group] else: possible_paths = [] for repository in self.setting_repositories: possible_paths.append(os.path.join(repository, group)) for default_setting_extension in self.default_setting_extensions: number_of_paths = len(possible_paths) for i in range(number_of_paths): path_with_extension = "%s.%s" % (possible_paths[i], default_setting_extension) possible_paths.append(path_with_extension) for path in possible_paths: if os.path.exists(path): return path raise ValueError("Could not resolve location of settings identified by: \"%s\"" % group)
python
def _resolve_group_location(self, group: str) -> str: """ Resolves the location of a setting file based on the given identifier. :param group: the identifier for the group's settings file (~its location) :return: the absolute path of the settings location """ if os.path.isabs(group): possible_paths = [group] else: possible_paths = [] for repository in self.setting_repositories: possible_paths.append(os.path.join(repository, group)) for default_setting_extension in self.default_setting_extensions: number_of_paths = len(possible_paths) for i in range(number_of_paths): path_with_extension = "%s.%s" % (possible_paths[i], default_setting_extension) possible_paths.append(path_with_extension) for path in possible_paths: if os.path.exists(path): return path raise ValueError("Could not resolve location of settings identified by: \"%s\"" % group)
['def', '_resolve_group_location', '(', 'self', ',', 'group', ':', 'str', ')', '->', 'str', ':', 'if', 'os', '.', 'path', '.', 'isabs', '(', 'group', ')', ':', 'possible_paths', '=', '[', 'group', ']', 'else', ':', 'possible_paths', '=', '[', ']', 'for', 'repository', 'in', 'self', '.', 'setting_repositories', ':', 'possible_paths', '.', 'append', '(', 'os', '.', 'path', '.', 'join', '(', 'repository', ',', 'group', ')', ')', 'for', 'default_setting_extension', 'in', 'self', '.', 'default_setting_extensions', ':', 'number_of_paths', '=', 'len', '(', 'possible_paths', ')', 'for', 'i', 'in', 'range', '(', 'number_of_paths', ')', ':', 'path_with_extension', '=', '"%s.%s"', '%', '(', 'possible_paths', '[', 'i', ']', ',', 'default_setting_extension', ')', 'possible_paths', '.', 'append', '(', 'path_with_extension', ')', 'for', 'path', 'in', 'possible_paths', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'return', 'path', 'raise', 'ValueError', '(', '"Could not resolve location of settings identified by: \\"%s\\""', '%', 'group', ')']
Resolves the location of a setting file based on the given identifier. :param group: the identifier for the group's settings file (~its location) :return: the absolute path of the settings location
['Resolves', 'the', 'location', 'of', 'a', 'setting', 'file', 'based', 'on', 'the', 'given', 'identifier', '.', ':', 'param', 'group', ':', 'the', 'identifier', 'for', 'the', 'group', 's', 'settings', 'file', '(', '~its', 'location', ')', ':', 'return', ':', 'the', 'absolute', 'path', 'of', 'the', 'settings', 'location']
train
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/update/_single_project_updaters.py#L78-L100
6,552
emc-openstack/storops
storops/vnx/calculator.py
io_size_kb
def io_size_kb(prev, curr, counters): """ calculate the io size based on bandwidth and throughput formula: average_io_size = bandwidth / throughput :param prev: prev resource, not used :param curr: current resource :param counters: two stats, bandwidth in MB and throughput count :return: value, NaN if invalid """ bw_stats, io_stats = counters size_mb = div(getattr(curr, bw_stats), getattr(curr, io_stats)) return mul(size_mb, 1024)
python
def io_size_kb(prev, curr, counters): """ calculate the io size based on bandwidth and throughput formula: average_io_size = bandwidth / throughput :param prev: prev resource, not used :param curr: current resource :param counters: two stats, bandwidth in MB and throughput count :return: value, NaN if invalid """ bw_stats, io_stats = counters size_mb = div(getattr(curr, bw_stats), getattr(curr, io_stats)) return mul(size_mb, 1024)
['def', 'io_size_kb', '(', 'prev', ',', 'curr', ',', 'counters', ')', ':', 'bw_stats', ',', 'io_stats', '=', 'counters', 'size_mb', '=', 'div', '(', 'getattr', '(', 'curr', ',', 'bw_stats', ')', ',', 'getattr', '(', 'curr', ',', 'io_stats', ')', ')', 'return', 'mul', '(', 'size_mb', ',', '1024', ')']
calculate the io size based on bandwidth and throughput formula: average_io_size = bandwidth / throughput :param prev: prev resource, not used :param curr: current resource :param counters: two stats, bandwidth in MB and throughput count :return: value, NaN if invalid
['calculate', 'the', 'io', 'size', 'based', 'on', 'bandwidth', 'and', 'throughput']
train
https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/vnx/calculator.py#L187-L198
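Record 6,552 reduces to one formula: average IO size in KB = (bandwidth in MB/s divided by IOPS) x 1024, with NaN when the division is invalid (div and mul in storops wrap the arithmetic with that NaN handling). A dependency-free restatement:

    def io_size_kb(bandwidth_mb, throughput_iops):
        if not throughput_iops:            # zero or missing -> undefined
            return float('nan')
        return bandwidth_mb / throughput_iops * 1024

    print(io_size_kb(50.0, 6400))   # 8.0 KB average IO size
    print(io_size_kb(50.0, 0))      # nan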
6,553
pallets/werkzeug
src/werkzeug/formparser.py
FormDataParser.parse_from_environ
def parse_from_environ(self, environ): """Parses the information from the environment as form data. :param environ: the WSGI environment to be used for parsing. :return: A tuple in the form ``(stream, form, files)``. """ content_type = environ.get("CONTENT_TYPE", "") content_length = get_content_length(environ) mimetype, options = parse_options_header(content_type) return self.parse(get_input_stream(environ), mimetype, content_length, options)
python
def parse_from_environ(self, environ): """Parses the information from the environment as form data. :param environ: the WSGI environment to be used for parsing. :return: A tuple in the form ``(stream, form, files)``. """ content_type = environ.get("CONTENT_TYPE", "") content_length = get_content_length(environ) mimetype, options = parse_options_header(content_type) return self.parse(get_input_stream(environ), mimetype, content_length, options)
['def', 'parse_from_environ', '(', 'self', ',', 'environ', ')', ':', 'content_type', '=', 'environ', '.', 'get', '(', '"CONTENT_TYPE"', ',', '""', ')', 'content_length', '=', 'get_content_length', '(', 'environ', ')', 'mimetype', ',', 'options', '=', 'parse_options_header', '(', 'content_type', ')', 'return', 'self', '.', 'parse', '(', 'get_input_stream', '(', 'environ', ')', ',', 'mimetype', ',', 'content_length', ',', 'options', ')']
Parses the information from the environment as form data. :param environ: the WSGI environment to be used for parsing. :return: A tuple in the form ``(stream, form, files)``.
['Parses', 'the', 'information', 'from', 'the', 'environment', 'as', 'form', 'data', '.']
train
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/formparser.py#L197-L206
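A hedged usage sketch for the werkzeug entry in record 6,553: assuming werkzeug's test EnvironBuilder and FormDataParser expose the interfaces implied above (not verified against the werkzeug docs here), parsing a simple urlencoded POST out of a WSGI environ might look like this:

    from werkzeug.test import EnvironBuilder
    from werkzeug.formparser import FormDataParser

    environ = EnvironBuilder(method="POST", data={"name": "alice"}).get_environ()
    stream, form, files = FormDataParser().parse_from_environ(environ)
    print(form["name"])   # alice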
6,554
fabioz/PyDev.Debugger
third_party/pep8/lib2to3/lib2to3/fixer_base.py
BaseFix.start_tree
def start_tree(self, tree, filename): """Some fixers need to maintain tree-wide state. This method is called once, at the start of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. """ self.used_names = tree.used_names self.set_filename(filename) self.numbers = itertools.count(1) self.first_log = True
python
def start_tree(self, tree, filename): """Some fixers need to maintain tree-wide state. This method is called once, at the start of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. """ self.used_names = tree.used_names self.set_filename(filename) self.numbers = itertools.count(1) self.first_log = True
['def', 'start_tree', '(', 'self', ',', 'tree', ',', 'filename', ')', ':', 'self', '.', 'used_names', '=', 'tree', '.', 'used_names', 'self', '.', 'set_filename', '(', 'filename', ')', 'self', '.', 'numbers', '=', 'itertools', '.', 'count', '(', '1', ')', 'self', '.', 'first_log', '=', 'True']
Some fixers need to maintain tree-wide state. This method is called once, at the start of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from.
['Some', 'fixers', 'need', 'to', 'maintain', 'tree', '-', 'wide', 'state', '.', 'This', 'method', 'is', 'called', 'once', 'at', 'the', 'start', 'of', 'tree', 'fix', '-', 'up', '.']
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/fixer_base.py#L150-L160
6,555
fitnr/convertdate
convertdate/mayan.py
from_jd
def from_jd(jd): '''Calculate Mayan long count from Julian day''' d = jd - EPOCH baktun = trunc(d / 144000) d = (d % 144000) katun = trunc(d / 7200) d = (d % 7200) tun = trunc(d / 360) d = (d % 360) uinal = trunc(d / 20) kin = int((d % 20)) return (baktun, katun, tun, uinal, kin)
python
def from_jd(jd): '''Calculate Mayan long count from Julian day''' d = jd - EPOCH baktun = trunc(d / 144000) d = (d % 144000) katun = trunc(d / 7200) d = (d % 7200) tun = trunc(d / 360) d = (d % 360) uinal = trunc(d / 20) kin = int((d % 20)) return (baktun, katun, tun, uinal, kin)
['def', 'from_jd', '(', 'jd', ')', ':', 'd', '=', 'jd', '-', 'EPOCH', 'baktun', '=', 'trunc', '(', 'd', '/', '144000', ')', 'd', '=', '(', 'd', '%', '144000', ')', 'katun', '=', 'trunc', '(', 'd', '/', '7200', ')', 'd', '=', '(', 'd', '%', '7200', ')', 'tun', '=', 'trunc', '(', 'd', '/', '360', ')', 'd', '=', '(', 'd', '%', '360', ')', 'uinal', '=', 'trunc', '(', 'd', '/', '20', ')', 'kin', '=', 'int', '(', '(', 'd', '%', '20', ')', ')', 'return', '(', 'baktun', ',', 'katun', ',', 'tun', ',', 'uinal', ',', 'kin', ')']
Calculate Mayan long count from Julian day
['Calculate', 'Mayan', 'long', 'count', 'from', 'Julian', 'day']
train
https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/mayan.py#L41-L53
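The conversion in record 6,555 is a cascade of divisions of the day count since the Mayan long-count epoch by the place values 144000, 7200, 360 and 20. A self-contained sketch for dates after the epoch, assuming the commonly cited GMT correlation (epoch JD 584282.5 -- an assumption here, not read from convertdate's source):

    EPOCH = 584282.5   # assumed long-count epoch (GMT correlation)

    def from_jd(jd):
        d = jd - EPOCH
        baktun, d = divmod(d, 144000)
        katun, d = divmod(d, 7200)
        tun, d = divmod(d, 360)
        uinal, kin = divmod(d, 20)
        return tuple(int(x) for x in (baktun, katun, tun, uinal, kin))

    print(from_jd(2456282.5))   # (13, 0, 0, 0, 0) -- 21 December 2012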
6,556
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
strip_head
def strip_head(sequence, values): """Strips elements of `values` from the beginning of `sequence`.""" values = set(values) return list(itertools.dropwhile(lambda x: x in values, sequence))
python
def strip_head(sequence, values): """Strips elements of `values` from the beginning of `sequence`.""" values = set(values) return list(itertools.dropwhile(lambda x: x in values, sequence))
['def', 'strip_head', '(', 'sequence', ',', 'values', ')', ':', 'values', '=', 'set', '(', 'values', ')', 'return', 'list', '(', 'itertools', '.', 'dropwhile', '(', 'lambda', 'x', ':', 'x', 'in', 'values', ',', 'sequence', ')', ')']
Strips elements of `values` from the beginning of `sequence`.
['Strips', 'elements', 'of', 'values', 'from', 'the', 'beginning', 'of', 'sequence', '.']
train
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L133-L136
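Record 6,556 is a thin wrapper over itertools.dropwhile; reusing the code exactly as shown, its effect is:

    import itertools

    def strip_head(sequence, values):
        values = set(values)
        return list(itertools.dropwhile(lambda x: x in values, sequence))

    print(strip_head([0, 0, 1, 0, 2], [0]))   # [1, 0, 2] -- only leading zeros are removed
    print(strip_head("  indented", " "))      # ['i', 'n', 'd', 'e', 'n', 't', 'e', 'd']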
6,557
tyarkoni/pliers
pliers/utils/base.py
batch_iterable
def batch_iterable(l, n): ''' Chunks iterable into n sized batches Solution from: http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery''' i = iter(l) piece = list(islice(i, n)) while piece: yield piece piece = list(islice(i, n))
python
def batch_iterable(l, n): ''' Chunks iterable into n sized batches Solution from: http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery''' i = iter(l) piece = list(islice(i, n)) while piece: yield piece piece = list(islice(i, n))
['def', 'batch_iterable', '(', 'l', ',', 'n', ')', ':', 'i', '=', 'iter', '(', 'l', ')', 'piece', '=', 'list', '(', 'islice', '(', 'i', ',', 'n', ')', ')', 'while', 'piece', ':', 'yield', 'piece', 'piece', '=', 'list', '(', 'islice', '(', 'i', ',', 'n', ')', ')']
Chunks iterable into n sized batches Solution from: http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery
['Chunks', 'iterable', 'into', 'n', 'sized', 'batches', 'Solution', 'from', ':', 'http', ':', '//', 'stackoverflow', '.', 'com', '/', 'questions', '/', '1915170', '/', 'split', '-', 'a', '-', 'generator', '-', 'iterable', '-', 'every', '-', 'n', '-', 'items', '-', 'in', '-', 'python', '-', 'splitevery']
train
https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/utils/base.py#L45-L52
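Usage of the batch_iterable generator in record 6,557 (it yields lists of at most n items, the last batch possibly shorter), reusing the code as shown:

    from itertools import islice

    def batch_iterable(l, n):
        i = iter(l)
        piece = list(islice(i, n))
        while piece:
            yield piece
            piece = list(islice(i, n))

    print(list(batch_iterable(range(7), 3)))   # [[0, 1, 2], [3, 4, 5], [6]]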
6,558
cloudant/python-cloudant
src/cloudant/design_document.py
DesignDocument.search_info
def search_info(self, search_index): """ Retrieves information about a specified search index within the design document, returns dictionary GET databasename/_design/{ddoc}/_search_info/{search_index} """ ddoc_search_info = self.r_session.get( '/'.join([self.document_url, '_search_info', search_index])) ddoc_search_info.raise_for_status() return response_to_json_dict(ddoc_search_info)
python
def search_info(self, search_index): """ Retrieves information about a specified search index within the design document, returns dictionary GET databasename/_design/{ddoc}/_search_info/{search_index} """ ddoc_search_info = self.r_session.get( '/'.join([self.document_url, '_search_info', search_index])) ddoc_search_info.raise_for_status() return response_to_json_dict(ddoc_search_info)
['def', 'search_info', '(', 'self', ',', 'search_index', ')', ':', 'ddoc_search_info', '=', 'self', '.', 'r_session', '.', 'get', '(', "'/'", '.', 'join', '(', '[', 'self', '.', 'document_url', ',', "'_search_info'", ',', 'search_index', ']', ')', ')', 'ddoc_search_info', '.', 'raise_for_status', '(', ')', 'return', 'response_to_json_dict', '(', 'ddoc_search_info', ')']
Retrieves information about a specified search index within the design document, returns dictionary GET databasename/_design/{ddoc}/_search_info/{search_index}
['Retrieves', 'information', 'about', 'a', 'specified', 'search', 'index', 'within', 'the', 'design', 'document', 'returns', 'dictionary']
train
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/design_document.py#L712-L722
6,559
saltstack/salt
salt/states/pcs.py
auth
def auth(name, nodes, pcsuser='hacluster', pcspasswd='hacluster', extra_args=None): ''' Ensure all nodes are authorized to the cluster name Irrelevant, not used (recommended: pcs_auth__auth) nodes a list of nodes which should be authorized to the cluster pcsuser user for communication with pcs (default: hacluster) pcspasswd password for pcsuser (default: hacluster) extra_args list of extra args for the \'pcs cluster auth\' command Example: .. code-block:: yaml pcs_auth__auth: pcs.auth: - nodes: - node1.example.com - node2.example.com - pcsuser: hacluster - pcspasswd: hoonetorg - extra_args: [] ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} auth_required = False authorized = __salt__['pcs.is_auth'](nodes=nodes) log.trace('Output of pcs.is_auth: %s', authorized) authorized_dict = {} for line in authorized['stdout'].splitlines(): node = line.split(':')[0].strip() auth_state = line.split(':')[1].strip() if node in nodes: authorized_dict.update({node: auth_state}) log.trace('authorized_dict: %s', authorized_dict) for node in nodes: if node in authorized_dict and authorized_dict[node] == 'Already authorized': ret['comment'] += 'Node {0} is already authorized\n'.format(node) else: auth_required = True if __opts__['test']: ret['comment'] += 'Node is set to authorize: {0}\n'.format(node) if not auth_required: return ret if __opts__['test']: ret['result'] = None return ret if not isinstance(extra_args, (list, tuple)): extra_args = [] if '--force' not in extra_args: extra_args += ['--force'] authorize = __salt__['pcs.auth'](nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd, extra_args=extra_args) log.trace('Output of pcs.auth: %s', authorize) authorize_dict = {} for line in authorize['stdout'].splitlines(): node = line.split(':')[0].strip() auth_state = line.split(':')[1].strip() if node in nodes: authorize_dict.update({node: auth_state}) log.trace('authorize_dict: %s', authorize_dict) for node in nodes: if node in authorize_dict and authorize_dict[node] == 'Authorized': ret['comment'] += 'Authorized {0}\n'.format(node) ret['changes'].update({node: {'old': '', 'new': 'Authorized'}}) else: ret['result'] = False if node in authorized_dict: ret['comment'] += 'Authorization check for node {0} returned: {1}\n'.format(node, authorized_dict[node]) if node in authorize_dict: ret['comment'] += 'Failed to authorize {0} with error {1}\n'.format(node, authorize_dict[node]) return ret
python
def auth(name, nodes, pcsuser='hacluster', pcspasswd='hacluster', extra_args=None): ''' Ensure all nodes are authorized to the cluster name Irrelevant, not used (recommended: pcs_auth__auth) nodes a list of nodes which should be authorized to the cluster pcsuser user for communication with pcs (default: hacluster) pcspasswd password for pcsuser (default: hacluster) extra_args list of extra args for the \'pcs cluster auth\' command Example: .. code-block:: yaml pcs_auth__auth: pcs.auth: - nodes: - node1.example.com - node2.example.com - pcsuser: hacluster - pcspasswd: hoonetorg - extra_args: [] ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} auth_required = False authorized = __salt__['pcs.is_auth'](nodes=nodes) log.trace('Output of pcs.is_auth: %s', authorized) authorized_dict = {} for line in authorized['stdout'].splitlines(): node = line.split(':')[0].strip() auth_state = line.split(':')[1].strip() if node in nodes: authorized_dict.update({node: auth_state}) log.trace('authorized_dict: %s', authorized_dict) for node in nodes: if node in authorized_dict and authorized_dict[node] == 'Already authorized': ret['comment'] += 'Node {0} is already authorized\n'.format(node) else: auth_required = True if __opts__['test']: ret['comment'] += 'Node is set to authorize: {0}\n'.format(node) if not auth_required: return ret if __opts__['test']: ret['result'] = None return ret if not isinstance(extra_args, (list, tuple)): extra_args = [] if '--force' not in extra_args: extra_args += ['--force'] authorize = __salt__['pcs.auth'](nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd, extra_args=extra_args) log.trace('Output of pcs.auth: %s', authorize) authorize_dict = {} for line in authorize['stdout'].splitlines(): node = line.split(':')[0].strip() auth_state = line.split(':')[1].strip() if node in nodes: authorize_dict.update({node: auth_state}) log.trace('authorize_dict: %s', authorize_dict) for node in nodes: if node in authorize_dict and authorize_dict[node] == 'Authorized': ret['comment'] += 'Authorized {0}\n'.format(node) ret['changes'].update({node: {'old': '', 'new': 'Authorized'}}) else: ret['result'] = False if node in authorized_dict: ret['comment'] += 'Authorization check for node {0} returned: {1}\n'.format(node, authorized_dict[node]) if node in authorize_dict: ret['comment'] += 'Failed to authorize {0} with error {1}\n'.format(node, authorize_dict[node]) return ret
['def', 'auth', '(', 'name', ',', 'nodes', ',', 'pcsuser', '=', "'hacluster'", ',', 'pcspasswd', '=', "'hacluster'", ',', 'extra_args', '=', 'None', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'result'", ':', 'True', ',', "'comment'", ':', "''", ',', "'changes'", ':', '{', '}', '}', 'auth_required', '=', 'False', 'authorized', '=', '__salt__', '[', "'pcs.is_auth'", ']', '(', 'nodes', '=', 'nodes', ')', 'log', '.', 'trace', '(', "'Output of pcs.is_auth: %s'", ',', 'authorized', ')', 'authorized_dict', '=', '{', '}', 'for', 'line', 'in', 'authorized', '[', "'stdout'", ']', '.', 'splitlines', '(', ')', ':', 'node', '=', 'line', '.', 'split', '(', "':'", ')', '[', '0', ']', '.', 'strip', '(', ')', 'auth_state', '=', 'line', '.', 'split', '(', "':'", ')', '[', '1', ']', '.', 'strip', '(', ')', 'if', 'node', 'in', 'nodes', ':', 'authorized_dict', '.', 'update', '(', '{', 'node', ':', 'auth_state', '}', ')', 'log', '.', 'trace', '(', "'authorized_dict: %s'", ',', 'authorized_dict', ')', 'for', 'node', 'in', 'nodes', ':', 'if', 'node', 'in', 'authorized_dict', 'and', 'authorized_dict', '[', 'node', ']', '==', "'Already authorized'", ':', 'ret', '[', "'comment'", ']', '+=', "'Node {0} is already authorized\\n'", '.', 'format', '(', 'node', ')', 'else', ':', 'auth_required', '=', 'True', 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '[', "'comment'", ']', '+=', "'Node is set to authorize: {0}\\n'", '.', 'format', '(', 'node', ')', 'if', 'not', 'auth_required', ':', 'return', 'ret', 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '[', "'result'", ']', '=', 'None', 'return', 'ret', 'if', 'not', 'isinstance', '(', 'extra_args', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'extra_args', '=', '[', ']', 'if', "'--force'", 'not', 'in', 'extra_args', ':', 'extra_args', '+=', '[', "'--force'", ']', 'authorize', '=', '__salt__', '[', "'pcs.auth'", ']', '(', 'nodes', '=', 'nodes', ',', 'pcsuser', '=', 'pcsuser', ',', 'pcspasswd', '=', 'pcspasswd', ',', 'extra_args', '=', 'extra_args', ')', 'log', '.', 'trace', '(', "'Output of pcs.auth: %s'", ',', 'authorize', ')', 'authorize_dict', '=', '{', '}', 'for', 'line', 'in', 'authorize', '[', "'stdout'", ']', '.', 'splitlines', '(', ')', ':', 'node', '=', 'line', '.', 'split', '(', "':'", ')', '[', '0', ']', '.', 'strip', '(', ')', 'auth_state', '=', 'line', '.', 'split', '(', "':'", ')', '[', '1', ']', '.', 'strip', '(', ')', 'if', 'node', 'in', 'nodes', ':', 'authorize_dict', '.', 'update', '(', '{', 'node', ':', 'auth_state', '}', ')', 'log', '.', 'trace', '(', "'authorize_dict: %s'", ',', 'authorize_dict', ')', 'for', 'node', 'in', 'nodes', ':', 'if', 'node', 'in', 'authorize_dict', 'and', 'authorize_dict', '[', 'node', ']', '==', "'Authorized'", ':', 'ret', '[', "'comment'", ']', '+=', "'Authorized {0}\\n'", '.', 'format', '(', 'node', ')', 'ret', '[', "'changes'", ']', '.', 'update', '(', '{', 'node', ':', '{', "'old'", ':', "''", ',', "'new'", ':', "'Authorized'", '}', '}', ')', 'else', ':', 'ret', '[', "'result'", ']', '=', 'False', 'if', 'node', 'in', 'authorized_dict', ':', 'ret', '[', "'comment'", ']', '+=', "'Authorization check for node {0} returned: {1}\\n'", '.', 'format', '(', 'node', ',', 'authorized_dict', '[', 'node', ']', ')', 'if', 'node', 'in', 'authorize_dict', ':', 'ret', '[', "'comment'", ']', '+=', "'Failed to authorize {0} with error {1}\\n'", '.', 'format', '(', 'node', ',', 'authorize_dict', '[', 'node', ']', ')', 'return', 'ret']
Ensure all nodes are authorized to the cluster name Irrelevant, not used (recommended: pcs_auth__auth) nodes a list of nodes which should be authorized to the cluster pcsuser user for communication with pcs (default: hacluster) pcspasswd password for pcsuser (default: hacluster) extra_args list of extra args for the \'pcs cluster auth\' command Example: .. code-block:: yaml pcs_auth__auth: pcs.auth: - nodes: - node1.example.com - node2.example.com - pcsuser: hacluster - pcspasswd: hoonetorg - extra_args: []
['Ensure', 'all', 'nodes', 'are', 'authorized', 'to', 'the', 'cluster']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pcs.py#L360-L444
6,560
materialsproject/pymatgen-db
matgendb/vv/validate.py
Sampler.sample
def sample(self, cursor): """Extract records randomly from the database. Continue until the target proportion of the items have been extracted, or until `min_items` if this is larger. If `max_items` is non-negative, do not extract more than these. This function is a generator, yielding items incrementally. :param cursor: Cursor to sample :type cursor: pymongo.cursor.Cursor :return: yields each item :rtype: dict :raise: ValueError, if max_items is valid and less than `min_items` or if target collection is empty """ count = cursor.count() # special case: empty collection if count == 0: self._empty = True raise ValueError("Empty collection") # special case: entire collection if self.p >= 1 and self.max_items <= 0: for item in cursor: yield item return # calculate target number of items to select if self.max_items <= 0: n_target = max(self.min_items, self.p * count) else: if self.p <= 0: n_target = max(self.min_items, self.max_items) else: n_target = max(self.min_items, min(self.max_items, self.p * count)) if n_target == 0: raise ValueError("No items requested") # select first `n_target` items that pop up with # probability self.p # This is actually biased to items at the beginning # of the file if n_target is smaller than (p * count), n = 0 while n < n_target: try: item = next(cursor) except StopIteration: # need to keep looping through data until # we get all our items! cursor.rewind() item = next(cursor) if self._keep(): yield item n += 1
python
def sample(self, cursor): """Extract records randomly from the database. Continue until the target proportion of the items have been extracted, or until `min_items` if this is larger. If `max_items` is non-negative, do not extract more than these. This function is a generator, yielding items incrementally. :param cursor: Cursor to sample :type cursor: pymongo.cursor.Cursor :return: yields each item :rtype: dict :raise: ValueError, if max_items is valid and less than `min_items` or if target collection is empty """ count = cursor.count() # special case: empty collection if count == 0: self._empty = True raise ValueError("Empty collection") # special case: entire collection if self.p >= 1 and self.max_items <= 0: for item in cursor: yield item return # calculate target number of items to select if self.max_items <= 0: n_target = max(self.min_items, self.p * count) else: if self.p <= 0: n_target = max(self.min_items, self.max_items) else: n_target = max(self.min_items, min(self.max_items, self.p * count)) if n_target == 0: raise ValueError("No items requested") # select first `n_target` items that pop up with # probability self.p # This is actually biased to items at the beginning # of the file if n_target is smaller than (p * count), n = 0 while n < n_target: try: item = next(cursor) except StopIteration: # need to keep looping through data until # we get all our items! cursor.rewind() item = next(cursor) if self._keep(): yield item n += 1
['def', 'sample', '(', 'self', ',', 'cursor', ')', ':', 'count', '=', 'cursor', '.', 'count', '(', ')', '# special case: empty collection', 'if', 'count', '==', '0', ':', 'self', '.', '_empty', '=', 'True', 'raise', 'ValueError', '(', '"Empty collection"', ')', '# special case: entire collection', 'if', 'self', '.', 'p', '>=', '1', 'and', 'self', '.', 'max_items', '<=', '0', ':', 'for', 'item', 'in', 'cursor', ':', 'yield', 'item', 'return', '# calculate target number of items to select', 'if', 'self', '.', 'max_items', '<=', '0', ':', 'n_target', '=', 'max', '(', 'self', '.', 'min_items', ',', 'self', '.', 'p', '*', 'count', ')', 'else', ':', 'if', 'self', '.', 'p', '<=', '0', ':', 'n_target', '=', 'max', '(', 'self', '.', 'min_items', ',', 'self', '.', 'max_items', ')', 'else', ':', 'n_target', '=', 'max', '(', 'self', '.', 'min_items', ',', 'min', '(', 'self', '.', 'max_items', ',', 'self', '.', 'p', '*', 'count', ')', ')', 'if', 'n_target', '==', '0', ':', 'raise', 'ValueError', '(', '"No items requested"', ')', '# select first `n_target` items that pop up with', '# probability self.p', '# This is actually biased to items at the beginning', '# of the file if n_target is smaller than (p * count),', 'n', '=', '0', 'while', 'n', '<', 'n_target', ':', 'try', ':', 'item', '=', 'next', '(', 'cursor', ')', 'except', 'StopIteration', ':', '# need to keep looping through data until', '# we get all our items!', 'cursor', '.', 'rewind', '(', ')', 'item', '=', 'next', '(', 'cursor', ')', 'if', 'self', '.', '_keep', '(', ')', ':', 'yield', 'item', 'n', '+=', '1']
Extract records randomly from the database. Continue until the target proportion of the items have been extracted, or until `min_items` if this is larger. If `max_items` is non-negative, do not extract more than these. This function is a generator, yielding items incrementally. :param cursor: Cursor to sample :type cursor: pymongo.cursor.Cursor :return: yields each item :rtype: dict :raise: ValueError, if max_items is valid and less than `min_items` or if target collection is empty
['Extract', 'records', 'randomly', 'from', 'the', 'database', '.', 'Continue', 'until', 'the', 'target', 'proportion', 'of', 'the', 'items', 'have', 'been', 'extracted', 'or', 'until', 'min_items', 'if', 'this', 'is', 'larger', '.', 'If', 'max_items', 'is', 'non', '-', 'negative', 'do', 'not', 'extract', 'more', 'than', 'these', '.']
train
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/vv/validate.py#L669-L723
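A stand-alone sketch of the sampling policy in record 6,560, using a plain list in place of a pymongo cursor and itertools.cycle in place of cursor.rewind(). The _keep() test is not shown in the record and is assumed here to be a Bernoulli draw with probability p, and a guard is added so p == 0 cannot loop forever:

    import itertools
    import random

    def sample(items, p=0.25, min_items=1, max_items=0):
        count = len(items)
        if count == 0:
            raise ValueError("Empty collection")
        if p >= 1 and max_items <= 0:          # whole collection requested
            yield from items
            return
        if max_items <= 0:
            n_target = max(min_items, p * count)
        elif p <= 0:
            n_target = max(min_items, max_items)
        else:
            n_target = max(min_items, min(max_items, p * count))
        keep_p = p if p > 0 else 1.0           # guard: p == 0 must not spin forever
        n = 0
        for item in itertools.cycle(items):    # cycle() stands in for cursor.rewind()
            if n >= n_target:
                break
            if random.random() < keep_p:
                yield item
                n += 1

    print(list(sample(list(range(20)), p=0.25)))   # five items; repeats possible across passes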
6,561
caseyjlaw/rtpipe
rtpipe/parsecal.py
telcal_sol.calcgain
def calcgain(self, ant1, ant2, skyfreq, pol): """ Calculates the complex gain product (g1*g2) for a pair of antennas. """ select = self.select[n.where( (self.skyfreq[self.select] == skyfreq) & (self.polarization[self.select] == pol) )[0]] if len(select): # for when telcal solutions don't exist ind1 = n.where(ant1 == self.antnum[select]) ind2 = n.where(ant2 == self.antnum[select]) g1 = self.amp[select][ind1]*n.exp(1j*n.radians(self.phase[select][ind1])) * (not self.flagged.astype(int)[select][ind1][0]) g2 = self.amp[select][ind2]*n.exp(-1j*n.radians(self.phase[select][ind2])) * (not self.flagged.astype(int)[select][ind2][0]) else: g1 = [0]; g2 = [0] try: assert (g1[0] != 0j) and (g2[0] != 0j) invg1g2 = 1./(g1[0]*g2[0]) except (AssertionError, IndexError): invg1g2 = 0 return invg1g2
python
def calcgain(self, ant1, ant2, skyfreq, pol): """ Calculates the complex gain product (g1*g2) for a pair of antennas. """ select = self.select[n.where( (self.skyfreq[self.select] == skyfreq) & (self.polarization[self.select] == pol) )[0]] if len(select): # for when telcal solutions don't exist ind1 = n.where(ant1 == self.antnum[select]) ind2 = n.where(ant2 == self.antnum[select]) g1 = self.amp[select][ind1]*n.exp(1j*n.radians(self.phase[select][ind1])) * (not self.flagged.astype(int)[select][ind1][0]) g2 = self.amp[select][ind2]*n.exp(-1j*n.radians(self.phase[select][ind2])) * (not self.flagged.astype(int)[select][ind2][0]) else: g1 = [0]; g2 = [0] try: assert (g1[0] != 0j) and (g2[0] != 0j) invg1g2 = 1./(g1[0]*g2[0]) except (AssertionError, IndexError): invg1g2 = 0 return invg1g2
['def', 'calcgain', '(', 'self', ',', 'ant1', ',', 'ant2', ',', 'skyfreq', ',', 'pol', ')', ':', 'select', '=', 'self', '.', 'select', '[', 'n', '.', 'where', '(', '(', 'self', '.', 'skyfreq', '[', 'self', '.', 'select', ']', '==', 'skyfreq', ')', '&', '(', 'self', '.', 'polarization', '[', 'self', '.', 'select', ']', '==', 'pol', ')', ')', '[', '0', ']', ']', 'if', 'len', '(', 'select', ')', ':', "# for when telcal solutions don't exist", 'ind1', '=', 'n', '.', 'where', '(', 'ant1', '==', 'self', '.', 'antnum', '[', 'select', ']', ')', 'ind2', '=', 'n', '.', 'where', '(', 'ant2', '==', 'self', '.', 'antnum', '[', 'select', ']', ')', 'g1', '=', 'self', '.', 'amp', '[', 'select', ']', '[', 'ind1', ']', '*', 'n', '.', 'exp', '(', '1j', '*', 'n', '.', 'radians', '(', 'self', '.', 'phase', '[', 'select', ']', '[', 'ind1', ']', ')', ')', '*', '(', 'not', 'self', '.', 'flagged', '.', 'astype', '(', 'int', ')', '[', 'select', ']', '[', 'ind1', ']', '[', '0', ']', ')', 'g2', '=', 'self', '.', 'amp', '[', 'select', ']', '[', 'ind2', ']', '*', 'n', '.', 'exp', '(', '-', '1j', '*', 'n', '.', 'radians', '(', 'self', '.', 'phase', '[', 'select', ']', '[', 'ind2', ']', ')', ')', '*', '(', 'not', 'self', '.', 'flagged', '.', 'astype', '(', 'int', ')', '[', 'select', ']', '[', 'ind2', ']', '[', '0', ']', ')', 'else', ':', 'g1', '=', '[', '0', ']', 'g2', '=', '[', '0', ']', 'try', ':', 'assert', '(', 'g1', '[', '0', ']', '!=', '0j', ')', 'and', '(', 'g2', '[', '0', ']', '!=', '0j', ')', 'invg1g2', '=', '1.', '/', '(', 'g1', '[', '0', ']', '*', 'g2', '[', '0', ']', ')', 'except', '(', 'AssertionError', ',', 'IndexError', ')', ':', 'invg1g2', '=', '0', 'return', 'invg1g2']
Calculates the complex gain product (g1*g2) for a pair of antennas.
['Calculates', 'the', 'complex', 'gain', 'product', '(', 'g1', '*', 'g2', ')', 'for', 'a', 'pair', 'of', 'antennas', '.']
train
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/parsecal.py#L565-L584
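In record 6,561 the returned quantity is the inverse of the baseline gain product: antenna 1 contributes amp1 * exp(+i*phase1), antenna 2 the conjugate amp2 * exp(-i*phase2), and a flagged or missing solution zeroes the product so the except branch returns 0. A toy numeric illustration with made-up amplitudes and phases:

    import cmath

    amp1, phase1_deg = 1.2, 30.0    # antenna 1 solution (illustrative numbers)
    amp2, phase2_deg = 0.9, 15.0    # antenna 2 solution

    g1 = amp1 * cmath.exp(1j * cmath.pi * phase1_deg / 180.0)
    g2 = amp2 * cmath.exp(-1j * cmath.pi * phase2_deg / 180.0)   # note the conjugate sign

    invg1g2 = 1.0 / (g1 * g2) if g1 != 0 and g2 != 0 else 0
    print(abs(invg1g2), cmath.phase(invg1g2))   # ~0.926, ~-0.262 rad (i.e. -(30 - 15) degrees)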
6,562
neo4j/neo4j-python-driver
neo4j/types/__init__.py
Record.items
def items(self, *keys): """ Return the fields of the record as a list of key and value tuples :return: """ if keys: d = [] for key in keys: try: i = self.index(key) except KeyError: d.append((key, None)) else: d.append((self.__keys[i], self[i])) return d return list((self.__keys[i], super(Record, self).__getitem__(i)) for i in range(len(self)))
python
def items(self, *keys): """ Return the fields of the record as a list of key and value tuples :return: """ if keys: d = [] for key in keys: try: i = self.index(key) except KeyError: d.append((key, None)) else: d.append((self.__keys[i], self[i])) return d return list((self.__keys[i], super(Record, self).__getitem__(i)) for i in range(len(self)))
['def', 'items', '(', 'self', ',', '*', 'keys', ')', ':', 'if', 'keys', ':', 'd', '=', '[', ']', 'for', 'key', 'in', 'keys', ':', 'try', ':', 'i', '=', 'self', '.', 'index', '(', 'key', ')', 'except', 'KeyError', ':', 'd', '.', 'append', '(', '(', 'key', ',', 'None', ')', ')', 'else', ':', 'd', '.', 'append', '(', '(', 'self', '.', '__keys', '[', 'i', ']', ',', 'self', '[', 'i', ']', ')', ')', 'return', 'd', 'return', 'list', '(', '(', 'self', '.', '__keys', '[', 'i', ']', ',', 'super', '(', 'Record', ',', 'self', ')', '.', '__getitem__', '(', 'i', ')', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'self', ')', ')', ')']
Return the fields of the record as a list of key and value tuples :return:
['Return', 'the', 'fields', 'of', 'the', 'record', 'as', 'a', 'list', 'of', 'key', 'and', 'value', 'tuples']
train
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/__init__.py#L280-L295
6,563
globocom/GloboNetworkAPI-client-python
networkapiclient/Equipamento.py
Equipamento.listar_por_tipo_ambiente
def listar_por_tipo_ambiente(self, id_tipo_equipamento, id_ambiente): """Lista os equipamentos de um tipo e que estão associados a um ambiente. :param id_tipo_equipamento: Identificador do tipo do equipamento. :param id_ambiente: Identificador do ambiente. :return: Dicionário com a seguinte estrutura: :: {'equipamento': [{'id': < id_equipamento >, 'nome': < nome_equipamento >, 'id_tipo_equipamento': < id_tipo_equipamento >, 'nome_tipo_equipamento': < nome_tipo_equipamento >, 'id_modelo': < id_modelo >, 'nome_modelo': < nome_modelo >, 'id_marca': < id_marca >, 'nome_marca': < nome_marca > }, ... demais equipamentos ...]} :raise InvalidParameterError: O identificador do tipo de equipamento e/ou do ambiente são nulos ou inválidos. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao gerar o XML de resposta. """ if not is_valid_int_param(id_tipo_equipamento): raise InvalidParameterError( u'O identificador do tipo do equipamento é inválido ou não foi informado.') if not is_valid_int_param(id_ambiente): raise InvalidParameterError( u'O identificador do ambiente é inválido ou não foi informado.') url = 'equipamento/tipoequipamento/' + \ str(id_tipo_equipamento) + '/ambiente/' + str(id_ambiente) + '/' code, xml = self.submit(None, 'GET', url) key = 'equipamento' return get_list_map(self.response(code, xml, [key]), key)
python
def listar_por_tipo_ambiente(self, id_tipo_equipamento, id_ambiente): """Lista os equipamentos de um tipo e que estão associados a um ambiente. :param id_tipo_equipamento: Identificador do tipo do equipamento. :param id_ambiente: Identificador do ambiente. :return: Dicionário com a seguinte estrutura: :: {'equipamento': [{'id': < id_equipamento >, 'nome': < nome_equipamento >, 'id_tipo_equipamento': < id_tipo_equipamento >, 'nome_tipo_equipamento': < nome_tipo_equipamento >, 'id_modelo': < id_modelo >, 'nome_modelo': < nome_modelo >, 'id_marca': < id_marca >, 'nome_marca': < nome_marca > }, ... demais equipamentos ...]} :raise InvalidParameterError: O identificador do tipo de equipamento e/ou do ambiente são nulos ou inválidos. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao gerar o XML de resposta. """ if not is_valid_int_param(id_tipo_equipamento): raise InvalidParameterError( u'O identificador do tipo do equipamento é inválido ou não foi informado.') if not is_valid_int_param(id_ambiente): raise InvalidParameterError( u'O identificador do ambiente é inválido ou não foi informado.') url = 'equipamento/tipoequipamento/' + \ str(id_tipo_equipamento) + '/ambiente/' + str(id_ambiente) + '/' code, xml = self.submit(None, 'GET', url) key = 'equipamento' return get_list_map(self.response(code, xml, [key]), key)
['def', 'listar_por_tipo_ambiente', '(', 'self', ',', 'id_tipo_equipamento', ',', 'id_ambiente', ')', ':', 'if', 'not', 'is_valid_int_param', '(', 'id_tipo_equipamento', ')', ':', 'raise', 'InvalidParameterError', '(', "u'O identificador do tipo do equipamento é inválido ou não foi informado.')", '', 'if', 'not', 'is_valid_int_param', '(', 'id_ambiente', ')', ':', 'raise', 'InvalidParameterError', '(', "u'O identificador do ambiente é inválido ou não foi informado.')", '', 'url', '=', "'equipamento/tipoequipamento/'", '+', 'str', '(', 'id_tipo_equipamento', ')', '+', "'/ambiente/'", '+', 'str', '(', 'id_ambiente', ')', '+', "'/'", 'code', ',', 'xml', '=', 'self', '.', 'submit', '(', 'None', ',', "'GET'", ',', 'url', ')', 'key', '=', "'equipamento'", 'return', 'get_list_map', '(', 'self', '.', 'response', '(', 'code', ',', 'xml', ',', '[', 'key', ']', ')', ',', 'key', ')']
Lista os equipamentos de um tipo e que estão associados a um ambiente. :param id_tipo_equipamento: Identificador do tipo do equipamento. :param id_ambiente: Identificador do ambiente. :return: Dicionário com a seguinte estrutura: :: {'equipamento': [{'id': < id_equipamento >, 'nome': < nome_equipamento >, 'id_tipo_equipamento': < id_tipo_equipamento >, 'nome_tipo_equipamento': < nome_tipo_equipamento >, 'id_modelo': < id_modelo >, 'nome_modelo': < nome_modelo >, 'id_marca': < id_marca >, 'nome_marca': < nome_marca > }, ... demais equipamentos ...]} :raise InvalidParameterError: O identificador do tipo de equipamento e/ou do ambiente são nulos ou inválidos. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao gerar o XML de resposta.
['Lista', 'os', 'equipamentos', 'de', 'um', 'tipo', 'e', 'que', 'estão', 'associados', 'a', 'um', 'ambiente', '.']
train
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Equipamento.py#L466-L505
6,564
quantmind/pulsar
examples/httpbin/manage.py
HttpBin.get
def get(self, request): '''The home page of this router''' ul = Html('ul') for router in sorted(self.routes, key=lambda r: r.creation_count): a = router.link(escape(router.route.path)) a.addClass(router.name) for method in METHODS: if router.getparam(method): a.addClass(method) li = Html('li', a, ' %s' % router.getparam('title', '')) ul.append(li) title = 'Pulsar' html = request.html_document html.head.title = title html.head.links.append('httpbin.css') html.head.links.append('favicon.ico', rel="icon", type='image/x-icon') html.head.scripts.append('httpbin.js') ul = ul.to_string(request) templ = asset('template.html') body = templ % (title, JAPANESE, CHINESE, version, pyversion, ul) html.body.append(body) return html.http_response(request)
python
def get(self, request): '''The home page of this router''' ul = Html('ul') for router in sorted(self.routes, key=lambda r: r.creation_count): a = router.link(escape(router.route.path)) a.addClass(router.name) for method in METHODS: if router.getparam(method): a.addClass(method) li = Html('li', a, ' %s' % router.getparam('title', '')) ul.append(li) title = 'Pulsar' html = request.html_document html.head.title = title html.head.links.append('httpbin.css') html.head.links.append('favicon.ico', rel="icon", type='image/x-icon') html.head.scripts.append('httpbin.js') ul = ul.to_string(request) templ = asset('template.html') body = templ % (title, JAPANESE, CHINESE, version, pyversion, ul) html.body.append(body) return html.http_response(request)
['def', 'get', '(', 'self', ',', 'request', ')', ':', 'ul', '=', 'Html', '(', "'ul'", ')', 'for', 'router', 'in', 'sorted', '(', 'self', '.', 'routes', ',', 'key', '=', 'lambda', 'r', ':', 'r', '.', 'creation_count', ')', ':', 'a', '=', 'router', '.', 'link', '(', 'escape', '(', 'router', '.', 'route', '.', 'path', ')', ')', 'a', '.', 'addClass', '(', 'router', '.', 'name', ')', 'for', 'method', 'in', 'METHODS', ':', 'if', 'router', '.', 'getparam', '(', 'method', ')', ':', 'a', '.', 'addClass', '(', 'method', ')', 'li', '=', 'Html', '(', "'li'", ',', 'a', ',', "' %s'", '%', 'router', '.', 'getparam', '(', "'title'", ',', "''", ')', ')', 'ul', '.', 'append', '(', 'li', ')', 'title', '=', "'Pulsar'", 'html', '=', 'request', '.', 'html_document', 'html', '.', 'head', '.', 'title', '=', 'title', 'html', '.', 'head', '.', 'links', '.', 'append', '(', "'httpbin.css'", ')', 'html', '.', 'head', '.', 'links', '.', 'append', '(', "'favicon.ico'", ',', 'rel', '=', '"icon"', ',', 'type', '=', "'image/x-icon'", ')', 'html', '.', 'head', '.', 'scripts', '.', 'append', '(', "'httpbin.js'", ')', 'ul', '=', 'ul', '.', 'to_string', '(', 'request', ')', 'templ', '=', 'asset', '(', "'template.html'", ')', 'body', '=', 'templ', '%', '(', 'title', ',', 'JAPANESE', ',', 'CHINESE', ',', 'version', ',', 'pyversion', ',', 'ul', ')', 'html', '.', 'body', '.', 'append', '(', 'body', ')', 'return', 'html', '.', 'http_response', '(', 'request', ')']
The home page of this router
['The', 'home', 'page', 'of', 'this', 'router']
train
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/examples/httpbin/manage.py#L106-L127
6,565
buzzfeed/caliendo
caliendo/facade.py
Wrapper.wrapper__ignore
def wrapper__ignore(self, type_): """ Selectively ignore certain types when wrapping attributes. :param class type: The class/type definition to ignore. :rtype list(type): The current list of ignored types """ if type_ not in self.__exclusion_list: self.__exclusion_list.append(type_) return self.__exclusion_list
python
def wrapper__ignore(self, type_): """ Selectively ignore certain types when wrapping attributes. :param class type: The class/type definition to ignore. :rtype list(type): The current list of ignored types """ if type_ not in self.__exclusion_list: self.__exclusion_list.append(type_) return self.__exclusion_list
['def', 'wrapper__ignore', '(', 'self', ',', 'type_', ')', ':', 'if', 'type_', 'not', 'in', 'self', '.', '__exclusion_list', ':', 'self', '.', '__exclusion_list', '.', 'append', '(', 'type_', ')', 'return', 'self', '.', '__exclusion_list']
Selectively ignore certain types when wrapping attributes. :param class type: The class/type definition to ignore. :rtype list(type): The current list of ignored types
['Selectively', 'ignore', 'certain', 'types', 'when', 'wrapping', 'attributes', '.']
train
https://github.com/buzzfeed/caliendo/blob/1628a10f7782ad67c0422b5cbc9bf4979ac40abc/caliendo/facade.py#L86-L96
6,566
lsst-sqre/documenteer
documenteer/sphinxconfig/utils.py
get_filepaths_with_extension
def get_filepaths_with_extension(extname, root_dir='.'): """Get relative filepaths of files in a directory, and sub-directories, with the given extension. Parameters ---------- extname : `str` Extension name (e.g. 'txt', 'rst'). Extension comparison is case-insensitive. root_dir : `str`, optional Root directory. Current working directory by default. Returns ------- filepaths : `list` of `str` File paths, relative to ``root_dir``, with the given extension. """ # needed for comparison with os.path.splitext if not extname.startswith('.'): extname = '.' + extname # for case-insensitivity extname = extname.lower() root_dir = os.path.abspath(root_dir) selected_filenames = [] for dirname, sub_dirnames, filenames in os.walk(root_dir): for filename in filenames: if os.path.splitext(filename)[-1].lower() == extname: full_filename = os.path.join(dirname, filename) selected_filenames.append( os.path.relpath(full_filename, start=root_dir)) return selected_filenames
python
def get_filepaths_with_extension(extname, root_dir='.'): """Get relative filepaths of files in a directory, and sub-directories, with the given extension. Parameters ---------- extname : `str` Extension name (e.g. 'txt', 'rst'). Extension comparison is case-insensitive. root_dir : `str`, optional Root directory. Current working directory by default. Returns ------- filepaths : `list` of `str` File paths, relative to ``root_dir``, with the given extension. """ # needed for comparison with os.path.splitext if not extname.startswith('.'): extname = '.' + extname # for case-insensitivity extname = extname.lower() root_dir = os.path.abspath(root_dir) selected_filenames = [] for dirname, sub_dirnames, filenames in os.walk(root_dir): for filename in filenames: if os.path.splitext(filename)[-1].lower() == extname: full_filename = os.path.join(dirname, filename) selected_filenames.append( os.path.relpath(full_filename, start=root_dir)) return selected_filenames
['def', 'get_filepaths_with_extension', '(', 'extname', ',', 'root_dir', '=', "'.'", ')', ':', '# needed for comparison with os.path.splitext', 'if', 'not', 'extname', '.', 'startswith', '(', "'.'", ')', ':', 'extname', '=', "'.'", '+', 'extname', '# for case-insensitivity', 'extname', '=', 'extname', '.', 'lower', '(', ')', 'root_dir', '=', 'os', '.', 'path', '.', 'abspath', '(', 'root_dir', ')', 'selected_filenames', '=', '[', ']', 'for', 'dirname', ',', 'sub_dirnames', ',', 'filenames', 'in', 'os', '.', 'walk', '(', 'root_dir', ')', ':', 'for', 'filename', 'in', 'filenames', ':', 'if', 'os', '.', 'path', '.', 'splitext', '(', 'filename', ')', '[', '-', '1', ']', '.', 'lower', '(', ')', '==', 'extname', ':', 'full_filename', '=', 'os', '.', 'path', '.', 'join', '(', 'dirname', ',', 'filename', ')', 'selected_filenames', '.', 'append', '(', 'os', '.', 'path', '.', 'relpath', '(', 'full_filename', ',', 'start', '=', 'root_dir', ')', ')', 'return', 'selected_filenames']
Get relative filepaths of files in a directory, and sub-directories, with the given extension. Parameters ---------- extname : `str` Extension name (e.g. 'txt', 'rst'). Extension comparison is case-insensitive. root_dir : `str`, optional Root directory. Current working directory by default. Returns ------- filepaths : `list` of `str` File paths, relative to ``root_dir``, with the given extension.
['Get', 'relative', 'filepaths', 'of', 'files', 'in', 'a', 'directory', 'and', 'sub', '-', 'directories', 'with', 'the', 'given', 'extension', '.']
train
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L83-L116
6,567
thespacedoctor/fundamentals
fundamentals/times.py
calculate_time_difference
def calculate_time_difference(startDate, endDate): """ *Return the time difference between two dates as a string* **Key Arguments:** - ``startDate`` -- the first date in YYYY-MM-DDTHH:MM:SS format - ``endDate`` -- the final date YYYY-MM-DDTHH:MM:SS format **Return:** - ``relTime`` -- the difference between the two dates in Y,M,D,h,m,s (string) **Usage:** .. code-block:: python from fundamentals import times diff = times.calculate_time_difference(startDate="2015-10-13 10:02:12", endDate="2017-11-04 16:47:05") print diff # OUT: 2yrs 22dys 6h 44m 53s """ ################ > IMPORTS ################ from datetime import datetime from dateutil import relativedelta ################ > VARIABLE SETTINGS ###### ################ >ACTION(S) ################ if "T" not in startDate: startDate = startDate.strip().replace(" ", "T") if "T" not in endDate: endDate = endDate.strip().replace(" ", "T") startDate = datetime.strptime(startDate, '%Y-%m-%dT%H:%M:%S') endDate = datetime.strptime(endDate, '%Y-%m-%dT%H:%M:%S') d = relativedelta.relativedelta(endDate, startDate) relTime = "" if d.years > 0: relTime += str(d.years) + "yrs " if d.months > 0: relTime += str(d.months) + "mths " if d.days > 0: relTime += str(d.days) + "dys " if d.hours > 0: relTime += str(d.hours) + "h " if d.minutes > 0: relTime += str(d.minutes) + "m " if d.seconds > 0: relTime += str(d.seconds) + "s" ############################### if relTime == "": relTime = "0s" return relTime
python
def calculate_time_difference(startDate, endDate): """ *Return the time difference between two dates as a string* **Key Arguments:** - ``startDate`` -- the first date in YYYY-MM-DDTHH:MM:SS format - ``endDate`` -- the final date YYYY-MM-DDTHH:MM:SS format **Return:** - ``relTime`` -- the difference between the two dates in Y,M,D,h,m,s (string) **Usage:** .. code-block:: python from fundamentals import times diff = times.calculate_time_difference(startDate="2015-10-13 10:02:12", endDate="2017-11-04 16:47:05") print diff # OUT: 2yrs 22dys 6h 44m 53s """ ################ > IMPORTS ################ from datetime import datetime from dateutil import relativedelta ################ > VARIABLE SETTINGS ###### ################ >ACTION(S) ################ if "T" not in startDate: startDate = startDate.strip().replace(" ", "T") if "T" not in endDate: endDate = endDate.strip().replace(" ", "T") startDate = datetime.strptime(startDate, '%Y-%m-%dT%H:%M:%S') endDate = datetime.strptime(endDate, '%Y-%m-%dT%H:%M:%S') d = relativedelta.relativedelta(endDate, startDate) relTime = "" if d.years > 0: relTime += str(d.years) + "yrs " if d.months > 0: relTime += str(d.months) + "mths " if d.days > 0: relTime += str(d.days) + "dys " if d.hours > 0: relTime += str(d.hours) + "h " if d.minutes > 0: relTime += str(d.minutes) + "m " if d.seconds > 0: relTime += str(d.seconds) + "s" ############################### if relTime == "": relTime = "0s" return relTime
['def', 'calculate_time_difference', '(', 'startDate', ',', 'endDate', ')', ':', '################ > IMPORTS ################', 'from', 'datetime', 'import', 'datetime', 'from', 'dateutil', 'import', 'relativedelta', '################ > VARIABLE SETTINGS ######', '################ >ACTION(S) ################', 'if', '"T"', 'not', 'in', 'startDate', ':', 'startDate', '=', 'startDate', '.', 'strip', '(', ')', '.', 'replace', '(', '" "', ',', '"T"', ')', 'if', '"T"', 'not', 'in', 'endDate', ':', 'endDate', '=', 'endDate', '.', 'strip', '(', ')', '.', 'replace', '(', '" "', ',', '"T"', ')', 'startDate', '=', 'datetime', '.', 'strptime', '(', 'startDate', ',', "'%Y-%m-%dT%H:%M:%S'", ')', 'endDate', '=', 'datetime', '.', 'strptime', '(', 'endDate', ',', "'%Y-%m-%dT%H:%M:%S'", ')', 'd', '=', 'relativedelta', '.', 'relativedelta', '(', 'endDate', ',', 'startDate', ')', 'relTime', '=', '""', 'if', 'd', '.', 'years', '>', '0', ':', 'relTime', '+=', 'str', '(', 'd', '.', 'years', ')', '+', '"yrs "', 'if', 'd', '.', 'months', '>', '0', ':', 'relTime', '+=', 'str', '(', 'd', '.', 'months', ')', '+', '"mths "', 'if', 'd', '.', 'days', '>', '0', ':', 'relTime', '+=', 'str', '(', 'd', '.', 'days', ')', '+', '"dys "', 'if', 'd', '.', 'hours', '>', '0', ':', 'relTime', '+=', 'str', '(', 'd', '.', 'hours', ')', '+', '"h "', 'if', 'd', '.', 'minutes', '>', '0', ':', 'relTime', '+=', 'str', '(', 'd', '.', 'minutes', ')', '+', '"m "', 'if', 'd', '.', 'seconds', '>', '0', ':', 'relTime', '+=', 'str', '(', 'd', '.', 'seconds', ')', '+', '"s"', '###############################', 'if', 'relTime', '==', '""', ':', 'relTime', '=', '"0s"', 'return', 'relTime']
*Return the time difference between two dates as a string* **Key Arguments:** - ``startDate`` -- the first date in YYYY-MM-DDTHH:MM:SS format - ``endDate`` -- the final date YYYY-MM-DDTHH:MM:SS format **Return:** - ``relTime`` -- the difference between the two dates in Y,M,D,h,m,s (string) **Usage:** .. code-block:: python from fundamentals import times diff = times.calculate_time_difference(startDate="2015-10-13 10:02:12", endDate="2017-11-04 16:47:05") print diff # OUT: 2yrs 22dys 6h 44m 53s
['*', 'Return', 'the', 'time', 'difference', 'between', 'two', 'dates', 'as', 'a', 'string', '*']
train
https://github.com/thespacedoctor/fundamentals/blob/1d2c007ac74442ec2eabde771cfcacdb9c1ab382/fundamentals/times.py#L42-L95
6,568
posativ/isso
isso/wsgi.py
urlsplit
def urlsplit(name): """ Parse :param:`name` into (netloc, port, ssl) """ if not (isinstance(name, string_types)): name = str(name) if not name.startswith(('http://', 'https://')): name = 'http://' + name rv = urlparse(name) if rv.scheme == 'https' and rv.port is None: return rv.netloc, 443, True return rv.netloc.rsplit(':')[0], rv.port or 80, rv.scheme == 'https'
python
def urlsplit(name): """ Parse :param:`name` into (netloc, port, ssl) """ if not (isinstance(name, string_types)): name = str(name) if not name.startswith(('http://', 'https://')): name = 'http://' + name rv = urlparse(name) if rv.scheme == 'https' and rv.port is None: return rv.netloc, 443, True return rv.netloc.rsplit(':')[0], rv.port or 80, rv.scheme == 'https'
['def', 'urlsplit', '(', 'name', ')', ':', 'if', 'not', '(', 'isinstance', '(', 'name', ',', 'string_types', ')', ')', ':', 'name', '=', 'str', '(', 'name', ')', 'if', 'not', 'name', '.', 'startswith', '(', '(', "'http://'", ',', "'https://'", ')', ')', ':', 'name', '=', "'http://'", '+', 'name', 'rv', '=', 'urlparse', '(', 'name', ')', 'if', 'rv', '.', 'scheme', '==', "'https'", 'and', 'rv', '.', 'port', 'is', 'None', ':', 'return', 'rv', '.', 'netloc', ',', '443', ',', 'True', 'return', 'rv', '.', 'netloc', '.', 'rsplit', '(', "':'", ')', '[', '0', ']', ',', 'rv', '.', 'port', 'or', '80', ',', 'rv', '.', 'scheme', '==', "'https'"]
Parse :param:`name` into (netloc, port, ssl)
['Parse', ':', 'param', ':', 'name', 'into', '(', 'netloc', 'port', 'ssl', ')']
train
https://github.com/posativ/isso/blob/78997f491044b7d694ac7170edc32030544095b7/isso/wsgi.py#L50-L64
6,569
LuqueDaniel/pybooru
pybooru/api_moebooru.py
MoebooruApi_Mixin.post_revert_tags
def post_revert_tags(self, post_id, history_id): """Function to reverts a post to a previous set of tags (Requires login) (UNTESTED). Parameters: post_id (int): The post id number to update. history_id (int): The id number of the tag history. """ params = {'id': post_id, 'history_id': history_id} return self._get('post/revert_tags', params, 'PUT')
python
def post_revert_tags(self, post_id, history_id): """Function to reverts a post to a previous set of tags (Requires login) (UNTESTED). Parameters: post_id (int): The post id number to update. history_id (int): The id number of the tag history. """ params = {'id': post_id, 'history_id': history_id} return self._get('post/revert_tags', params, 'PUT')
['def', 'post_revert_tags', '(', 'self', ',', 'post_id', ',', 'history_id', ')', ':', 'params', '=', '{', "'id'", ':', 'post_id', ',', "'history_id'", ':', 'history_id', '}', 'return', 'self', '.', '_get', '(', "'post/revert_tags'", ',', 'params', ',', "'PUT'", ')']
Function to reverts a post to a previous set of tags (Requires login) (UNTESTED). Parameters: post_id (int): The post id number to update. history_id (int): The id number of the tag history.
['Function', 'to', 'reverts', 'a', 'post', 'to', 'a', 'previous', 'set', 'of', 'tags', '(', 'Requires', 'login', ')', '(', 'UNTESTED', ')', '.']
train
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_moebooru.py#L126-L135
6,570
CellProfiler/centrosome
centrosome/cpmorphology.py
calculate_solidity
def calculate_solidity(labels,indexes=None): """Calculate the area of each label divided by the area of its convex hull labels - a label matrix indexes - the indexes of the labels to measure """ if indexes is not None: """ Convert to compat 32bit integer """ indexes = np.array(indexes,dtype=np.int32) areas = scind.sum(np.ones(labels.shape),labels,indexes) convex_hull_areas = calculate_convex_hull_areas(labels, indexes) return areas / convex_hull_areas
python
def calculate_solidity(labels,indexes=None): """Calculate the area of each label divided by the area of its convex hull labels - a label matrix indexes - the indexes of the labels to measure """ if indexes is not None: """ Convert to compat 32bit integer """ indexes = np.array(indexes,dtype=np.int32) areas = scind.sum(np.ones(labels.shape),labels,indexes) convex_hull_areas = calculate_convex_hull_areas(labels, indexes) return areas / convex_hull_areas
['def', 'calculate_solidity', '(', 'labels', ',', 'indexes', '=', 'None', ')', ':', 'if', 'indexes', 'is', 'not', 'None', ':', '""" Convert to compat 32bit integer """', 'indexes', '=', 'np', '.', 'array', '(', 'indexes', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'areas', '=', 'scind', '.', 'sum', '(', 'np', '.', 'ones', '(', 'labels', '.', 'shape', ')', ',', 'labels', ',', 'indexes', ')', 'convex_hull_areas', '=', 'calculate_convex_hull_areas', '(', 'labels', ',', 'indexes', ')', 'return', 'areas', '/', 'convex_hull_areas']
Calculate the area of each label divided by the area of its convex hull labels - a label matrix indexes - the indexes of the labels to measure
['Calculate', 'the', 'area', 'of', 'each', 'label', 'divided', 'by', 'the', 'area', 'of', 'its', 'convex', 'hull', 'labels', '-', 'a', 'label', 'matrix', 'indexes', '-', 'the', 'indexes', 'of', 'the', 'labels', 'to', 'measure']
train
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L2466-L2477
6,571
PixelwarStudio/PyTree
Tree/core.py
generate_branches
def generate_branches(scales=None, angles=None, shift_angle=0): """Generates branches with alternative system. Args: scales (tuple/array): Indicating how the branch/es length/es develop/s from age to age. angles (tuple/array): Holding the branch and shift angle in radians. shift_angle (float): Holding the rotation angle for all branches. Returns: branches (2d-array): A array constits of arrays holding scale and angle for every branch. """ branches = [] for pos, scale in enumerate(scales): angle = -sum(angles)/2 + sum(angles[:pos]) + shift_angle branches.append([scale, angle]) return branches
python
def generate_branches(scales=None, angles=None, shift_angle=0): """Generates branches with alternative system. Args: scales (tuple/array): Indicating how the branch/es length/es develop/s from age to age. angles (tuple/array): Holding the branch and shift angle in radians. shift_angle (float): Holding the rotation angle for all branches. Returns: branches (2d-array): A array constits of arrays holding scale and angle for every branch. """ branches = [] for pos, scale in enumerate(scales): angle = -sum(angles)/2 + sum(angles[:pos]) + shift_angle branches.append([scale, angle]) return branches
['def', 'generate_branches', '(', 'scales', '=', 'None', ',', 'angles', '=', 'None', ',', 'shift_angle', '=', '0', ')', ':', 'branches', '=', '[', ']', 'for', 'pos', ',', 'scale', 'in', 'enumerate', '(', 'scales', ')', ':', 'angle', '=', '-', 'sum', '(', 'angles', ')', '/', '2', '+', 'sum', '(', 'angles', '[', ':', 'pos', ']', ')', '+', 'shift_angle', 'branches', '.', 'append', '(', '[', 'scale', ',', 'angle', ']', ')', 'return', 'branches']
Generates branches with alternative system. Args: scales (tuple/array): Indicating how the branch/es length/es develop/s from age to age. angles (tuple/array): Holding the branch and shift angle in radians. shift_angle (float): Holding the rotation angle for all branches. Returns: branches (2d-array): A array constits of arrays holding scale and angle for every branch.
['Generates', 'branches', 'with', 'alternative', 'system', '.']
train
https://github.com/PixelwarStudio/PyTree/blob/f14b25ea145da6b00d836e34251d2a4c823766dc/Tree/core.py#L225-L240
6,572
cloudnull/turbolift
turbolift/methods/__init__.py
BaseMethod._index_fs
def _index_fs(self): """Returns a deque object full of local file system items. :returns: ``deque`` """ indexed_objects = self._return_deque() directory = self.job_args.get('directory') if directory: indexed_objects = self._return_deque( deque=indexed_objects, item=self._drectory_local_files( directory=directory ) ) object_names = self.job_args.get('object') if object_names: indexed_objects = self._return_deque( deque=indexed_objects, item=self._named_local_files( object_names=object_names ) ) return indexed_objects
python
def _index_fs(self): """Returns a deque object full of local file system items. :returns: ``deque`` """ indexed_objects = self._return_deque() directory = self.job_args.get('directory') if directory: indexed_objects = self._return_deque( deque=indexed_objects, item=self._drectory_local_files( directory=directory ) ) object_names = self.job_args.get('object') if object_names: indexed_objects = self._return_deque( deque=indexed_objects, item=self._named_local_files( object_names=object_names ) ) return indexed_objects
['def', '_index_fs', '(', 'self', ')', ':', 'indexed_objects', '=', 'self', '.', '_return_deque', '(', ')', 'directory', '=', 'self', '.', 'job_args', '.', 'get', '(', "'directory'", ')', 'if', 'directory', ':', 'indexed_objects', '=', 'self', '.', '_return_deque', '(', 'deque', '=', 'indexed_objects', ',', 'item', '=', 'self', '.', '_drectory_local_files', '(', 'directory', '=', 'directory', ')', ')', 'object_names', '=', 'self', '.', 'job_args', '.', 'get', '(', "'object'", ')', 'if', 'object_names', ':', 'indexed_objects', '=', 'self', '.', '_return_deque', '(', 'deque', '=', 'indexed_objects', ',', 'item', '=', 'self', '.', '_named_local_files', '(', 'object_names', '=', 'object_names', ')', ')', 'return', 'indexed_objects']
Returns a deque object full of local file system items. :returns: ``deque``
['Returns', 'a', 'deque', 'object', 'full', 'of', 'local', 'file', 'system', 'items', '.']
train
https://github.com/cloudnull/turbolift/blob/da33034e88959226529ce762e2895e6f6356c448/turbolift/methods/__init__.py#L497-L523
6,573
cgtobi/PyRMVtransport
RMVtransport/rmvjourney.py
RMVJourney._departure
def _departure(self) -> datetime: """Extract departure time.""" departure_time = datetime.strptime( self.journey.MainStop.BasicStop.Dep.Time.text, "%H:%M" ).time() if departure_time > (self.now - timedelta(hours=1)).time(): return datetime.combine(self.now.date(), departure_time) return datetime.combine(self.now.date() + timedelta(days=1), departure_time)
python
def _departure(self) -> datetime: """Extract departure time.""" departure_time = datetime.strptime( self.journey.MainStop.BasicStop.Dep.Time.text, "%H:%M" ).time() if departure_time > (self.now - timedelta(hours=1)).time(): return datetime.combine(self.now.date(), departure_time) return datetime.combine(self.now.date() + timedelta(days=1), departure_time)
['def', '_departure', '(', 'self', ')', '->', 'datetime', ':', 'departure_time', '=', 'datetime', '.', 'strptime', '(', 'self', '.', 'journey', '.', 'MainStop', '.', 'BasicStop', '.', 'Dep', '.', 'Time', '.', 'text', ',', '"%H:%M"', ')', '.', 'time', '(', ')', 'if', 'departure_time', '>', '(', 'self', '.', 'now', '-', 'timedelta', '(', 'hours', '=', '1', ')', ')', '.', 'time', '(', ')', ':', 'return', 'datetime', '.', 'combine', '(', 'self', '.', 'now', '.', 'date', '(', ')', ',', 'departure_time', ')', 'return', 'datetime', '.', 'combine', '(', 'self', '.', 'now', '.', 'date', '(', ')', '+', 'timedelta', '(', 'days', '=', '1', ')', ',', 'departure_time', ')']
Extract departure time.
['Extract', 'departure', 'time', '.']
train
https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvjourney.py#L49-L56
6,574
pbrod/numdifftools
src/numdifftools/nd_algopy.py
directionaldiff
def directionaldiff(f, x0, vec, **options): """ Return directional derivative of a function of n variables Parameters ---------- fun: callable analytical function to differentiate. x0: array vector location at which to differentiate fun. If x0 is an nxm array, then fun is assumed to be a function of n*m variables. vec: array vector defining the line along which to take the derivative. It should be the same size as x0, but need not be a vector of unit length. **options: optional arguments to pass on to Derivative. Returns ------- dder: scalar estimate of the first derivative of fun in the specified direction. Examples -------- At the global minimizer (1,1) of the Rosenbrock function, compute the directional derivative in the direction [1 2] >>> import numpy as np >>> import numdifftools as nd >>> vec = np.r_[1, 2] >>> rosen = lambda x: (1-x[0])**2 + 105*(x[1]-x[0]**2)**2 >>> dd, info = nd.directionaldiff(rosen, [1, 1], vec, full_output=True) >>> np.allclose(dd, 0) True >>> np.abs(info.error_estimate)<1e-14 True See also -------- Derivative, Gradient """ x0 = np.asarray(x0) vec = np.asarray(vec) if x0.size != vec.size: raise ValueError('vec and x0 must be the same shapes') vec = np.reshape(vec/np.linalg.norm(vec.ravel()), x0.shape) return Derivative(lambda t: f(x0+t*vec), **options)(0)
python
def directionaldiff(f, x0, vec, **options): """ Return directional derivative of a function of n variables Parameters ---------- fun: callable analytical function to differentiate. x0: array vector location at which to differentiate fun. If x0 is an nxm array, then fun is assumed to be a function of n*m variables. vec: array vector defining the line along which to take the derivative. It should be the same size as x0, but need not be a vector of unit length. **options: optional arguments to pass on to Derivative. Returns ------- dder: scalar estimate of the first derivative of fun in the specified direction. Examples -------- At the global minimizer (1,1) of the Rosenbrock function, compute the directional derivative in the direction [1 2] >>> import numpy as np >>> import numdifftools as nd >>> vec = np.r_[1, 2] >>> rosen = lambda x: (1-x[0])**2 + 105*(x[1]-x[0]**2)**2 >>> dd, info = nd.directionaldiff(rosen, [1, 1], vec, full_output=True) >>> np.allclose(dd, 0) True >>> np.abs(info.error_estimate)<1e-14 True See also -------- Derivative, Gradient """ x0 = np.asarray(x0) vec = np.asarray(vec) if x0.size != vec.size: raise ValueError('vec and x0 must be the same shapes') vec = np.reshape(vec/np.linalg.norm(vec.ravel()), x0.shape) return Derivative(lambda t: f(x0+t*vec), **options)(0)
['def', 'directionaldiff', '(', 'f', ',', 'x0', ',', 'vec', ',', '*', '*', 'options', ')', ':', 'x0', '=', 'np', '.', 'asarray', '(', 'x0', ')', 'vec', '=', 'np', '.', 'asarray', '(', 'vec', ')', 'if', 'x0', '.', 'size', '!=', 'vec', '.', 'size', ':', 'raise', 'ValueError', '(', "'vec and x0 must be the same shapes'", ')', 'vec', '=', 'np', '.', 'reshape', '(', 'vec', '/', 'np', '.', 'linalg', '.', 'norm', '(', 'vec', '.', 'ravel', '(', ')', ')', ',', 'x0', '.', 'shape', ')', 'return', 'Derivative', '(', 'lambda', 't', ':', 'f', '(', 'x0', '+', 't', '*', 'vec', ')', ',', '*', '*', 'options', ')', '(', '0', ')']
Return directional derivative of a function of n variables Parameters ---------- fun: callable analytical function to differentiate. x0: array vector location at which to differentiate fun. If x0 is an nxm array, then fun is assumed to be a function of n*m variables. vec: array vector defining the line along which to take the derivative. It should be the same size as x0, but need not be a vector of unit length. **options: optional arguments to pass on to Derivative. Returns ------- dder: scalar estimate of the first derivative of fun in the specified direction. Examples -------- At the global minimizer (1,1) of the Rosenbrock function, compute the directional derivative in the direction [1 2] >>> import numpy as np >>> import numdifftools as nd >>> vec = np.r_[1, 2] >>> rosen = lambda x: (1-x[0])**2 + 105*(x[1]-x[0]**2)**2 >>> dd, info = nd.directionaldiff(rosen, [1, 1], vec, full_output=True) >>> np.allclose(dd, 0) True >>> np.abs(info.error_estimate)<1e-14 True See also -------- Derivative, Gradient
['Return', 'directional', 'derivative', 'of', 'a', 'function', 'of', 'n', 'variables']
train
https://github.com/pbrod/numdifftools/blob/2c88878df732c9c6629febea56e7a91fd898398d/src/numdifftools/nd_algopy.py#L485-L533
6,575
SBRG/ssbio
ssbio/protein/sequence/properties/aggregation_propensity.py
AMYLPRED.get_aggregation_propensity
def get_aggregation_propensity(self, seq, outdir, cutoff_v=5, cutoff_n=5, run_amylmuts=False): """Run the AMYLPRED2 web server for a protein sequence and get the consensus result for aggregation propensity. Args: seq (str, Seq, SeqRecord): Amino acid sequence outdir (str): Directory to where output files should be saved cutoff_v (int): The minimal number of methods that agree on a residue being a aggregation-prone residue cutoff_n (int): The minimal number of consecutive residues to be considered as a 'stretch' of aggregation-prone region run_amylmuts (bool): If AMYLMUTS method should be run, default False. AMYLMUTS is optional as it is the most time consuming and generates a slightly different result every submission. Returns: int: Aggregation propensity - the number of aggregation-prone segments on an unfolded protein sequence """ seq = ssbio.protein.sequence.utils.cast_to_str(seq) results = self.run_amylpred2(seq=seq, outdir=outdir, run_amylmuts=run_amylmuts) agg_index, agg_conf = self.parse_for_consensus_aggregation(N=len(seq), results=results, cutoff_v=cutoff_v, cutoff_n=cutoff_n) return agg_index
python
def get_aggregation_propensity(self, seq, outdir, cutoff_v=5, cutoff_n=5, run_amylmuts=False): """Run the AMYLPRED2 web server for a protein sequence and get the consensus result for aggregation propensity. Args: seq (str, Seq, SeqRecord): Amino acid sequence outdir (str): Directory to where output files should be saved cutoff_v (int): The minimal number of methods that agree on a residue being a aggregation-prone residue cutoff_n (int): The minimal number of consecutive residues to be considered as a 'stretch' of aggregation-prone region run_amylmuts (bool): If AMYLMUTS method should be run, default False. AMYLMUTS is optional as it is the most time consuming and generates a slightly different result every submission. Returns: int: Aggregation propensity - the number of aggregation-prone segments on an unfolded protein sequence """ seq = ssbio.protein.sequence.utils.cast_to_str(seq) results = self.run_amylpred2(seq=seq, outdir=outdir, run_amylmuts=run_amylmuts) agg_index, agg_conf = self.parse_for_consensus_aggregation(N=len(seq), results=results, cutoff_v=cutoff_v, cutoff_n=cutoff_n) return agg_index
['def', 'get_aggregation_propensity', '(', 'self', ',', 'seq', ',', 'outdir', ',', 'cutoff_v', '=', '5', ',', 'cutoff_n', '=', '5', ',', 'run_amylmuts', '=', 'False', ')', ':', 'seq', '=', 'ssbio', '.', 'protein', '.', 'sequence', '.', 'utils', '.', 'cast_to_str', '(', 'seq', ')', 'results', '=', 'self', '.', 'run_amylpred2', '(', 'seq', '=', 'seq', ',', 'outdir', '=', 'outdir', ',', 'run_amylmuts', '=', 'run_amylmuts', ')', 'agg_index', ',', 'agg_conf', '=', 'self', '.', 'parse_for_consensus_aggregation', '(', 'N', '=', 'len', '(', 'seq', ')', ',', 'results', '=', 'results', ',', 'cutoff_v', '=', 'cutoff_v', ',', 'cutoff_n', '=', 'cutoff_n', ')', 'return', 'agg_index']
Run the AMYLPRED2 web server for a protein sequence and get the consensus result for aggregation propensity. Args: seq (str, Seq, SeqRecord): Amino acid sequence outdir (str): Directory to where output files should be saved cutoff_v (int): The minimal number of methods that agree on a residue being a aggregation-prone residue cutoff_n (int): The minimal number of consecutive residues to be considered as a 'stretch' of aggregation-prone region run_amylmuts (bool): If AMYLMUTS method should be run, default False. AMYLMUTS is optional as it is the most time consuming and generates a slightly different result every submission. Returns: int: Aggregation propensity - the number of aggregation-prone segments on an unfolded protein sequence
['Run', 'the', 'AMYLPRED2', 'web', 'server', 'for', 'a', 'protein', 'sequence', 'and', 'get', 'the', 'consensus', 'result', 'for', 'aggregation', 'propensity', '.']
train
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/aggregation_propensity.py#L66-L88
6,576
ultrabug/py3status
py3status/util.py
Gradients.hsv_2_hex
def hsv_2_hex(self, h, s, v): """ convert a hsv color to hex """ return self.rgb_2_hex(*hsv_to_rgb(h, s, v))
python
def hsv_2_hex(self, h, s, v): """ convert a hsv color to hex """ return self.rgb_2_hex(*hsv_to_rgb(h, s, v))
['def', 'hsv_2_hex', '(', 'self', ',', 'h', ',', 's', ',', 'v', ')', ':', 'return', 'self', '.', 'rgb_2_hex', '(', '*', 'hsv_to_rgb', '(', 'h', ',', 's', ',', 'v', ')', ')']
convert a hsv color to hex
['convert', 'a', 'hsv', 'color', 'to', 'hex']
train
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/util.py#L40-L44
6,577
gem/oq-engine
openquake/hazardlib/geo/surface/multi.py
MultiSurface.get_min_distance
def get_min_distance(self, mesh): """ For each point in ``mesh`` compute the minimum distance to each surface element and return the smallest value. See :meth:`superclass method <.base.BaseSurface.get_min_distance>` for spec of input and result values. """ dists = [surf.get_min_distance(mesh) for surf in self.surfaces] return numpy.min(dists, axis=0)
python
def get_min_distance(self, mesh): """ For each point in ``mesh`` compute the minimum distance to each surface element and return the smallest value. See :meth:`superclass method <.base.BaseSurface.get_min_distance>` for spec of input and result values. """ dists = [surf.get_min_distance(mesh) for surf in self.surfaces] return numpy.min(dists, axis=0)
['def', 'get_min_distance', '(', 'self', ',', 'mesh', ')', ':', 'dists', '=', '[', 'surf', '.', 'get_min_distance', '(', 'mesh', ')', 'for', 'surf', 'in', 'self', '.', 'surfaces', ']', 'return', 'numpy', '.', 'min', '(', 'dists', ',', 'axis', '=', '0', ')']
For each point in ``mesh`` compute the minimum distance to each surface element and return the smallest value. See :meth:`superclass method <.base.BaseSurface.get_min_distance>` for spec of input and result values.
['For', 'each', 'point', 'in', 'mesh', 'compute', 'the', 'minimum', 'distance', 'to', 'each', 'surface', 'element', 'and', 'return', 'the', 'smallest', 'value', '.']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/surface/multi.py#L161-L172
6,578
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py
mavfile.set_relay
def set_relay(self, relay_pin=0, state=True): '''Set relay_pin to value of state''' if self.mavlink10(): self.mav.command_long_send( self.target_system, # target_system self.target_component, # target_component mavlink.MAV_CMD_DO_SET_RELAY, # command 0, # Confirmation relay_pin, # Relay Number int(state), # state (1 to indicate arm) 0, # param3 (all other params meaningless) 0, # param4 0, # param5 0, # param6 0) # param7 else: print("Setting relays not supported.")
python
def set_relay(self, relay_pin=0, state=True): '''Set relay_pin to value of state''' if self.mavlink10(): self.mav.command_long_send( self.target_system, # target_system self.target_component, # target_component mavlink.MAV_CMD_DO_SET_RELAY, # command 0, # Confirmation relay_pin, # Relay Number int(state), # state (1 to indicate arm) 0, # param3 (all other params meaningless) 0, # param4 0, # param5 0, # param6 0) # param7 else: print("Setting relays not supported.")
['def', 'set_relay', '(', 'self', ',', 'relay_pin', '=', '0', ',', 'state', '=', 'True', ')', ':', 'if', 'self', '.', 'mavlink10', '(', ')', ':', 'self', '.', 'mav', '.', 'command_long_send', '(', 'self', '.', 'target_system', ',', '# target_system', 'self', '.', 'target_component', ',', '# target_component', 'mavlink', '.', 'MAV_CMD_DO_SET_RELAY', ',', '# command', '0', ',', '# Confirmation', 'relay_pin', ',', '# Relay Number', 'int', '(', 'state', ')', ',', '# state (1 to indicate arm)', '0', ',', '# param3 (all other params meaningless)', '0', ',', '# param4', '0', ',', '# param5', '0', ',', '# param6', '0', ')', '# param7', 'else', ':', 'print', '(', '"Setting relays not supported."', ')']
Set relay_pin to value of state
['Set', 'relay_pin', 'to', 'value', 'of', 'state']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py#L598-L614
6,579
pycampers/ampy
ampy/files.py
Files.rmdir
def rmdir(self, directory, missing_okay=False): """Forcefully remove the specified directory and all its children.""" # Build a script to walk an entire directory structure and delete every # file and subfolder. This is tricky because MicroPython has no os.walk # or similar function to walk folders, so this code does it manually # with recursion and changing directories. For each directory it lists # the files and deletes everything it can, i.e. all the files. Then # it lists the files again and assumes they are directories (since they # couldn't be deleted in the first pass) and recursively clears those # subdirectories. Finally when finished clearing all the children the # parent directory is deleted. command = """ try: import os except ImportError: import uos as os def rmdir(directory): os.chdir(directory) for f in os.listdir(): try: os.remove(f) except OSError: pass for f in os.listdir(): rmdir(f) os.chdir('..') os.rmdir(directory) rmdir('{0}') """.format( directory ) self._pyboard.enter_raw_repl() try: out = self._pyboard.exec_(textwrap.dedent(command)) except PyboardError as ex: message = ex.args[2].decode("utf-8") # Check if this is an OSError #2, i.e. directory doesn't exist # and rethrow it as something more descriptive. if message.find("OSError: [Errno 2] ENOENT") != -1: if not missing_okay: raise RuntimeError("No such directory: {0}".format(directory)) else: raise ex self._pyboard.exit_raw_repl()
python
def rmdir(self, directory, missing_okay=False): """Forcefully remove the specified directory and all its children.""" # Build a script to walk an entire directory structure and delete every # file and subfolder. This is tricky because MicroPython has no os.walk # or similar function to walk folders, so this code does it manually # with recursion and changing directories. For each directory it lists # the files and deletes everything it can, i.e. all the files. Then # it lists the files again and assumes they are directories (since they # couldn't be deleted in the first pass) and recursively clears those # subdirectories. Finally when finished clearing all the children the # parent directory is deleted. command = """ try: import os except ImportError: import uos as os def rmdir(directory): os.chdir(directory) for f in os.listdir(): try: os.remove(f) except OSError: pass for f in os.listdir(): rmdir(f) os.chdir('..') os.rmdir(directory) rmdir('{0}') """.format( directory ) self._pyboard.enter_raw_repl() try: out = self._pyboard.exec_(textwrap.dedent(command)) except PyboardError as ex: message = ex.args[2].decode("utf-8") # Check if this is an OSError #2, i.e. directory doesn't exist # and rethrow it as something more descriptive. if message.find("OSError: [Errno 2] ENOENT") != -1: if not missing_okay: raise RuntimeError("No such directory: {0}".format(directory)) else: raise ex self._pyboard.exit_raw_repl()
['def', 'rmdir', '(', 'self', ',', 'directory', ',', 'missing_okay', '=', 'False', ')', ':', '# Build a script to walk an entire directory structure and delete every', '# file and subfolder. This is tricky because MicroPython has no os.walk', '# or similar function to walk folders, so this code does it manually', '# with recursion and changing directories. For each directory it lists', '# the files and deletes everything it can, i.e. all the files. Then', '# it lists the files again and assumes they are directories (since they', "# couldn't be deleted in the first pass) and recursively clears those", '# subdirectories. Finally when finished clearing all the children the', '# parent directory is deleted.', 'command', '=', '"""\n try:\n import os\n except ImportError:\n import uos as os\n def rmdir(directory):\n os.chdir(directory)\n for f in os.listdir():\n try:\n os.remove(f)\n except OSError:\n pass\n for f in os.listdir():\n rmdir(f)\n os.chdir(\'..\')\n os.rmdir(directory)\n rmdir(\'{0}\')\n """', '.', 'format', '(', 'directory', ')', 'self', '.', '_pyboard', '.', 'enter_raw_repl', '(', ')', 'try', ':', 'out', '=', 'self', '.', '_pyboard', '.', 'exec_', '(', 'textwrap', '.', 'dedent', '(', 'command', ')', ')', 'except', 'PyboardError', 'as', 'ex', ':', 'message', '=', 'ex', '.', 'args', '[', '2', ']', '.', 'decode', '(', '"utf-8"', ')', "# Check if this is an OSError #2, i.e. directory doesn't exist", '# and rethrow it as something more descriptive.', 'if', 'message', '.', 'find', '(', '"OSError: [Errno 2] ENOENT"', ')', '!=', '-', '1', ':', 'if', 'not', 'missing_okay', ':', 'raise', 'RuntimeError', '(', '"No such directory: {0}"', '.', 'format', '(', 'directory', ')', ')', 'else', ':', 'raise', 'ex', 'self', '.', '_pyboard', '.', 'exit_raw_repl', '(', ')']
Forcefully remove the specified directory and all its children.
['Forcefully', 'remove', 'the', 'specified', 'directory', 'and', 'all', 'its', 'children', '.']
train
https://github.com/pycampers/ampy/blob/6851f8b177c334f5ff7bd43bf07307a437433ba2/ampy/files.py#L249-L292
6,580
bitcraze/crazyflie-lib-python
cflib/positioning/motion_commander.py
MotionCommander.land
def land(self, velocity=VELOCITY): """ Go straight down and turn off the motors. Do not call this function if you use the with keyword. Landing is done automatically when the context goes out of scope. :param velocity: The velocity (meters/second) when going down :return: """ if self._is_flying: self.down(self._thread.get_height(), velocity) self._thread.stop() self._thread = None self._cf.commander.send_stop_setpoint() self._is_flying = False
python
def land(self, velocity=VELOCITY): """ Go straight down and turn off the motors. Do not call this function if you use the with keyword. Landing is done automatically when the context goes out of scope. :param velocity: The velocity (meters/second) when going down :return: """ if self._is_flying: self.down(self._thread.get_height(), velocity) self._thread.stop() self._thread = None self._cf.commander.send_stop_setpoint() self._is_flying = False
['def', 'land', '(', 'self', ',', 'velocity', '=', 'VELOCITY', ')', ':', 'if', 'self', '.', '_is_flying', ':', 'self', '.', 'down', '(', 'self', '.', '_thread', '.', 'get_height', '(', ')', ',', 'velocity', ')', 'self', '.', '_thread', '.', 'stop', '(', ')', 'self', '.', '_thread', '=', 'None', 'self', '.', '_cf', '.', 'commander', '.', 'send_stop_setpoint', '(', ')', 'self', '.', '_is_flying', '=', 'False']
Go straight down and turn off the motors. Do not call this function if you use the with keyword. Landing is done automatically when the context goes out of scope. :param velocity: The velocity (meters/second) when going down :return:
['Go', 'straight', 'down', 'and', 'turn', 'off', 'the', 'motors', '.']
train
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/positioning/motion_commander.py#L109-L126
6,581
disqus/disqus-python
disqusapi/utils.py
build_interfaces_by_method
def build_interfaces_by_method(interfaces): """ Create new dictionary from INTERFACES hashed by method then the endpoints name. For use when using the disqusapi by the method interface instead of the endpoint interface. For instance: 'blacklists': { 'add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } } is translated to: 'POST': { 'blacklists.add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } """ def traverse(block, parts): try: method = block['method'].lower() except KeyError: for k, v in compat.iteritems(block): traverse(v, parts + [k]) else: path = '.'.join(parts) try: methods[method] except KeyError: methods[method] = {} methods[method][path] = block methods = {} for key, val in compat.iteritems(interfaces): traverse(val, [key]) return methods
python
def build_interfaces_by_method(interfaces): """ Create new dictionary from INTERFACES hashed by method then the endpoints name. For use when using the disqusapi by the method interface instead of the endpoint interface. For instance: 'blacklists': { 'add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } } is translated to: 'POST': { 'blacklists.add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } """ def traverse(block, parts): try: method = block['method'].lower() except KeyError: for k, v in compat.iteritems(block): traverse(v, parts + [k]) else: path = '.'.join(parts) try: methods[method] except KeyError: methods[method] = {} methods[method][path] = block methods = {} for key, val in compat.iteritems(interfaces): traverse(val, [key]) return methods
['def', 'build_interfaces_by_method', '(', 'interfaces', ')', ':', 'def', 'traverse', '(', 'block', ',', 'parts', ')', ':', 'try', ':', 'method', '=', 'block', '[', "'method'", ']', '.', 'lower', '(', ')', 'except', 'KeyError', ':', 'for', 'k', ',', 'v', 'in', 'compat', '.', 'iteritems', '(', 'block', ')', ':', 'traverse', '(', 'v', ',', 'parts', '+', '[', 'k', ']', ')', 'else', ':', 'path', '=', "'.'", '.', 'join', '(', 'parts', ')', 'try', ':', 'methods', '[', 'method', ']', 'except', 'KeyError', ':', 'methods', '[', 'method', ']', '=', '{', '}', 'methods', '[', 'method', ']', '[', 'path', ']', '=', 'block', 'methods', '=', '{', '}', 'for', 'key', ',', 'val', 'in', 'compat', '.', 'iteritems', '(', 'interfaces', ')', ':', 'traverse', '(', 'val', ',', '[', 'key', ']', ')', 'return', 'methods']
Create new dictionary from INTERFACES hashed by method then the endpoints name. For use when using the disqusapi by the method interface instead of the endpoint interface. For instance: 'blacklists': { 'add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } } is translated to: 'POST': { 'blacklists.add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] }
['Create', 'new', 'dictionary', 'from', 'INTERFACES', 'hashed', 'by', 'method', 'then', 'the', 'endpoints', 'name', '.', 'For', 'use', 'when', 'using', 'the', 'disqusapi', 'by', 'the', 'method', 'interface', 'instead', 'of', 'the', 'endpoint', 'interface', '.', 'For', 'instance', ':']
train
https://github.com/disqus/disqus-python/blob/605f33c7b735fcb85e16041c27658fbba49d7a7b/disqusapi/utils.py#L8-L48
6,582
mpg-age-bioinformatics/AGEpy
AGEpy/bed.py
dfTObedtool
def dfTObedtool(df): """ Transforms a pandas dataframe into a bedtool :param df: Pandas dataframe :returns: a bedtool """ df=df.astype(str) df=df.drop_duplicates() df=df.values.tolist() df=["\t".join(s) for s in df ] df="\n".join(df) df=BedTool(df, from_string=True) return df
python
def dfTObedtool(df): """ Transforms a pandas dataframe into a bedtool :param df: Pandas dataframe :returns: a bedtool """ df=df.astype(str) df=df.drop_duplicates() df=df.values.tolist() df=["\t".join(s) for s in df ] df="\n".join(df) df=BedTool(df, from_string=True) return df
['def', 'dfTObedtool', '(', 'df', ')', ':', 'df', '=', 'df', '.', 'astype', '(', 'str', ')', 'df', '=', 'df', '.', 'drop_duplicates', '(', ')', 'df', '=', 'df', '.', 'values', '.', 'tolist', '(', ')', 'df', '=', '[', '"\\t"', '.', 'join', '(', 's', ')', 'for', 's', 'in', 'df', ']', 'df', '=', '"\\n"', '.', 'join', '(', 'df', ')', 'df', '=', 'BedTool', '(', 'df', ',', 'from_string', '=', 'True', ')', 'return', 'df']
Transforms a pandas dataframe into a bedtool :param df: Pandas dataframe :returns: a bedtool
['Transforms', 'a', 'pandas', 'dataframe', 'into', 'a', 'bedtool']
train
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/bed.py#L55-L70
6,583
pandas-dev/pandas
pandas/io/sql.py
SQLDatabase.to_sql
def to_sql(self, frame, name, if_exists='fail', index=True, index_label=None, schema=None, chunksize=None, dtype=None, method=None): """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame name : string Name of SQL table. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column. index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If specified, this overwrites the default schema of the SQLDatabase object. chunksize : int, default None If not None, then rows will be written in batches of this size at a time. If None, all rows will be written at once. dtype : single type or dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type. If all columns are of the same type, one single value can be used. method : {None', 'multi', callable}, default None Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0 """ if dtype and not is_dict_like(dtype): dtype = {col_name: dtype for col_name in frame} if dtype is not None: from sqlalchemy.types import to_instance, TypeEngine for col, my_type in dtype.items(): if not isinstance(to_instance(my_type), TypeEngine): raise ValueError('The type of {column} is not a ' 'SQLAlchemy type '.format(column=col)) table = SQLTable(name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, schema=schema, dtype=dtype) table.create() table.insert(chunksize, method=method) if (not name.isdigit() and not name.islower()): # check for potentially case sensitivity issues (GH7815) # Only check when name is not a number and name is not lower case engine = self.connectable.engine with self.connectable.connect() as conn: table_names = engine.table_names( schema=schema or self.meta.schema, connection=conn, ) if name not in table_names: msg = ( "The provided table name '{0}' is not found exactly as " "such in the database after writing the table, possibly " "due to case sensitivity issues. Consider using lower " "case table names." ).format(name) warnings.warn(msg, UserWarning)
python
def to_sql(self, frame, name, if_exists='fail', index=True, index_label=None, schema=None, chunksize=None, dtype=None, method=None): """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame name : string Name of SQL table. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column. index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If specified, this overwrites the default schema of the SQLDatabase object. chunksize : int, default None If not None, then rows will be written in batches of this size at a time. If None, all rows will be written at once. dtype : single type or dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type. If all columns are of the same type, one single value can be used. method : {None', 'multi', callable}, default None Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0 """ if dtype and not is_dict_like(dtype): dtype = {col_name: dtype for col_name in frame} if dtype is not None: from sqlalchemy.types import to_instance, TypeEngine for col, my_type in dtype.items(): if not isinstance(to_instance(my_type), TypeEngine): raise ValueError('The type of {column} is not a ' 'SQLAlchemy type '.format(column=col)) table = SQLTable(name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, schema=schema, dtype=dtype) table.create() table.insert(chunksize, method=method) if (not name.isdigit() and not name.islower()): # check for potentially case sensitivity issues (GH7815) # Only check when name is not a number and name is not lower case engine = self.connectable.engine with self.connectable.connect() as conn: table_names = engine.table_names( schema=schema or self.meta.schema, connection=conn, ) if name not in table_names: msg = ( "The provided table name '{0}' is not found exactly as " "such in the database after writing the table, possibly " "due to case sensitivity issues. Consider using lower " "case table names." ).format(name) warnings.warn(msg, UserWarning)
['def', 'to_sql', '(', 'self', ',', 'frame', ',', 'name', ',', 'if_exists', '=', "'fail'", ',', 'index', '=', 'True', ',', 'index_label', '=', 'None', ',', 'schema', '=', 'None', ',', 'chunksize', '=', 'None', ',', 'dtype', '=', 'None', ',', 'method', '=', 'None', ')', ':', 'if', 'dtype', 'and', 'not', 'is_dict_like', '(', 'dtype', ')', ':', 'dtype', '=', '{', 'col_name', ':', 'dtype', 'for', 'col_name', 'in', 'frame', '}', 'if', 'dtype', 'is', 'not', 'None', ':', 'from', 'sqlalchemy', '.', 'types', 'import', 'to_instance', ',', 'TypeEngine', 'for', 'col', ',', 'my_type', 'in', 'dtype', '.', 'items', '(', ')', ':', 'if', 'not', 'isinstance', '(', 'to_instance', '(', 'my_type', ')', ',', 'TypeEngine', ')', ':', 'raise', 'ValueError', '(', "'The type of {column} is not a '", "'SQLAlchemy type '", '.', 'format', '(', 'column', '=', 'col', ')', ')', 'table', '=', 'SQLTable', '(', 'name', ',', 'self', ',', 'frame', '=', 'frame', ',', 'index', '=', 'index', ',', 'if_exists', '=', 'if_exists', ',', 'index_label', '=', 'index_label', ',', 'schema', '=', 'schema', ',', 'dtype', '=', 'dtype', ')', 'table', '.', 'create', '(', ')', 'table', '.', 'insert', '(', 'chunksize', ',', 'method', '=', 'method', ')', 'if', '(', 'not', 'name', '.', 'isdigit', '(', ')', 'and', 'not', 'name', '.', 'islower', '(', ')', ')', ':', '# check for potentially case sensitivity issues (GH7815)', '# Only check when name is not a number and name is not lower case', 'engine', '=', 'self', '.', 'connectable', '.', 'engine', 'with', 'self', '.', 'connectable', '.', 'connect', '(', ')', 'as', 'conn', ':', 'table_names', '=', 'engine', '.', 'table_names', '(', 'schema', '=', 'schema', 'or', 'self', '.', 'meta', '.', 'schema', ',', 'connection', '=', 'conn', ',', ')', 'if', 'name', 'not', 'in', 'table_names', ':', 'msg', '=', '(', '"The provided table name \'{0}\' is not found exactly as "', '"such in the database after writing the table, possibly "', '"due to case sensitivity issues. Consider using lower "', '"case table names."', ')', '.', 'format', '(', 'name', ')', 'warnings', '.', 'warn', '(', 'msg', ',', 'UserWarning', ')']
Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame name : string Name of SQL table. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column. index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If specified, this overwrites the default schema of the SQLDatabase object. chunksize : int, default None If not None, then rows will be written in batches of this size at a time. If None, all rows will be written at once. dtype : single type or dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type. If all columns are of the same type, one single value can be used. method : {None', 'multi', callable}, default None Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0
['Write', 'records', 'stored', 'in', 'a', 'DataFrame', 'to', 'a', 'SQL', 'database', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L1106-L1181
6,584
priestc/moneywagon
moneywagon/tx.py
Transaction._get_utxos
def _get_utxos(self, address, services, **modes): """ Using the service fallback engine, get utxos from remote service. """ return get_unspent_outputs( self.crypto, address, services=services, **modes )
python
def _get_utxos(self, address, services, **modes): """ Using the service fallback engine, get utxos from remote service. """ return get_unspent_outputs( self.crypto, address, services=services, **modes )
['def', '_get_utxos', '(', 'self', ',', 'address', ',', 'services', ',', '*', '*', 'modes', ')', ':', 'return', 'get_unspent_outputs', '(', 'self', '.', 'crypto', ',', 'address', ',', 'services', '=', 'services', ',', '*', '*', 'modes', ')']
Using the service fallback engine, get utxos from remote service.
['Using', 'the', 'service', 'fallback', 'engine', 'get', 'utxos', 'from', 'remote', 'service', '.']
train
https://github.com/priestc/moneywagon/blob/00518f1f557dcca8b3031f46d3564c2baa0227a3/moneywagon/tx.py#L58-L65
6,585
dadadel/pyment
pyment/docstring.py
DocsTools._get_options
def _get_options(self, style): """Get the list of keywords for a particular style :param style: the style that the keywords are wanted """ return [self.opt[o][style]['name'] for o in self.opt]
python
def _get_options(self, style): """Get the list of keywords for a particular style :param style: the style that the keywords are wanted """ return [self.opt[o][style]['name'] for o in self.opt]
['def', '_get_options', '(', 'self', ',', 'style', ')', ':', 'return', '[', 'self', '.', 'opt', '[', 'o', ']', '[', 'style', ']', '[', "'name'", ']', 'for', 'o', 'in', 'self', '.', 'opt', ']']
Get the list of keywords for a particular style :param style: the style that the keywords are wanted
['Get', 'the', 'list', 'of', 'keywords', 'for', 'a', 'particular', 'style']
train
https://github.com/dadadel/pyment/blob/3d1bdf87d083ff56230bd0bf7c5252e20552b7b6/pyment/docstring.py#L713-L719
6,586
acorg/dark-matter
dark/reads.py
Reads.save
def save(self, filename, format_='fasta'): """ Write the reads to C{filename} in the requested format. @param filename: Either a C{str} file name to save into (the file will be overwritten) or an open file descriptor (e.g., sys.stdout). @param format_: A C{str} format to save as, either 'fasta', 'fastq' or 'fasta-ss'. @raise ValueError: if C{format_} is 'fastq' and a read with no quality is present, or if an unknown format is requested. @return: An C{int} giving the number of reads in C{self}. """ format_ = format_.lower() count = 0 if isinstance(filename, str): try: with open(filename, 'w') as fp: for read in self: fp.write(read.toString(format_)) count += 1 except ValueError: unlink(filename) raise else: # We have a file-like object. for read in self: filename.write(read.toString(format_)) count += 1 return count
python
def save(self, filename, format_='fasta'): """ Write the reads to C{filename} in the requested format. @param filename: Either a C{str} file name to save into (the file will be overwritten) or an open file descriptor (e.g., sys.stdout). @param format_: A C{str} format to save as, either 'fasta', 'fastq' or 'fasta-ss'. @raise ValueError: if C{format_} is 'fastq' and a read with no quality is present, or if an unknown format is requested. @return: An C{int} giving the number of reads in C{self}. """ format_ = format_.lower() count = 0 if isinstance(filename, str): try: with open(filename, 'w') as fp: for read in self: fp.write(read.toString(format_)) count += 1 except ValueError: unlink(filename) raise else: # We have a file-like object. for read in self: filename.write(read.toString(format_)) count += 1 return count
['def', 'save', '(', 'self', ',', 'filename', ',', 'format_', '=', "'fasta'", ')', ':', 'format_', '=', 'format_', '.', 'lower', '(', ')', 'count', '=', '0', 'if', 'isinstance', '(', 'filename', ',', 'str', ')', ':', 'try', ':', 'with', 'open', '(', 'filename', ',', "'w'", ')', 'as', 'fp', ':', 'for', 'read', 'in', 'self', ':', 'fp', '.', 'write', '(', 'read', '.', 'toString', '(', 'format_', ')', ')', 'count', '+=', '1', 'except', 'ValueError', ':', 'unlink', '(', 'filename', ')', 'raise', 'else', ':', '# We have a file-like object.', 'for', 'read', 'in', 'self', ':', 'filename', '.', 'write', '(', 'read', '.', 'toString', '(', 'format_', ')', ')', 'count', '+=', '1', 'return', 'count']
Write the reads to C{filename} in the requested format. @param filename: Either a C{str} file name to save into (the file will be overwritten) or an open file descriptor (e.g., sys.stdout). @param format_: A C{str} format to save as, either 'fasta', 'fastq' or 'fasta-ss'. @raise ValueError: if C{format_} is 'fastq' and a read with no quality is present, or if an unknown format is requested. @return: An C{int} giving the number of reads in C{self}.
['Write', 'the', 'reads', 'to', 'C', '{', 'filename', '}', 'in', 'the', 'requested', 'format', '.']
train
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/reads.py#L1323-L1352
6,587
aaronn/django-rest-framework-passwordless
drfpasswordless/utils.py
send_email_with_callback_token
def send_email_with_callback_token(user, email_token, **kwargs): """ Sends a Email to user.email. Passes silently without sending in test environment """ try: if api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS: # Make sure we have a sending address before sending. # Get email subject and message email_subject = kwargs.get('email_subject', api_settings.PASSWORDLESS_EMAIL_SUBJECT) email_plaintext = kwargs.get('email_plaintext', api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE) email_html = kwargs.get('email_html', api_settings.PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME) # Inject context if user specifies. context = inject_template_context({'callback_token': email_token.key, }) html_message = loader.render_to_string(email_html, context,) send_mail( email_subject, email_plaintext % email_token.key, api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS, [getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)], fail_silently=False, html_message=html_message,) else: logger.debug("Failed to send token email. Missing PASSWORDLESS_EMAIL_NOREPLY_ADDRESS.") return False return True except Exception as e: logger.debug("Failed to send token email to user: %d." "Possibly no email on user object. Email entered was %s" % (user.id, getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME))) logger.debug(e) return False
python
def send_email_with_callback_token(user, email_token, **kwargs): """ Sends a Email to user.email. Passes silently without sending in test environment """ try: if api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS: # Make sure we have a sending address before sending. # Get email subject and message email_subject = kwargs.get('email_subject', api_settings.PASSWORDLESS_EMAIL_SUBJECT) email_plaintext = kwargs.get('email_plaintext', api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE) email_html = kwargs.get('email_html', api_settings.PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME) # Inject context if user specifies. context = inject_template_context({'callback_token': email_token.key, }) html_message = loader.render_to_string(email_html, context,) send_mail( email_subject, email_plaintext % email_token.key, api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS, [getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)], fail_silently=False, html_message=html_message,) else: logger.debug("Failed to send token email. Missing PASSWORDLESS_EMAIL_NOREPLY_ADDRESS.") return False return True except Exception as e: logger.debug("Failed to send token email to user: %d." "Possibly no email on user object. Email entered was %s" % (user.id, getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME))) logger.debug(e) return False
['def', 'send_email_with_callback_token', '(', 'user', ',', 'email_token', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'if', 'api_settings', '.', 'PASSWORDLESS_EMAIL_NOREPLY_ADDRESS', ':', '# Make sure we have a sending address before sending.', '# Get email subject and message', 'email_subject', '=', 'kwargs', '.', 'get', '(', "'email_subject'", ',', 'api_settings', '.', 'PASSWORDLESS_EMAIL_SUBJECT', ')', 'email_plaintext', '=', 'kwargs', '.', 'get', '(', "'email_plaintext'", ',', 'api_settings', '.', 'PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE', ')', 'email_html', '=', 'kwargs', '.', 'get', '(', "'email_html'", ',', 'api_settings', '.', 'PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME', ')', '# Inject context if user specifies.', 'context', '=', 'inject_template_context', '(', '{', "'callback_token'", ':', 'email_token', '.', 'key', ',', '}', ')', 'html_message', '=', 'loader', '.', 'render_to_string', '(', 'email_html', ',', 'context', ',', ')', 'send_mail', '(', 'email_subject', ',', 'email_plaintext', '%', 'email_token', '.', 'key', ',', 'api_settings', '.', 'PASSWORDLESS_EMAIL_NOREPLY_ADDRESS', ',', '[', 'getattr', '(', 'user', ',', 'api_settings', '.', 'PASSWORDLESS_USER_EMAIL_FIELD_NAME', ')', ']', ',', 'fail_silently', '=', 'False', ',', 'html_message', '=', 'html_message', ',', ')', 'else', ':', 'logger', '.', 'debug', '(', '"Failed to send token email. Missing PASSWORDLESS_EMAIL_NOREPLY_ADDRESS."', ')', 'return', 'False', 'return', 'True', 'except', 'Exception', 'as', 'e', ':', 'logger', '.', 'debug', '(', '"Failed to send token email to user: %d."', '"Possibly no email on user object. Email entered was %s"', '%', '(', 'user', '.', 'id', ',', 'getattr', '(', 'user', ',', 'api_settings', '.', 'PASSWORDLESS_USER_EMAIL_FIELD_NAME', ')', ')', ')', 'logger', '.', 'debug', '(', 'e', ')', 'return', 'False']
Sends a Email to user.email. Passes silently without sending in test environment
['Sends', 'a', 'Email', 'to', 'user', '.', 'email', '.']
train
https://github.com/aaronn/django-rest-framework-passwordless/blob/cd3f229cbc24de9c4b65768395ab5ba8ac1aaf1a/drfpasswordless/utils.py#L105-L145
6,588
pyQode/pyqode.core
pyqode/core/dialogs/goto.py
DlgGotoLine.get_line
def get_line(cls, parent, current_line, line_count): """ Gets user selected line. :param parent: Parent widget :param current_line: Current line number :param line_count: Number of lines in the current text document. :returns: tuple(line, status) status is False if the dialog has been rejected. """ dlg = DlgGotoLine(parent, current_line + 1, line_count) if dlg.exec_() == dlg.Accepted: return dlg.spinBox.value() - 1, True return current_line, False
python
def get_line(cls, parent, current_line, line_count): """ Gets user selected line. :param parent: Parent widget :param current_line: Current line number :param line_count: Number of lines in the current text document. :returns: tuple(line, status) status is False if the dialog has been rejected. """ dlg = DlgGotoLine(parent, current_line + 1, line_count) if dlg.exec_() == dlg.Accepted: return dlg.spinBox.value() - 1, True return current_line, False
['def', 'get_line', '(', 'cls', ',', 'parent', ',', 'current_line', ',', 'line_count', ')', ':', 'dlg', '=', 'DlgGotoLine', '(', 'parent', ',', 'current_line', '+', '1', ',', 'line_count', ')', 'if', 'dlg', '.', 'exec_', '(', ')', '==', 'dlg', '.', 'Accepted', ':', 'return', 'dlg', '.', 'spinBox', '.', 'value', '(', ')', '-', '1', ',', 'True', 'return', 'current_line', ',', 'False']
Gets user selected line. :param parent: Parent widget :param current_line: Current line number :param line_count: Number of lines in the current text document. :returns: tuple(line, status) status is False if the dialog has been rejected.
['Gets', 'user', 'selected', 'line', '.']
train
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/dialogs/goto.py#L28-L42
6,589
raphaelvallat/pingouin
pingouin/distribution.py
homoscedasticity
def homoscedasticity(*args, alpha=.05): """Test equality of variance. Parameters ---------- sample1, sample2,... : array_like Array of sample data. May be different lengths. Returns ------- equal_var : boolean True if data have equal variance. p : float P-value. See Also -------- normality : Test the univariate normality of one or more array(s). sphericity : Mauchly's test for sphericity. Notes ----- This function first tests if the data are normally distributed using the Shapiro-Wilk test. If yes, then the homogeneity of variances is measured using the Bartlett test. If the data are not normally distributed, the Levene (1960) test, which is less sensitive to departure from normality, is used. The **Bartlett** :math:`T` statistic is defined as: .. math:: T = \\frac{(N-k) \\ln{s^{2}_{p}} - \\sum_{i=1}^{k}(N_{i} - 1) \\ln{s^{2}_{i}}}{1 + (1/(3(k-1)))((\\sum_{i=1}^{k}{1/(N_{i} - 1))} - 1/(N-k))} where :math:`s_i^2` is the variance of the :math:`i^{th}` group, :math:`N` is the total sample size, :math:`N_i` is the sample size of the :math:`i^{th}` group, :math:`k` is the number of groups, and :math:`s_p^2` is the pooled variance. The pooled variance is a weighted average of the group variances and is defined as: .. math:: s^{2}_{p} = \\sum_{i=1}^{k}(N_{i} - 1)s^{2}_{i}/(N-k) The p-value is then computed using a chi-square distribution: .. math:: T \\sim \\chi^2(k-1) The **Levene** :math:`W` statistic is defined as: .. math:: W = \\frac{(N-k)} {(k-1)} \\frac{\\sum_{i=1}^{k}N_{i}(\\overline{Z}_{i.}-\\overline{Z})^{2} } {\\sum_{i=1}^{k}\\sum_{j=1}^{N_i}(Z_{ij}-\\overline{Z}_{i.})^{2} } where :math:`Z_{ij} = |Y_{ij} - median({Y}_{i.})|`, :math:`\\overline{Z}_{i.}` are the group means of :math:`Z_{ij}` and :math:`\\overline{Z}` is the grand mean of :math:`Z_{ij}`. The p-value is then computed using a F-distribution: .. math:: W \\sim F(k-1, N-k) References ---------- .. [1] Bartlett, M. S. (1937). Properties of sufficiency and statistical tests. Proc. R. Soc. Lond. A, 160(901), 268-282. .. [2] Brown, M. B., & Forsythe, A. B. (1974). Robust tests for the equality of variances. Journal of the American Statistical Association, 69(346), 364-367. .. [3] NIST/SEMATECH e-Handbook of Statistical Methods, http://www.itl.nist.gov/div898/handbook/ Examples -------- Test the homoscedasticity of two arrays. >>> import numpy as np >>> from pingouin import homoscedasticity >>> np.random.seed(123) >>> # Scale = standard deviation of the distribution. >>> x = np.random.normal(loc=0, scale=1., size=100) >>> y = np.random.normal(loc=0, scale=0.8,size=100) >>> equal_var, p = homoscedasticity(x, y, alpha=.05) >>> print(round(np.var(x), 3), round(np.var(y), 3), equal_var, p) 1.273 0.602 False 0.0 """ from scipy.stats import levene, bartlett k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") # Test normality of data normal, _ = normality(*args) if np.count_nonzero(normal) != normal.size: # print('Data are not normally distributed. Using Levene test.') _, p = levene(*args) else: _, p = bartlett(*args) equal_var = True if p > alpha else False return equal_var, np.round(p, 3)
python
def homoscedasticity(*args, alpha=.05): """Test equality of variance. Parameters ---------- sample1, sample2,... : array_like Array of sample data. May be different lengths. Returns ------- equal_var : boolean True if data have equal variance. p : float P-value. See Also -------- normality : Test the univariate normality of one or more array(s). sphericity : Mauchly's test for sphericity. Notes ----- This function first tests if the data are normally distributed using the Shapiro-Wilk test. If yes, then the homogeneity of variances is measured using the Bartlett test. If the data are not normally distributed, the Levene (1960) test, which is less sensitive to departure from normality, is used. The **Bartlett** :math:`T` statistic is defined as: .. math:: T = \\frac{(N-k) \\ln{s^{2}_{p}} - \\sum_{i=1}^{k}(N_{i} - 1) \\ln{s^{2}_{i}}}{1 + (1/(3(k-1)))((\\sum_{i=1}^{k}{1/(N_{i} - 1))} - 1/(N-k))} where :math:`s_i^2` is the variance of the :math:`i^{th}` group, :math:`N` is the total sample size, :math:`N_i` is the sample size of the :math:`i^{th}` group, :math:`k` is the number of groups, and :math:`s_p^2` is the pooled variance. The pooled variance is a weighted average of the group variances and is defined as: .. math:: s^{2}_{p} = \\sum_{i=1}^{k}(N_{i} - 1)s^{2}_{i}/(N-k) The p-value is then computed using a chi-square distribution: .. math:: T \\sim \\chi^2(k-1) The **Levene** :math:`W` statistic is defined as: .. math:: W = \\frac{(N-k)} {(k-1)} \\frac{\\sum_{i=1}^{k}N_{i}(\\overline{Z}_{i.}-\\overline{Z})^{2} } {\\sum_{i=1}^{k}\\sum_{j=1}^{N_i}(Z_{ij}-\\overline{Z}_{i.})^{2} } where :math:`Z_{ij} = |Y_{ij} - median({Y}_{i.})|`, :math:`\\overline{Z}_{i.}` are the group means of :math:`Z_{ij}` and :math:`\\overline{Z}` is the grand mean of :math:`Z_{ij}`. The p-value is then computed using a F-distribution: .. math:: W \\sim F(k-1, N-k) References ---------- .. [1] Bartlett, M. S. (1937). Properties of sufficiency and statistical tests. Proc. R. Soc. Lond. A, 160(901), 268-282. .. [2] Brown, M. B., & Forsythe, A. B. (1974). Robust tests for the equality of variances. Journal of the American Statistical Association, 69(346), 364-367. .. [3] NIST/SEMATECH e-Handbook of Statistical Methods, http://www.itl.nist.gov/div898/handbook/ Examples -------- Test the homoscedasticity of two arrays. >>> import numpy as np >>> from pingouin import homoscedasticity >>> np.random.seed(123) >>> # Scale = standard deviation of the distribution. >>> x = np.random.normal(loc=0, scale=1., size=100) >>> y = np.random.normal(loc=0, scale=0.8,size=100) >>> equal_var, p = homoscedasticity(x, y, alpha=.05) >>> print(round(np.var(x), 3), round(np.var(y), 3), equal_var, p) 1.273 0.602 False 0.0 """ from scipy.stats import levene, bartlett k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") # Test normality of data normal, _ = normality(*args) if np.count_nonzero(normal) != normal.size: # print('Data are not normally distributed. Using Levene test.') _, p = levene(*args) else: _, p = bartlett(*args) equal_var = True if p > alpha else False return equal_var, np.round(p, 3)
['def', 'homoscedasticity', '(', '*', 'args', ',', 'alpha', '=', '.05', ')', ':', 'from', 'scipy', '.', 'stats', 'import', 'levene', ',', 'bartlett', 'k', '=', 'len', '(', 'args', ')', 'if', 'k', '<', '2', ':', 'raise', 'ValueError', '(', '"Must enter at least two input sample vectors."', ')', '# Test normality of data', 'normal', ',', '_', '=', 'normality', '(', '*', 'args', ')', 'if', 'np', '.', 'count_nonzero', '(', 'normal', ')', '!=', 'normal', '.', 'size', ':', "# print('Data are not normally distributed. Using Levene test.')", '_', ',', 'p', '=', 'levene', '(', '*', 'args', ')', 'else', ':', '_', ',', 'p', '=', 'bartlett', '(', '*', 'args', ')', 'equal_var', '=', 'True', 'if', 'p', '>', 'alpha', 'else', 'False', 'return', 'equal_var', ',', 'np', '.', 'round', '(', 'p', ',', '3', ')']
Test equality of variance. Parameters ---------- sample1, sample2,... : array_like Array of sample data. May be different lengths. Returns ------- equal_var : boolean True if data have equal variance. p : float P-value. See Also -------- normality : Test the univariate normality of one or more array(s). sphericity : Mauchly's test for sphericity. Notes ----- This function first tests if the data are normally distributed using the Shapiro-Wilk test. If yes, then the homogeneity of variances is measured using the Bartlett test. If the data are not normally distributed, the Levene (1960) test, which is less sensitive to departure from normality, is used. The **Bartlett** :math:`T` statistic is defined as: .. math:: T = \\frac{(N-k) \\ln{s^{2}_{p}} - \\sum_{i=1}^{k}(N_{i} - 1) \\ln{s^{2}_{i}}}{1 + (1/(3(k-1)))((\\sum_{i=1}^{k}{1/(N_{i} - 1))} - 1/(N-k))} where :math:`s_i^2` is the variance of the :math:`i^{th}` group, :math:`N` is the total sample size, :math:`N_i` is the sample size of the :math:`i^{th}` group, :math:`k` is the number of groups, and :math:`s_p^2` is the pooled variance. The pooled variance is a weighted average of the group variances and is defined as: .. math:: s^{2}_{p} = \\sum_{i=1}^{k}(N_{i} - 1)s^{2}_{i}/(N-k) The p-value is then computed using a chi-square distribution: .. math:: T \\sim \\chi^2(k-1) The **Levene** :math:`W` statistic is defined as: .. math:: W = \\frac{(N-k)} {(k-1)} \\frac{\\sum_{i=1}^{k}N_{i}(\\overline{Z}_{i.}-\\overline{Z})^{2} } {\\sum_{i=1}^{k}\\sum_{j=1}^{N_i}(Z_{ij}-\\overline{Z}_{i.})^{2} } where :math:`Z_{ij} = |Y_{ij} - median({Y}_{i.})|`, :math:`\\overline{Z}_{i.}` are the group means of :math:`Z_{ij}` and :math:`\\overline{Z}` is the grand mean of :math:`Z_{ij}`. The p-value is then computed using a F-distribution: .. math:: W \\sim F(k-1, N-k) References ---------- .. [1] Bartlett, M. S. (1937). Properties of sufficiency and statistical tests. Proc. R. Soc. Lond. A, 160(901), 268-282. .. [2] Brown, M. B., & Forsythe, A. B. (1974). Robust tests for the equality of variances. Journal of the American Statistical Association, 69(346), 364-367. .. [3] NIST/SEMATECH e-Handbook of Statistical Methods, http://www.itl.nist.gov/div898/handbook/ Examples -------- Test the homoscedasticity of two arrays. >>> import numpy as np >>> from pingouin import homoscedasticity >>> np.random.seed(123) >>> # Scale = standard deviation of the distribution. >>> x = np.random.normal(loc=0, scale=1., size=100) >>> y = np.random.normal(loc=0, scale=0.8,size=100) >>> equal_var, p = homoscedasticity(x, y, alpha=.05) >>> print(round(np.var(x), 3), round(np.var(y), 3), equal_var, p) 1.273 0.602 False 0.0
['Test', 'equality', 'of', 'variance', '.']
train
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/distribution.py#L165-L271
6,590
hobson/pug-dj
pug/dj/db.py
make_choices
def make_choices(*args): """Convert a 1-D sequence into a 2-D sequence of tuples for use in a Django field choices attribute >>> make_choices(range(3)) ((0, u'0'), (1, u'1'), (2, u'2')) >>> make_choices(dict(enumerate('abcd'))) ((0, u'a'), (1, u'b'), (2, u'c'), (3, u'd')) >>> make_choices('hello') (('hello', u'hello'),) >>> make_choices('hello', 'world') == make_choices(['hello', 'world']) == (('hello', u'hello'), ('world', u'world')) True """ if not args: return tuple() if isinstance(args[0], (list, tuple)): return make_choices(*tuple(args[0])) elif isinstance(args[0], collections.Mapping): return tuple((k, unicode(v)) for (k, v) in args[0].iteritems()) elif all(isinstance(arg, (int, float, Decimal, basestring)) for arg in args): return tuple((k, unicode(k)) for k in args)
python
def make_choices(*args): """Convert a 1-D sequence into a 2-D sequence of tuples for use in a Django field choices attribute >>> make_choices(range(3)) ((0, u'0'), (1, u'1'), (2, u'2')) >>> make_choices(dict(enumerate('abcd'))) ((0, u'a'), (1, u'b'), (2, u'c'), (3, u'd')) >>> make_choices('hello') (('hello', u'hello'),) >>> make_choices('hello', 'world') == make_choices(['hello', 'world']) == (('hello', u'hello'), ('world', u'world')) True """ if not args: return tuple() if isinstance(args[0], (list, tuple)): return make_choices(*tuple(args[0])) elif isinstance(args[0], collections.Mapping): return tuple((k, unicode(v)) for (k, v) in args[0].iteritems()) elif all(isinstance(arg, (int, float, Decimal, basestring)) for arg in args): return tuple((k, unicode(k)) for k in args)
['def', 'make_choices', '(', '*', 'args', ')', ':', 'if', 'not', 'args', ':', 'return', 'tuple', '(', ')', 'if', 'isinstance', '(', 'args', '[', '0', ']', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'return', 'make_choices', '(', '*', 'tuple', '(', 'args', '[', '0', ']', ')', ')', 'elif', 'isinstance', '(', 'args', '[', '0', ']', ',', 'collections', '.', 'Mapping', ')', ':', 'return', 'tuple', '(', '(', 'k', ',', 'unicode', '(', 'v', ')', ')', 'for', '(', 'k', ',', 'v', ')', 'in', 'args', '[', '0', ']', '.', 'iteritems', '(', ')', ')', 'elif', 'all', '(', 'isinstance', '(', 'arg', ',', '(', 'int', ',', 'float', ',', 'Decimal', ',', 'basestring', ')', ')', 'for', 'arg', 'in', 'args', ')', ':', 'return', 'tuple', '(', '(', 'k', ',', 'unicode', '(', 'k', ')', ')', 'for', 'k', 'in', 'args', ')']
Convert a 1-D sequence into a 2-D sequence of tuples for use in a Django field choices attribute >>> make_choices(range(3)) ((0, u'0'), (1, u'1'), (2, u'2')) >>> make_choices(dict(enumerate('abcd'))) ((0, u'a'), (1, u'b'), (2, u'c'), (3, u'd')) >>> make_choices('hello') (('hello', u'hello'),) >>> make_choices('hello', 'world') == make_choices(['hello', 'world']) == (('hello', u'hello'), ('world', u'world')) True
['Convert', 'a', '1', '-', 'D', 'sequence', 'into', 'a', '2', '-', 'D', 'sequence', 'of', 'tuples', 'for', 'use', 'in', 'a', 'Django', 'field', 'choices', 'attribute']
train
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L153-L172
6,591
praekeltfoundation/seed-stage-based-messaging
seed_stage_based_messaging/decorators.py
internal_only
def internal_only(view_func): """ A view decorator which blocks access for requests coming through the load balancer. """ @functools.wraps(view_func) def wrapper(request, *args, **kwargs): forwards = request.META.get("HTTP_X_FORWARDED_FOR", "").split(",") # The nginx in the docker container adds the loadbalancer IP to the list inside # X-Forwarded-For, so if the list contains more than a single item, we know # that it went through our loadbalancer if len(forwards) > 1: raise PermissionDenied() return view_func(request, *args, **kwargs) return wrapper
python
def internal_only(view_func): """ A view decorator which blocks access for requests coming through the load balancer. """ @functools.wraps(view_func) def wrapper(request, *args, **kwargs): forwards = request.META.get("HTTP_X_FORWARDED_FOR", "").split(",") # The nginx in the docker container adds the loadbalancer IP to the list inside # X-Forwarded-For, so if the list contains more than a single item, we know # that it went through our loadbalancer if len(forwards) > 1: raise PermissionDenied() return view_func(request, *args, **kwargs) return wrapper
['def', 'internal_only', '(', 'view_func', ')', ':', '@', 'functools', '.', 'wraps', '(', 'view_func', ')', 'def', 'wrapper', '(', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'forwards', '=', 'request', '.', 'META', '.', 'get', '(', '"HTTP_X_FORWARDED_FOR"', ',', '""', ')', '.', 'split', '(', '","', ')', '# The nginx in the docker container adds the loadbalancer IP to the list inside', '# X-Forwarded-For, so if the list contains more than a single item, we know', '# that it went through our loadbalancer', 'if', 'len', '(', 'forwards', ')', '>', '1', ':', 'raise', 'PermissionDenied', '(', ')', 'return', 'view_func', '(', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'wrapper']
A view decorator which blocks access for requests coming through the load balancer.
['A', 'view', 'decorator', 'which', 'blocks', 'access', 'for', 'requests', 'coming', 'through', 'the', 'load', 'balancer', '.']
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/seed_stage_based_messaging/decorators.py#L6-L21
6,592
mmp2/megaman
megaman/utils/validation.py
check_array
def check_array(array, accept_sparse=None, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, ensure_min_samples=1, ensure_min_features=1, warn_on_dtype=False): """Input validation on an array, list, sparse matrix or similar. By default, the input is converted to an at least 2nd numpy array. If the dtype of the array is object, attempt converting to float, raising on failure. Parameters ---------- array : object Input object to check / convert. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type, list of types or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. ensure_2d : boolean (default=True) Whether to make X at least 2d. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. ensure_min_samples : int (default=1) Make sure that the array has a minimum number of samples in its first axis (rows for a 2D array). Setting to 0 disables this check. ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when the input data has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. warn_on_dtype : boolean (default=False) Raise DataConversionWarning if the dtype of the input data structure does not match the requested dtype, causing a memory copy. estimator : str or estimator instance (default=None) If passed, include the name of the estimator in warning messages. Returns ------- X_converted : object The converted and validated X. """ if isinstance(accept_sparse, str): accept_sparse = [accept_sparse] # store whether originally we wanted numeric dtype dtype_numeric = dtype == "numeric" dtype_orig = getattr(array, "dtype", None) if not hasattr(dtype_orig, 'kind'): # not a data type (e.g. a column named dtype in a pandas DataFrame) dtype_orig = None if dtype_numeric: if dtype_orig is not None and dtype_orig.kind == "O": # if input is object, convert to float. dtype = np.float64 else: dtype = None if isinstance(dtype, (list, tuple)): if dtype_orig is not None and dtype_orig in dtype: # no dtype conversion required dtype = None else: # dtype conversion required. Let's select the first element of the # list of accepted types. dtype = dtype[0] if sp.issparse(array): array = _ensure_sparse_format(array, accept_sparse, dtype, copy, force_all_finite) else: array = np.array(array, dtype=dtype, order=order, copy=copy) if ensure_2d: if array.ndim == 1: if ensure_min_samples >= 2: raise ValueError("%s expects at least 2 samples provided " "in a 2 dimensional array-like input" % estimator_name) warnings.warn( "Passing 1d arrays as data is deprecated in 0.17 and will" "raise ValueError in 0.19. Reshape your data either using " "X.reshape(-1, 1) if your data has a single feature or " "X.reshape(1, -1) if it contains a single sample.", DeprecationWarning) array = np.atleast_2d(array) # To ensure that array flags are maintained array = np.array(array, dtype=dtype, order=order, copy=copy) # make sure we acually converted to numeric: if dtype_numeric and array.dtype.kind == "O": array = array.astype(np.float64) if not allow_nd and array.ndim >= 3: raise ValueError("Found array with dim %d. expected <= 2." % (array.ndim)) if force_all_finite: _assert_all_finite(array) shape_repr = _shape_repr(array.shape) if ensure_min_samples > 0: n_samples = _num_samples(array) if n_samples < ensure_min_samples: raise ValueError("Found array with %d sample(s) (shape=%s) while a" " minimum of %d is required." % (n_samples, shape_repr, ensure_min_samples)) if ensure_min_features > 0 and array.ndim == 2: n_features = array.shape[1] if n_features < ensure_min_features: raise ValueError("Found array with %d feature(s) (shape=%s) while" " a minimum of %d is required." % (n_features, shape_repr, ensure_min_features)) if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig: msg = ("Data with input dtype %s was converted to %s." % (dtype_orig, array.dtype)) warnings.warn(msg, DataConversionWarning) return array
python
def check_array(array, accept_sparse=None, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, ensure_min_samples=1, ensure_min_features=1, warn_on_dtype=False): """Input validation on an array, list, sparse matrix or similar. By default, the input is converted to an at least 2nd numpy array. If the dtype of the array is object, attempt converting to float, raising on failure. Parameters ---------- array : object Input object to check / convert. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type, list of types or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. ensure_2d : boolean (default=True) Whether to make X at least 2d. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. ensure_min_samples : int (default=1) Make sure that the array has a minimum number of samples in its first axis (rows for a 2D array). Setting to 0 disables this check. ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when the input data has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. warn_on_dtype : boolean (default=False) Raise DataConversionWarning if the dtype of the input data structure does not match the requested dtype, causing a memory copy. estimator : str or estimator instance (default=None) If passed, include the name of the estimator in warning messages. Returns ------- X_converted : object The converted and validated X. """ if isinstance(accept_sparse, str): accept_sparse = [accept_sparse] # store whether originally we wanted numeric dtype dtype_numeric = dtype == "numeric" dtype_orig = getattr(array, "dtype", None) if not hasattr(dtype_orig, 'kind'): # not a data type (e.g. a column named dtype in a pandas DataFrame) dtype_orig = None if dtype_numeric: if dtype_orig is not None and dtype_orig.kind == "O": # if input is object, convert to float. dtype = np.float64 else: dtype = None if isinstance(dtype, (list, tuple)): if dtype_orig is not None and dtype_orig in dtype: # no dtype conversion required dtype = None else: # dtype conversion required. Let's select the first element of the # list of accepted types. dtype = dtype[0] if sp.issparse(array): array = _ensure_sparse_format(array, accept_sparse, dtype, copy, force_all_finite) else: array = np.array(array, dtype=dtype, order=order, copy=copy) if ensure_2d: if array.ndim == 1: if ensure_min_samples >= 2: raise ValueError("%s expects at least 2 samples provided " "in a 2 dimensional array-like input" % estimator_name) warnings.warn( "Passing 1d arrays as data is deprecated in 0.17 and will" "raise ValueError in 0.19. Reshape your data either using " "X.reshape(-1, 1) if your data has a single feature or " "X.reshape(1, -1) if it contains a single sample.", DeprecationWarning) array = np.atleast_2d(array) # To ensure that array flags are maintained array = np.array(array, dtype=dtype, order=order, copy=copy) # make sure we acually converted to numeric: if dtype_numeric and array.dtype.kind == "O": array = array.astype(np.float64) if not allow_nd and array.ndim >= 3: raise ValueError("Found array with dim %d. expected <= 2." % (array.ndim)) if force_all_finite: _assert_all_finite(array) shape_repr = _shape_repr(array.shape) if ensure_min_samples > 0: n_samples = _num_samples(array) if n_samples < ensure_min_samples: raise ValueError("Found array with %d sample(s) (shape=%s) while a" " minimum of %d is required." % (n_samples, shape_repr, ensure_min_samples)) if ensure_min_features > 0 and array.ndim == 2: n_features = array.shape[1] if n_features < ensure_min_features: raise ValueError("Found array with %d feature(s) (shape=%s) while" " a minimum of %d is required." % (n_features, shape_repr, ensure_min_features)) if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig: msg = ("Data with input dtype %s was converted to %s." % (dtype_orig, array.dtype)) warnings.warn(msg, DataConversionWarning) return array
['def', 'check_array', '(', 'array', ',', 'accept_sparse', '=', 'None', ',', 'dtype', '=', '"numeric"', ',', 'order', '=', 'None', ',', 'copy', '=', 'False', ',', 'force_all_finite', '=', 'True', ',', 'ensure_2d', '=', 'True', ',', 'allow_nd', '=', 'False', ',', 'ensure_min_samples', '=', '1', ',', 'ensure_min_features', '=', '1', ',', 'warn_on_dtype', '=', 'False', ')', ':', 'if', 'isinstance', '(', 'accept_sparse', ',', 'str', ')', ':', 'accept_sparse', '=', '[', 'accept_sparse', ']', '# store whether originally we wanted numeric dtype', 'dtype_numeric', '=', 'dtype', '==', '"numeric"', 'dtype_orig', '=', 'getattr', '(', 'array', ',', '"dtype"', ',', 'None', ')', 'if', 'not', 'hasattr', '(', 'dtype_orig', ',', "'kind'", ')', ':', '# not a data type (e.g. a column named dtype in a pandas DataFrame)', 'dtype_orig', '=', 'None', 'if', 'dtype_numeric', ':', 'if', 'dtype_orig', 'is', 'not', 'None', 'and', 'dtype_orig', '.', 'kind', '==', '"O"', ':', '# if input is object, convert to float.', 'dtype', '=', 'np', '.', 'float64', 'else', ':', 'dtype', '=', 'None', 'if', 'isinstance', '(', 'dtype', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'if', 'dtype_orig', 'is', 'not', 'None', 'and', 'dtype_orig', 'in', 'dtype', ':', '# no dtype conversion required', 'dtype', '=', 'None', 'else', ':', "# dtype conversion required. Let's select the first element of the", '# list of accepted types.', 'dtype', '=', 'dtype', '[', '0', ']', 'if', 'sp', '.', 'issparse', '(', 'array', ')', ':', 'array', '=', '_ensure_sparse_format', '(', 'array', ',', 'accept_sparse', ',', 'dtype', ',', 'copy', ',', 'force_all_finite', ')', 'else', ':', 'array', '=', 'np', '.', 'array', '(', 'array', ',', 'dtype', '=', 'dtype', ',', 'order', '=', 'order', ',', 'copy', '=', 'copy', ')', 'if', 'ensure_2d', ':', 'if', 'array', '.', 'ndim', '==', '1', ':', 'if', 'ensure_min_samples', '>=', '2', ':', 'raise', 'ValueError', '(', '"%s expects at least 2 samples provided "', '"in a 2 dimensional array-like input"', '%', 'estimator_name', ')', 'warnings', '.', 'warn', '(', '"Passing 1d arrays as data is deprecated in 0.17 and will"', '"raise ValueError in 0.19. Reshape your data either using "', '"X.reshape(-1, 1) if your data has a single feature or "', '"X.reshape(1, -1) if it contains a single sample."', ',', 'DeprecationWarning', ')', 'array', '=', 'np', '.', 'atleast_2d', '(', 'array', ')', '# To ensure that array flags are maintained', 'array', '=', 'np', '.', 'array', '(', 'array', ',', 'dtype', '=', 'dtype', ',', 'order', '=', 'order', ',', 'copy', '=', 'copy', ')', '# make sure we acually converted to numeric:', 'if', 'dtype_numeric', 'and', 'array', '.', 'dtype', '.', 'kind', '==', '"O"', ':', 'array', '=', 'array', '.', 'astype', '(', 'np', '.', 'float64', ')', 'if', 'not', 'allow_nd', 'and', 'array', '.', 'ndim', '>=', '3', ':', 'raise', 'ValueError', '(', '"Found array with dim %d. expected <= 2."', '%', '(', 'array', '.', 'ndim', ')', ')', 'if', 'force_all_finite', ':', '_assert_all_finite', '(', 'array', ')', 'shape_repr', '=', '_shape_repr', '(', 'array', '.', 'shape', ')', 'if', 'ensure_min_samples', '>', '0', ':', 'n_samples', '=', '_num_samples', '(', 'array', ')', 'if', 'n_samples', '<', 'ensure_min_samples', ':', 'raise', 'ValueError', '(', '"Found array with %d sample(s) (shape=%s) while a"', '" minimum of %d is required."', '%', '(', 'n_samples', ',', 'shape_repr', ',', 'ensure_min_samples', ')', ')', 'if', 'ensure_min_features', '>', '0', 'and', 'array', '.', 'ndim', '==', '2', ':', 'n_features', '=', 'array', '.', 'shape', '[', '1', ']', 'if', 'n_features', '<', 'ensure_min_features', ':', 'raise', 'ValueError', '(', '"Found array with %d feature(s) (shape=%s) while"', '" a minimum of %d is required."', '%', '(', 'n_features', ',', 'shape_repr', ',', 'ensure_min_features', ')', ')', 'if', 'warn_on_dtype', 'and', 'dtype_orig', 'is', 'not', 'None', 'and', 'array', '.', 'dtype', '!=', 'dtype_orig', ':', 'msg', '=', '(', '"Data with input dtype %s was converted to %s."', '%', '(', 'dtype_orig', ',', 'array', '.', 'dtype', ')', ')', 'warnings', '.', 'warn', '(', 'msg', ',', 'DataConversionWarning', ')', 'return', 'array']
Input validation on an array, list, sparse matrix or similar. By default, the input is converted to an at least 2nd numpy array. If the dtype of the array is object, attempt converting to float, raising on failure. Parameters ---------- array : object Input object to check / convert. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type, list of types or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. ensure_2d : boolean (default=True) Whether to make X at least 2d. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. ensure_min_samples : int (default=1) Make sure that the array has a minimum number of samples in its first axis (rows for a 2D array). Setting to 0 disables this check. ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when the input data has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. warn_on_dtype : boolean (default=False) Raise DataConversionWarning if the dtype of the input data structure does not match the requested dtype, causing a memory copy. estimator : str or estimator instance (default=None) If passed, include the name of the estimator in warning messages. Returns ------- X_converted : object The converted and validated X.
['Input', 'validation', 'on', 'an', 'array', 'list', 'sparse', 'matrix', 'or', 'similar', '.', 'By', 'default', 'the', 'input', 'is', 'converted', 'to', 'an', 'at', 'least', '2nd', 'numpy', 'array', '.', 'If', 'the', 'dtype', 'of', 'the', 'array', 'is', 'object', 'attempt', 'converting', 'to', 'float', 'raising', 'on', 'failure', '.', 'Parameters', '----------', 'array', ':', 'object', 'Input', 'object', 'to', 'check', '/', 'convert', '.', 'accept_sparse', ':', 'string', 'list', 'of', 'string', 'or', 'None', '(', 'default', '=', 'None', ')', 'String', '[', 's', ']', 'representing', 'allowed', 'sparse', 'matrix', 'formats', 'such', 'as', 'csc', 'csr', 'etc', '.', 'None', 'means', 'that', 'sparse', 'matrix', 'input', 'will', 'raise', 'an', 'error', '.', 'If', 'the', 'input', 'is', 'sparse', 'but', 'not', 'in', 'the', 'allowed', 'format', 'it', 'will', 'be', 'converted', 'to', 'the', 'first', 'listed', 'format', '.', 'dtype', ':', 'string', 'type', 'list', 'of', 'types', 'or', 'None', '(', 'default', '=', 'numeric', ')', 'Data', 'type', 'of', 'result', '.', 'If', 'None', 'the', 'dtype', 'of', 'the', 'input', 'is', 'preserved', '.', 'If', 'numeric', 'dtype', 'is', 'preserved', 'unless', 'array', '.', 'dtype', 'is', 'object', '.', 'If', 'dtype', 'is', 'a', 'list', 'of', 'types', 'conversion', 'on', 'the', 'first', 'type', 'is', 'only', 'performed', 'if', 'the', 'dtype', 'of', 'the', 'input', 'is', 'not', 'in', 'the', 'list', '.', 'order', ':', 'F', 'C', 'or', 'None', '(', 'default', '=', 'None', ')', 'Whether', 'an', 'array', 'will', 'be', 'forced', 'to', 'be', 'fortran', 'or', 'c', '-', 'style', '.', 'copy', ':', 'boolean', '(', 'default', '=', 'False', ')', 'Whether', 'a', 'forced', 'copy', 'will', 'be', 'triggered', '.', 'If', 'copy', '=', 'False', 'a', 'copy', 'might', 'be', 'triggered', 'by', 'a', 'conversion', '.', 'force_all_finite', ':', 'boolean', '(', 'default', '=', 'True', ')', 'Whether', 'to', 'raise', 'an', 'error', 'on', 'np', '.', 'inf', 'and', 'np', '.', 'nan', 'in', 'X', '.', 'ensure_2d', ':', 'boolean', '(', 'default', '=', 'True', ')', 'Whether', 'to', 'make', 'X', 'at', 'least', '2d', '.', 'allow_nd', ':', 'boolean', '(', 'default', '=', 'False', ')', 'Whether', 'to', 'allow', 'X', '.', 'ndim', '>', '2', '.', 'ensure_min_samples', ':', 'int', '(', 'default', '=', '1', ')', 'Make', 'sure', 'that', 'the', 'array', 'has', 'a', 'minimum', 'number', 'of', 'samples', 'in', 'its', 'first', 'axis', '(', 'rows', 'for', 'a', '2D', 'array', ')', '.', 'Setting', 'to', '0', 'disables', 'this', 'check', '.', 'ensure_min_features', ':', 'int', '(', 'default', '=', '1', ')', 'Make', 'sure', 'that', 'the', '2D', 'array', 'has', 'some', 'minimum', 'number', 'of', 'features', '(', 'columns', ')', '.', 'The', 'default', 'value', 'of', '1', 'rejects', 'empty', 'datasets', '.', 'This', 'check', 'is', 'only', 'enforced', 'when', 'the', 'input', 'data', 'has', 'effectively', '2', 'dimensions', 'or', 'is', 'originally', '1D', 'and', 'ensure_2d', 'is', 'True', '.', 'Setting', 'to', '0', 'disables', 'this', 'check', '.', 'warn_on_dtype', ':', 'boolean', '(', 'default', '=', 'False', ')', 'Raise', 'DataConversionWarning', 'if', 'the', 'dtype', 'of', 'the', 'input', 'data', 'structure', 'does', 'not', 'match', 'the', 'requested', 'dtype', 'causing', 'a', 'memory', 'copy', '.', 'estimator', ':', 'str', 'or', 'estimator', 'instance', '(', 'default', '=', 'None', ')', 'If', 'passed', 'include', 'the', 'name', 'of', 'the', 'estimator', 'in', 'warning', 'messages', '.', 'Returns', '-------', 'X_converted', ':', 'object', 'The', 'converted', 'and', 'validated', 'X', '.']
train
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/validation.py#L210-L339
6,593
MichaelAquilina/hashedindex
hashedindex/textparser.py
word_tokenize
def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True): """ Parses the given text and yields tokens which represent words within the given text. Tokens are assumed to be divided by any form of whitespace character. """ if ngrams is None: ngrams = 1 text = re.sub(re.compile('\'s'), '', text) # Simple heuristic text = re.sub(_re_punctuation, '', text) matched_tokens = re.findall(_re_token, text.lower()) for tokens in get_ngrams(matched_tokens, ngrams): for i in range(len(tokens)): tokens[i] = tokens[i].strip(punctuation) if len(tokens[i]) < min_length or tokens[i] in stopwords: break if ignore_numeric and isnumeric(tokens[i]): break else: yield tuple(tokens)
python
def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True): """ Parses the given text and yields tokens which represent words within the given text. Tokens are assumed to be divided by any form of whitespace character. """ if ngrams is None: ngrams = 1 text = re.sub(re.compile('\'s'), '', text) # Simple heuristic text = re.sub(_re_punctuation, '', text) matched_tokens = re.findall(_re_token, text.lower()) for tokens in get_ngrams(matched_tokens, ngrams): for i in range(len(tokens)): tokens[i] = tokens[i].strip(punctuation) if len(tokens[i]) < min_length or tokens[i] in stopwords: break if ignore_numeric and isnumeric(tokens[i]): break else: yield tuple(tokens)
['def', 'word_tokenize', '(', 'text', ',', 'stopwords', '=', '_stopwords', ',', 'ngrams', '=', 'None', ',', 'min_length', '=', '0', ',', 'ignore_numeric', '=', 'True', ')', ':', 'if', 'ngrams', 'is', 'None', ':', 'ngrams', '=', '1', 'text', '=', 're', '.', 'sub', '(', 're', '.', 'compile', '(', "'\\'s'", ')', ',', "''", ',', 'text', ')', '# Simple heuristic', 'text', '=', 're', '.', 'sub', '(', '_re_punctuation', ',', "''", ',', 'text', ')', 'matched_tokens', '=', 're', '.', 'findall', '(', '_re_token', ',', 'text', '.', 'lower', '(', ')', ')', 'for', 'tokens', 'in', 'get_ngrams', '(', 'matched_tokens', ',', 'ngrams', ')', ':', 'for', 'i', 'in', 'range', '(', 'len', '(', 'tokens', ')', ')', ':', 'tokens', '[', 'i', ']', '=', 'tokens', '[', 'i', ']', '.', 'strip', '(', 'punctuation', ')', 'if', 'len', '(', 'tokens', '[', 'i', ']', ')', '<', 'min_length', 'or', 'tokens', '[', 'i', ']', 'in', 'stopwords', ':', 'break', 'if', 'ignore_numeric', 'and', 'isnumeric', '(', 'tokens', '[', 'i', ']', ')', ':', 'break', 'else', ':', 'yield', 'tuple', '(', 'tokens', ')']
Parses the given text and yields tokens which represent words within the given text. Tokens are assumed to be divided by any form of whitespace character.
['Parses', 'the', 'given', 'text', 'and', 'yields', 'tokens', 'which', 'represent', 'words', 'within', 'the', 'given', 'text', '.', 'Tokens', 'are', 'assumed', 'to', 'be', 'divided', 'by', 'any', 'form', 'of', 'whitespace', 'character', '.']
train
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/textparser.py#L67-L89
6,594
nikdon/pyEntropy
pyentrp/entropy.py
util_granulate_time_series
def util_granulate_time_series(time_series, scale): """Extract coarse-grained time series Args: time_series: Time series scale: Scale factor Returns: Vector of coarse-grained time series with given scale factor """ n = len(time_series) b = int(np.fix(n / scale)) temp = np.reshape(time_series[0:b*scale], (b, scale)) cts = np.mean(temp, axis = 1) return cts
python
def util_granulate_time_series(time_series, scale): """Extract coarse-grained time series Args: time_series: Time series scale: Scale factor Returns: Vector of coarse-grained time series with given scale factor """ n = len(time_series) b = int(np.fix(n / scale)) temp = np.reshape(time_series[0:b*scale], (b, scale)) cts = np.mean(temp, axis = 1) return cts
['def', 'util_granulate_time_series', '(', 'time_series', ',', 'scale', ')', ':', 'n', '=', 'len', '(', 'time_series', ')', 'b', '=', 'int', '(', 'np', '.', 'fix', '(', 'n', '/', 'scale', ')', ')', 'temp', '=', 'np', '.', 'reshape', '(', 'time_series', '[', '0', ':', 'b', '*', 'scale', ']', ',', '(', 'b', ',', 'scale', ')', ')', 'cts', '=', 'np', '.', 'mean', '(', 'temp', ',', 'axis', '=', '1', ')', 'return', 'cts']
Extract coarse-grained time series Args: time_series: Time series scale: Scale factor Returns: Vector of coarse-grained time series with given scale factor
['Extract', 'coarse', '-', 'grained', 'time', 'series']
train
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L64-L78
6,595
fermiPy/fermipy
fermipy/spectrum.py
SpectralFunction._integrate
def _integrate(cls, fn, emin, emax, params, scale=1.0, extra_params=None, npt=20): """Fast numerical integration method using mid-point rule.""" emin = np.expand_dims(emin, -1) emax = np.expand_dims(emax, -1) params = copy.deepcopy(params) for i, p in enumerate(params): params[i] = np.expand_dims(params[i], -1) xedges = np.linspace(0.0, 1.0, npt + 1) logx_edge = np.log(emin) + xedges * (np.log(emax) - np.log(emin)) logx = 0.5 * (logx_edge[..., 1:] + logx_edge[..., :-1]) xw = np.exp(logx_edge[..., 1:]) - np.exp(logx_edge[..., :-1]) dnde = fn(np.exp(logx), params, scale, extra_params) return np.sum(dnde * xw, axis=-1)
python
def _integrate(cls, fn, emin, emax, params, scale=1.0, extra_params=None, npt=20): """Fast numerical integration method using mid-point rule.""" emin = np.expand_dims(emin, -1) emax = np.expand_dims(emax, -1) params = copy.deepcopy(params) for i, p in enumerate(params): params[i] = np.expand_dims(params[i], -1) xedges = np.linspace(0.0, 1.0, npt + 1) logx_edge = np.log(emin) + xedges * (np.log(emax) - np.log(emin)) logx = 0.5 * (logx_edge[..., 1:] + logx_edge[..., :-1]) xw = np.exp(logx_edge[..., 1:]) - np.exp(logx_edge[..., :-1]) dnde = fn(np.exp(logx), params, scale, extra_params) return np.sum(dnde * xw, axis=-1)
['def', '_integrate', '(', 'cls', ',', 'fn', ',', 'emin', ',', 'emax', ',', 'params', ',', 'scale', '=', '1.0', ',', 'extra_params', '=', 'None', ',', 'npt', '=', '20', ')', ':', 'emin', '=', 'np', '.', 'expand_dims', '(', 'emin', ',', '-', '1', ')', 'emax', '=', 'np', '.', 'expand_dims', '(', 'emax', ',', '-', '1', ')', 'params', '=', 'copy', '.', 'deepcopy', '(', 'params', ')', 'for', 'i', ',', 'p', 'in', 'enumerate', '(', 'params', ')', ':', 'params', '[', 'i', ']', '=', 'np', '.', 'expand_dims', '(', 'params', '[', 'i', ']', ',', '-', '1', ')', 'xedges', '=', 'np', '.', 'linspace', '(', '0.0', ',', '1.0', ',', 'npt', '+', '1', ')', 'logx_edge', '=', 'np', '.', 'log', '(', 'emin', ')', '+', 'xedges', '*', '(', 'np', '.', 'log', '(', 'emax', ')', '-', 'np', '.', 'log', '(', 'emin', ')', ')', 'logx', '=', '0.5', '*', '(', 'logx_edge', '[', '...', ',', '1', ':', ']', '+', 'logx_edge', '[', '...', ',', ':', '-', '1', ']', ')', 'xw', '=', 'np', '.', 'exp', '(', 'logx_edge', '[', '...', ',', '1', ':', ']', ')', '-', 'np', '.', 'exp', '(', 'logx_edge', '[', '...', ',', ':', '-', '1', ']', ')', 'dnde', '=', 'fn', '(', 'np', '.', 'exp', '(', 'logx', ')', ',', 'params', ',', 'scale', ',', 'extra_params', ')', 'return', 'np', '.', 'sum', '(', 'dnde', '*', 'xw', ',', 'axis', '=', '-', '1', ')']
Fast numerical integration method using mid-point rule.
['Fast', 'numerical', 'integration', 'method', 'using', 'mid', '-', 'point', 'rule', '.']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/spectrum.py#L230-L246
6,596
wavefrontHQ/python-client
wavefront_api_client/api/user_api.py
UserApi.remove_user_from_user_groups
def remove_user_from_user_groups(self, id, **kwargs): # noqa: E501 """Removes specific user groups from the user # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.remove_user_from_user_groups(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param list[str] body: The list of user groups that should be removed from the user :return: UserModel If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.remove_user_from_user_groups_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.remove_user_from_user_groups_with_http_info(id, **kwargs) # noqa: E501 return data
python
def remove_user_from_user_groups(self, id, **kwargs): # noqa: E501 """Removes specific user groups from the user # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.remove_user_from_user_groups(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param list[str] body: The list of user groups that should be removed from the user :return: UserModel If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.remove_user_from_user_groups_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.remove_user_from_user_groups_with_http_info(id, **kwargs) # noqa: E501 return data
['def', 'remove_user_from_user_groups', '(', 'self', ',', 'id', ',', '*', '*', 'kwargs', ')', ':', '# noqa: E501', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async_req'", ')', ':', 'return', 'self', '.', 'remove_user_from_user_groups_with_http_info', '(', 'id', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'remove_user_from_user_groups_with_http_info', '(', 'id', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'return', 'data']
Removes specific user groups from the user # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.remove_user_from_user_groups(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param list[str] body: The list of user groups that should be removed from the user :return: UserModel If the method is called asynchronously, returns the request thread.
['Removes', 'specific', 'user', 'groups', 'from', 'the', 'user', '#', 'noqa', ':', 'E501']
train
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/user_api.py#L911-L932
6,597
mmp2/megaman
megaman/utils/k_means_clustering.py
get_centroids
def get_centroids(data,k,labels,centroids,data_norms): """ For each element in the dataset, choose the closest centroid Parameters ------------ data: array-like, shape= (m_samples,n_samples) K: integer, number of K clusters centroids: array-like, shape=(K, n_samples) labels: array-like, shape (1,n_samples) returns ------------- centroids: array-like, shape (K,n_samples) """ D = data.shape[1] for j in range(k): cluster_points = np.where(labels == j) cluster_total = len(cluster_points) if cluster_total == 0: _, temp = new_orthogonal_center(data,data_norms,centroids) else: temp = np.mean(data[cluster_points,:],axis=1) centroids[j,:] = temp return centroids
python
def get_centroids(data,k,labels,centroids,data_norms): """ For each element in the dataset, choose the closest centroid Parameters ------------ data: array-like, shape= (m_samples,n_samples) K: integer, number of K clusters centroids: array-like, shape=(K, n_samples) labels: array-like, shape (1,n_samples) returns ------------- centroids: array-like, shape (K,n_samples) """ D = data.shape[1] for j in range(k): cluster_points = np.where(labels == j) cluster_total = len(cluster_points) if cluster_total == 0: _, temp = new_orthogonal_center(data,data_norms,centroids) else: temp = np.mean(data[cluster_points,:],axis=1) centroids[j,:] = temp return centroids
['def', 'get_centroids', '(', 'data', ',', 'k', ',', 'labels', ',', 'centroids', ',', 'data_norms', ')', ':', 'D', '=', 'data', '.', 'shape', '[', '1', ']', 'for', 'j', 'in', 'range', '(', 'k', ')', ':', 'cluster_points', '=', 'np', '.', 'where', '(', 'labels', '==', 'j', ')', 'cluster_total', '=', 'len', '(', 'cluster_points', ')', 'if', 'cluster_total', '==', '0', ':', '_', ',', 'temp', '=', 'new_orthogonal_center', '(', 'data', ',', 'data_norms', ',', 'centroids', ')', 'else', ':', 'temp', '=', 'np', '.', 'mean', '(', 'data', '[', 'cluster_points', ',', ':', ']', ',', 'axis', '=', '1', ')', 'centroids', '[', 'j', ',', ':', ']', '=', 'temp', 'return', 'centroids']
For each element in the dataset, choose the closest centroid Parameters ------------ data: array-like, shape= (m_samples,n_samples) K: integer, number of K clusters centroids: array-like, shape=(K, n_samples) labels: array-like, shape (1,n_samples) returns ------------- centroids: array-like, shape (K,n_samples)
['For', 'each', 'element', 'in', 'the', 'dataset', 'choose', 'the', 'closest', 'centroid', 'Parameters', '------------', 'data', ':', 'array', '-', 'like', 'shape', '=', '(', 'm_samples', 'n_samples', ')', 'K', ':', 'integer', 'number', 'of', 'K', 'clusters', 'centroids', ':', 'array', '-', 'like', 'shape', '=', '(', 'K', 'n_samples', ')', 'labels', ':', 'array', '-', 'like', 'shape', '(', '1', 'n_samples', ')', 'returns', '-------------', 'centroids', ':', 'array', '-', 'like', 'shape', '(', 'K', 'n_samples', ')']
train
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/k_means_clustering.py#L121-L145
6,598
saltstack/salt
salt/modules/influxdbmod.py
revoke_admin_privileges
def revoke_admin_privileges(name, **client_args): ''' Revoke cluster administration privileges from a user. name Name of the user from whom admin privileges will be revoked. CLI Example: .. code-block:: bash salt '*' influxdb.revoke_admin_privileges <name> ''' client = _client(**client_args) client.revoke_admin_privileges(name) return True
python
def revoke_admin_privileges(name, **client_args): ''' Revoke cluster administration privileges from a user. name Name of the user from whom admin privileges will be revoked. CLI Example: .. code-block:: bash salt '*' influxdb.revoke_admin_privileges <name> ''' client = _client(**client_args) client.revoke_admin_privileges(name) return True
['def', 'revoke_admin_privileges', '(', 'name', ',', '*', '*', 'client_args', ')', ':', 'client', '=', '_client', '(', '*', '*', 'client_args', ')', 'client', '.', 'revoke_admin_privileges', '(', 'name', ')', 'return', 'True']
Revoke cluster administration privileges from a user. name Name of the user from whom admin privileges will be revoked. CLI Example: .. code-block:: bash salt '*' influxdb.revoke_admin_privileges <name>
['Revoke', 'cluster', 'administration', 'privileges', 'from', 'a', 'user', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L292-L308
6,599
adrienverge/yamllint
yamllint/rules/common.py
get_real_end_line
def get_real_end_line(token): """Finds the line on which the token really ends. With pyyaml, scalar tokens often end on a next line. """ end_line = token.end_mark.line + 1 if not isinstance(token, yaml.ScalarToken): return end_line pos = token.end_mark.pointer - 1 while (pos >= token.start_mark.pointer - 1 and token.end_mark.buffer[pos] in string.whitespace): if token.end_mark.buffer[pos] == '\n': end_line -= 1 pos -= 1 return end_line
python
def get_real_end_line(token): """Finds the line on which the token really ends. With pyyaml, scalar tokens often end on a next line. """ end_line = token.end_mark.line + 1 if not isinstance(token, yaml.ScalarToken): return end_line pos = token.end_mark.pointer - 1 while (pos >= token.start_mark.pointer - 1 and token.end_mark.buffer[pos] in string.whitespace): if token.end_mark.buffer[pos] == '\n': end_line -= 1 pos -= 1 return end_line
['def', 'get_real_end_line', '(', 'token', ')', ':', 'end_line', '=', 'token', '.', 'end_mark', '.', 'line', '+', '1', 'if', 'not', 'isinstance', '(', 'token', ',', 'yaml', '.', 'ScalarToken', ')', ':', 'return', 'end_line', 'pos', '=', 'token', '.', 'end_mark', '.', 'pointer', '-', '1', 'while', '(', 'pos', '>=', 'token', '.', 'start_mark', '.', 'pointer', '-', '1', 'and', 'token', '.', 'end_mark', '.', 'buffer', '[', 'pos', ']', 'in', 'string', '.', 'whitespace', ')', ':', 'if', 'token', '.', 'end_mark', '.', 'buffer', '[', 'pos', ']', '==', "'\\n'", ':', 'end_line', '-=', '1', 'pos', '-=', '1', 'return', 'end_line']
Finds the line on which the token really ends. With pyyaml, scalar tokens often end on a next line.
['Finds', 'the', 'line', 'on', 'which', 'the', 'token', 'really', 'ends', '.']
train
https://github.com/adrienverge/yamllint/blob/fec2c2fba736cabf6bee6b5eeb905cab0dc820f6/yamllint/rules/common.py#L61-L77