column                      dtype           min      max
Unnamed: 0                  int64           0        10k
repository_name             stringlengths   7        54
func_path_in_repository     stringlengths   5        223
func_name                   stringlengths   1        134
whole_func_string           stringlengths   100      30.3k
language                    stringclasses   1 value
func_code_string            stringlengths   100      30.3k
func_code_tokens            stringlengths   138      33.2k
func_documentation_string   stringlengths   1        15k
func_documentation_tokens   stringlengths   5        5.14k
split_name                  stringclasses   1 value
func_code_url               stringlengths   91       315
5,200
rhelmot/nclib
nclib/netcat.py
Netcat.recv_line
def recv_line(self, max_size=None, timeout='default', ending=None): """ Recieve until the next newline , default "\\n". The newline string can be changed by changing ``nc.LINE_ENDING``. The newline will be returned as part of the string. Aliases: recvline, readline, read_line, readln, recvln """ if ending is None: ending = self.LINE_ENDING return self.recv_until(ending, max_size, timeout)
python
def recv_line(self, max_size=None, timeout='default', ending=None): """ Recieve until the next newline , default "\\n". The newline string can be changed by changing ``nc.LINE_ENDING``. The newline will be returned as part of the string. Aliases: recvline, readline, read_line, readln, recvln """ if ending is None: ending = self.LINE_ENDING return self.recv_until(ending, max_size, timeout)
['def', 'recv_line', '(', 'self', ',', 'max_size', '=', 'None', ',', 'timeout', '=', "'default'", ',', 'ending', '=', 'None', ')', ':', 'if', 'ending', 'is', 'None', ':', 'ending', '=', 'self', '.', 'LINE_ENDING', 'return', 'self', '.', 'recv_until', '(', 'ending', ',', 'max_size', ',', 'timeout', ')']
Recieve until the next newline , default "\\n". The newline string can be changed by changing ``nc.LINE_ENDING``. The newline will be returned as part of the string. Aliases: recvline, readline, read_line, readln, recvln
['Recieve', 'until', 'the', 'next', 'newline', 'default', '\\\\', 'n', '.', 'The', 'newline', 'string', 'can', 'be', 'changed', 'by', 'changing', 'nc', '.', 'LINE_ENDING', '.', 'The', 'newline', 'will', 'be', 'returned', 'as', 'part', 'of', 'the', 'string', '.']
train
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L762-L772
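A minimal usage sketch for the recv_line method shown above; the connection details are hypothetical and nclib's constructor may take its endpoint differently, so treat this as a sketch rather than the library's documented API:

    from nclib import Netcat

    nc = Netcat(('example.com', 4000))   # hypothetical host/port
    line = nc.recv_line()                # blocks until nc.LINE_ENDING arrives; the newline is kept
    nc.close()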
5,201
ianmiell/shutit
shutit_class.py
ShutIt.get_current_environment
def get_current_environment(self, note=None): """Returns the current environment id from the current shutit_pexpect_session """ shutit_global.shutit_global_object.yield_to_draw() self.handle_note(note) res = self.get_current_shutit_pexpect_session_environment().environment_id self.handle_note_after(note) return res
python
def get_current_environment(self, note=None): """Returns the current environment id from the current shutit_pexpect_session """ shutit_global.shutit_global_object.yield_to_draw() self.handle_note(note) res = self.get_current_shutit_pexpect_session_environment().environment_id self.handle_note_after(note) return res
['def', 'get_current_environment', '(', 'self', ',', 'note', '=', 'None', ')', ':', 'shutit_global', '.', 'shutit_global_object', '.', 'yield_to_draw', '(', ')', 'self', '.', 'handle_note', '(', 'note', ')', 'res', '=', 'self', '.', 'get_current_shutit_pexpect_session_environment', '(', ')', '.', 'environment_id', 'self', '.', 'handle_note_after', '(', 'note', ')', 'return', 'res']
Returns the current environment id from the current shutit_pexpect_session
['Returns', 'the', 'current', 'environment', 'id', 'from', 'the', 'current', 'shutit_pexpect_session']
train
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L632-L640
5,202
wonambi-python/wonambi
wonambi/ioeeg/micromed.py
Micromed.return_markers
def return_markers(self): """Return all the markers (also called triggers or events). Returns ------- list of dict where each dict contains 'name' as str, 'start' and 'end' as float in seconds from the start of the recordings, and 'chan' as list of str with the channels involved (if not of relevance, it's None). Raises ------ FileNotFoundError when it cannot read the events for some reason (don't use other exceptions). """ markers = [] triggers = self._triggers DTYPE_MAX = iinfo(triggers.dtype['sample']).max triggers = triggers[triggers['sample'] != DTYPE_MAX] for trig in triggers: markers.append( {'name': str(trig['code']), 'start': trig['sample'] / self._s_freq, 'end': trig['sample'] / self._s_freq, }) return markers
python
def return_markers(self): """Return all the markers (also called triggers or events). Returns ------- list of dict where each dict contains 'name' as str, 'start' and 'end' as float in seconds from the start of the recordings, and 'chan' as list of str with the channels involved (if not of relevance, it's None). Raises ------ FileNotFoundError when it cannot read the events for some reason (don't use other exceptions). """ markers = [] triggers = self._triggers DTYPE_MAX = iinfo(triggers.dtype['sample']).max triggers = triggers[triggers['sample'] != DTYPE_MAX] for trig in triggers: markers.append( {'name': str(trig['code']), 'start': trig['sample'] / self._s_freq, 'end': trig['sample'] / self._s_freq, }) return markers
['def', 'return_markers', '(', 'self', ')', ':', 'markers', '=', '[', ']', 'triggers', '=', 'self', '.', '_triggers', 'DTYPE_MAX', '=', 'iinfo', '(', 'triggers', '.', 'dtype', '[', "'sample'", ']', ')', '.', 'max', 'triggers', '=', 'triggers', '[', 'triggers', '[', "'sample'", ']', '!=', 'DTYPE_MAX', ']', 'for', 'trig', 'in', 'triggers', ':', 'markers', '.', 'append', '(', '{', "'name'", ':', 'str', '(', 'trig', '[', "'code'", ']', ')', ',', "'start'", ':', 'trig', '[', "'sample'", ']', '/', 'self', '.', '_s_freq', ',', "'end'", ':', 'trig', '[', "'sample'", ']', '/', 'self', '.', '_s_freq', ',', '}', ')', 'return', 'markers']
Return all the markers (also called triggers or events). Returns ------- list of dict where each dict contains 'name' as str, 'start' and 'end' as float in seconds from the start of the recordings, and 'chan' as list of str with the channels involved (if not of relevance, it's None). Raises ------ FileNotFoundError when it cannot read the events for some reason (don't use other exceptions).
['Return', 'all', 'the', 'markers', '(', 'also', 'called', 'triggers', 'or', 'events', ')', '.']
train
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/micromed.py#L116-L145
5,203
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor_ext.py
brocade_system_monitor_ext.show_system_monitor_output_switch_status_rbridge_id_out
def show_system_monitor_output_switch_status_rbridge_id_out(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_system_monitor = ET.Element("show_system_monitor") config = show_system_monitor output = ET.SubElement(show_system_monitor, "output") switch_status = ET.SubElement(output, "switch-status") rbridge_id_out = ET.SubElement(switch_status, "rbridge-id-out") rbridge_id_out.text = kwargs.pop('rbridge_id_out') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def show_system_monitor_output_switch_status_rbridge_id_out(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_system_monitor = ET.Element("show_system_monitor") config = show_system_monitor output = ET.SubElement(show_system_monitor, "output") switch_status = ET.SubElement(output, "switch-status") rbridge_id_out = ET.SubElement(switch_status, "rbridge-id-out") rbridge_id_out.text = kwargs.pop('rbridge_id_out') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'show_system_monitor_output_switch_status_rbridge_id_out', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'show_system_monitor', '=', 'ET', '.', 'Element', '(', '"show_system_monitor"', ')', 'config', '=', 'show_system_monitor', 'output', '=', 'ET', '.', 'SubElement', '(', 'show_system_monitor', ',', '"output"', ')', 'switch_status', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"switch-status"', ')', 'rbridge_id_out', '=', 'ET', '.', 'SubElement', '(', 'switch_status', ',', '"rbridge-id-out"', ')', 'rbridge_id_out', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'rbridge_id_out'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor_ext.py#L25-L37
5,204
hyperledger/sawtooth-core
rest_api/sawtooth_rest_api/route_handlers.py
RouteHandler._get_filter_ids
def _get_filter_ids(cls, request): """Parses the `id` filter paramter from the url query. """ id_query = request.url.query.get('id', None) if id_query is None: return None filter_ids = id_query.split(',') for filter_id in filter_ids: cls._validate_id(filter_id) return filter_ids
python
def _get_filter_ids(cls, request): """Parses the `id` filter paramter from the url query. """ id_query = request.url.query.get('id', None) if id_query is None: return None filter_ids = id_query.split(',') for filter_id in filter_ids: cls._validate_id(filter_id) return filter_ids
['def', '_get_filter_ids', '(', 'cls', ',', 'request', ')', ':', 'id_query', '=', 'request', '.', 'url', '.', 'query', '.', 'get', '(', "'id'", ',', 'None', ')', 'if', 'id_query', 'is', 'None', ':', 'return', 'None', 'filter_ids', '=', 'id_query', '.', 'split', '(', "','", ')', 'for', 'filter_id', 'in', 'filter_ids', ':', 'cls', '.', '_validate_id', '(', 'filter_id', ')', 'return', 'filter_ids']
Parses the `id` filter paramter from the url query.
['Parses', 'the', 'id', 'filter', 'paramter', 'from', 'the', 'url', 'query', '.']
train
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/rest_api/sawtooth_rest_api/route_handlers.py#L1011-L1023
5,205
bjmorgan/lattice_mc
lattice_mc/simulation.py
Simulation.collective_diffusion_coefficient
def collective_diffusion_coefficient( self ): """ Returns the collective or "jump" diffusion coefficient, D_J. Args: None Returns: (Float): The collective diffusion coefficient, D_J. """ if self.has_run: return self.atoms.collective_dr_squared() / ( 6.0 * self.lattice.time ) else: return None
python
def collective_diffusion_coefficient( self ): """ Returns the collective or "jump" diffusion coefficient, D_J. Args: None Returns: (Float): The collective diffusion coefficient, D_J. """ if self.has_run: return self.atoms.collective_dr_squared() / ( 6.0 * self.lattice.time ) else: return None
['def', 'collective_diffusion_coefficient', '(', 'self', ')', ':', 'if', 'self', '.', 'has_run', ':', 'return', 'self', '.', 'atoms', '.', 'collective_dr_squared', '(', ')', '/', '(', '6.0', '*', 'self', '.', 'lattice', '.', 'time', ')', 'else', ':', 'return', 'None']
Returns the collective or "jump" diffusion coefficient, D_J. Args: None Returns: (Float): The collective diffusion coefficient, D_J.
['Returns', 'the', 'collective', 'or', 'jump', 'diffusion', 'coefficient', 'D_J', '.']
train
https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/simulation.py#L274-L287
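Reading the expression in the method above as an Einstein-style estimator evaluated at the final simulation time, and assuming the three-dimensional lattice implied by the factor 6 = 2d, it corresponds to

    D_J = < ( sum_i dr_i )^2 > / ( 6 t )

where the sum runs over the displacements of all mobile atoms and t is the elapsed lattice time; the method returns None until the simulation has actually run.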
5,206
scarface-4711/denonavr
denonavr/denonavr.py
DenonAVR.set_volume
def set_volume(self, volume): """ Set receiver volume via HTTP get command. Volume is send in a format like -50.0. Minimum is -80.0, maximum at 18.0 """ if volume < -80 or volume > 18: raise ValueError("Invalid volume") try: return bool(self.send_get_command( self._urls.command_set_volume % volume)) except requests.exceptions.RequestException: _LOGGER.error("Connection error: set volume command not sent.") return False
python
def set_volume(self, volume): """ Set receiver volume via HTTP get command. Volume is send in a format like -50.0. Minimum is -80.0, maximum at 18.0 """ if volume < -80 or volume > 18: raise ValueError("Invalid volume") try: return bool(self.send_get_command( self._urls.command_set_volume % volume)) except requests.exceptions.RequestException: _LOGGER.error("Connection error: set volume command not sent.") return False
['def', 'set_volume', '(', 'self', ',', 'volume', ')', ':', 'if', 'volume', '<', '-', '80', 'or', 'volume', '>', '18', ':', 'raise', 'ValueError', '(', '"Invalid volume"', ')', 'try', ':', 'return', 'bool', '(', 'self', '.', 'send_get_command', '(', 'self', '.', '_urls', '.', 'command_set_volume', '%', 'volume', ')', ')', 'except', 'requests', '.', 'exceptions', '.', 'RequestException', ':', '_LOGGER', '.', 'error', '(', '"Connection error: set volume command not sent."', ')', 'return', 'False']
Set receiver volume via HTTP get command. Volume is send in a format like -50.0. Minimum is -80.0, maximum at 18.0
['Set', 'receiver', 'volume', 'via', 'HTTP', 'get', 'command', '.']
train
https://github.com/scarface-4711/denonavr/blob/59a136e27b43cb1d1e140cf67705087b3aa377cd/denonavr/denonavr.py#L1640-L1655
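A short usage sketch for set_volume above; the receiver address and construction are hypothetical, so check denonavr's own documentation for the exact setup:

    import denonavr

    receiver = denonavr.DenonAVR('192.168.0.250')   # hypothetical receiver IP
    receiver.set_volume(-40.0)    # accepted: -80.0 <= volume <= 18.0
    # receiver.set_volume(25)     # would raise ValueError('Invalid volume')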
5,207
payplug/payplug-python
payplug/__init__.py
Card.delete
def delete(customer, card): """ Delete a card from its id. :param customer: The customer id or object :type customer: string|Customer :param card: The card id or object :type card: string|Card """ if isinstance(customer, resources.Customer): customer = customer.id if isinstance(card, resources.Card): card = card.id http_client = HttpClient() http_client.delete(routes.url(routes.CARD_RESOURCE, resource_id=card, customer_id=customer))
python
def delete(customer, card): """ Delete a card from its id. :param customer: The customer id or object :type customer: string|Customer :param card: The card id or object :type card: string|Card """ if isinstance(customer, resources.Customer): customer = customer.id if isinstance(card, resources.Card): card = card.id http_client = HttpClient() http_client.delete(routes.url(routes.CARD_RESOURCE, resource_id=card, customer_id=customer))
['def', 'delete', '(', 'customer', ',', 'card', ')', ':', 'if', 'isinstance', '(', 'customer', ',', 'resources', '.', 'Customer', ')', ':', 'customer', '=', 'customer', '.', 'id', 'if', 'isinstance', '(', 'card', ',', 'resources', '.', 'Card', ')', ':', 'card', '=', 'card', '.', 'id', 'http_client', '=', 'HttpClient', '(', ')', 'http_client', '.', 'delete', '(', 'routes', '.', 'url', '(', 'routes', '.', 'CARD_RESOURCE', ',', 'resource_id', '=', 'card', ',', 'customer_id', '=', 'customer', ')', ')']
Delete a card from its id. :param customer: The customer id or object :type customer: string|Customer :param card: The card id or object :type card: string|Card
['Delete', 'a', 'card', 'from', 'its', 'id', '.']
train
https://github.com/payplug/payplug-python/blob/42dec9d6bff420dd0c26e51a84dd000adff04331/payplug/__init__.py#L284-L299
5,208
pybel/pybel
src/pybel/manager/query_manager.py
QueryManager.query_induction
def query_induction(self, nodes: List[Node]) -> List[Edge]: """Get all edges between any of the given nodes (minimum length of 2).""" if len(nodes) < 2: raise ValueError('not enough nodes given to induce over') return self.session.query(Edge).filter(self._edge_both_nodes(nodes)).all()
python
def query_induction(self, nodes: List[Node]) -> List[Edge]: """Get all edges between any of the given nodes (minimum length of 2).""" if len(nodes) < 2: raise ValueError('not enough nodes given to induce over') return self.session.query(Edge).filter(self._edge_both_nodes(nodes)).all()
['def', 'query_induction', '(', 'self', ',', 'nodes', ':', 'List', '[', 'Node', ']', ')', '->', 'List', '[', 'Edge', ']', ':', 'if', 'len', '(', 'nodes', ')', '<', '2', ':', 'raise', 'ValueError', '(', "'not enough nodes given to induce over'", ')', 'return', 'self', '.', 'session', '.', 'query', '(', 'Edge', ')', '.', 'filter', '(', 'self', '.', '_edge_both_nodes', '(', 'nodes', ')', ')', '.', 'all', '(', ')']
Get all edges between any of the given nodes (minimum length of 2).
['Get', 'all', 'edges', 'between', 'any', 'of', 'the', 'given', 'nodes', '(', 'minimum', 'length', 'of', '2', ')', '.']
train
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/query_manager.py#L229-L234
5,209
exa-analytics/exa
exa/typed.py
_typed_from_items
def _typed_from_items(items): """ Construct strongly typed attributes (properties) from a dictionary of name and :class:`~exa.typed.Typed` object pairs. See Also: :func:`~exa.typed.typed` """ dct = {} for name, attr in items: if isinstance(attr, Typed): dct[name] = attr(name) return dct
python
def _typed_from_items(items): """ Construct strongly typed attributes (properties) from a dictionary of name and :class:`~exa.typed.Typed` object pairs. See Also: :func:`~exa.typed.typed` """ dct = {} for name, attr in items: if isinstance(attr, Typed): dct[name] = attr(name) return dct
['def', '_typed_from_items', '(', 'items', ')', ':', 'dct', '=', '{', '}', 'for', 'name', ',', 'attr', 'in', 'items', ':', 'if', 'isinstance', '(', 'attr', ',', 'Typed', ')', ':', 'dct', '[', 'name', ']', '=', 'attr', '(', 'name', ')', 'return', 'dct']
Construct strongly typed attributes (properties) from a dictionary of name and :class:`~exa.typed.Typed` object pairs. See Also: :func:`~exa.typed.typed`
['Construct', 'strongly', 'typed', 'attributes', '(', 'properties', ')', 'from', 'a', 'dictionary', 'of', 'name', 'and', ':', 'class', ':', '~exa', '.', 'typed', '.', 'Typed', 'object', 'pairs', '.']
train
https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/typed.py#L47-L59
5,210
KelSolaar/Foundations
foundations/namespace.py
remove_namespace
def remove_namespace(attribute, namespace_splitter=NAMESPACE_SPLITTER, root_only=False): """ Returns attribute with stripped foundations.namespace. Usage:: >>> remove_namespace("grandParent|parent|child") u'child' >>> remove_namespace("grandParent|parent|child", root_only=True) u'parent|child' :param attribute: Attribute. :type attribute: unicode :param namespace_splitter: Namespace splitter character. :type namespace_splitter: unicode :param root_only: Remove only root foundations.namespace. :type root_only: bool :return: Attribute without foundations.namespace. :rtype: unicode """ attribute_tokens = attribute.split(namespace_splitter) stripped_attribute = root_only and namespace_splitter.join(attribute_tokens[1:]) or \ attribute_tokens[len(attribute_tokens) - 1] LOGGER.debug("> Attribute: '{0}', stripped attribute: '{1}'.".format(attribute, stripped_attribute)) return stripped_attribute
python
def remove_namespace(attribute, namespace_splitter=NAMESPACE_SPLITTER, root_only=False): """ Returns attribute with stripped foundations.namespace. Usage:: >>> remove_namespace("grandParent|parent|child") u'child' >>> remove_namespace("grandParent|parent|child", root_only=True) u'parent|child' :param attribute: Attribute. :type attribute: unicode :param namespace_splitter: Namespace splitter character. :type namespace_splitter: unicode :param root_only: Remove only root foundations.namespace. :type root_only: bool :return: Attribute without foundations.namespace. :rtype: unicode """ attribute_tokens = attribute.split(namespace_splitter) stripped_attribute = root_only and namespace_splitter.join(attribute_tokens[1:]) or \ attribute_tokens[len(attribute_tokens) - 1] LOGGER.debug("> Attribute: '{0}', stripped attribute: '{1}'.".format(attribute, stripped_attribute)) return stripped_attribute
['def', 'remove_namespace', '(', 'attribute', ',', 'namespace_splitter', '=', 'NAMESPACE_SPLITTER', ',', 'root_only', '=', 'False', ')', ':', 'attribute_tokens', '=', 'attribute', '.', 'split', '(', 'namespace_splitter', ')', 'stripped_attribute', '=', 'root_only', 'and', 'namespace_splitter', '.', 'join', '(', 'attribute_tokens', '[', '1', ':', ']', ')', 'or', 'attribute_tokens', '[', 'len', '(', 'attribute_tokens', ')', '-', '1', ']', 'LOGGER', '.', 'debug', '(', '"> Attribute: \'{0}\', stripped attribute: \'{1}\'."', '.', 'format', '(', 'attribute', ',', 'stripped_attribute', ')', ')', 'return', 'stripped_attribute']
Returns attribute with stripped foundations.namespace. Usage:: >>> remove_namespace("grandParent|parent|child") u'child' >>> remove_namespace("grandParent|parent|child", root_only=True) u'parent|child' :param attribute: Attribute. :type attribute: unicode :param namespace_splitter: Namespace splitter character. :type namespace_splitter: unicode :param root_only: Remove only root foundations.namespace. :type root_only: bool :return: Attribute without foundations.namespace. :rtype: unicode
['Returns', 'attribute', 'with', 'stripped', 'foundations', '.', 'namespace', '.']
train
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/namespace.py#L98-L123
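The stripped_attribute line above relies on the old `cond and a or b` ternary idiom. A note on its behaviour: when root_only is true but the attribute has no namespace (the joined remainder is the empty string), the `or` falls through to the last token, so remove_namespace('child', root_only=True) still returns 'child'. An equivalent modern spelling that keeps that fallback explicit:

    tail = namespace_splitter.join(attribute_tokens[1:])
    stripped_attribute = (tail or attribute_tokens[-1]) if root_only else attribute_tokens[-1]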
5,211
pbrod/numdifftools
src/numdifftools/finite_difference.py
LogRule.rule
def rule(self): """ Return finite differencing rule. The rule is for a nominal unit step size, and must be scaled later to reflect the local step size. Member methods used ------------------- _fd_matrix Member variables used --------------------- n order method """ step_ratio = self.step_ratio method = self.method if method in ('multicomplex', ) or self.n == 0: return np.ones((1,)) order, method_order = self.n - 1, self._method_order parity = self._parity(method, order, method_order) step = self._richardson_step() num_terms, ix = (order + method_order) // step, order // step fd_rules = FD_RULES.get((step_ratio, parity, num_terms)) if fd_rules is None: fd_mat = self._fd_matrix(step_ratio, parity, num_terms) fd_rules = linalg.pinv(fd_mat) FD_RULES[(step_ratio, parity, num_terms)] = fd_rules if self._flip_fd_rule: return -fd_rules[ix] return fd_rules[ix]
python
def rule(self): """ Return finite differencing rule. The rule is for a nominal unit step size, and must be scaled later to reflect the local step size. Member methods used ------------------- _fd_matrix Member variables used --------------------- n order method """ step_ratio = self.step_ratio method = self.method if method in ('multicomplex', ) or self.n == 0: return np.ones((1,)) order, method_order = self.n - 1, self._method_order parity = self._parity(method, order, method_order) step = self._richardson_step() num_terms, ix = (order + method_order) // step, order // step fd_rules = FD_RULES.get((step_ratio, parity, num_terms)) if fd_rules is None: fd_mat = self._fd_matrix(step_ratio, parity, num_terms) fd_rules = linalg.pinv(fd_mat) FD_RULES[(step_ratio, parity, num_terms)] = fd_rules if self._flip_fd_rule: return -fd_rules[ix] return fd_rules[ix]
['def', 'rule', '(', 'self', ')', ':', 'step_ratio', '=', 'self', '.', 'step_ratio', 'method', '=', 'self', '.', 'method', 'if', 'method', 'in', '(', "'multicomplex'", ',', ')', 'or', 'self', '.', 'n', '==', '0', ':', 'return', 'np', '.', 'ones', '(', '(', '1', ',', ')', ')', 'order', ',', 'method_order', '=', 'self', '.', 'n', '-', '1', ',', 'self', '.', '_method_order', 'parity', '=', 'self', '.', '_parity', '(', 'method', ',', 'order', ',', 'method_order', ')', 'step', '=', 'self', '.', '_richardson_step', '(', ')', 'num_terms', ',', 'ix', '=', '(', 'order', '+', 'method_order', ')', '//', 'step', ',', 'order', '//', 'step', 'fd_rules', '=', 'FD_RULES', '.', 'get', '(', '(', 'step_ratio', ',', 'parity', ',', 'num_terms', ')', ')', 'if', 'fd_rules', 'is', 'None', ':', 'fd_mat', '=', 'self', '.', '_fd_matrix', '(', 'step_ratio', ',', 'parity', ',', 'num_terms', ')', 'fd_rules', '=', 'linalg', '.', 'pinv', '(', 'fd_mat', ')', 'FD_RULES', '[', '(', 'step_ratio', ',', 'parity', ',', 'num_terms', ')', ']', '=', 'fd_rules', 'if', 'self', '.', '_flip_fd_rule', ':', 'return', '-', 'fd_rules', '[', 'ix', ']', 'return', 'fd_rules', '[', 'ix', ']']
Return finite differencing rule. The rule is for a nominal unit step size, and must be scaled later to reflect the local step size. Member methods used ------------------- _fd_matrix Member variables used --------------------- n order method
['Return', 'finite', 'differencing', 'rule', '.']
train
https://github.com/pbrod/numdifftools/blob/2c88878df732c9c6629febea56e7a91fd898398d/src/numdifftools/finite_difference.py#L206-L240
5,212
mcs07/ChemDataExtractor
chemdataextractor/parse/cem.py
standardize_role
def standardize_role(role): """Convert role text into standardized form.""" role = role.lower() if any(c in role for c in {'synthesis', 'give', 'yield', 'afford', 'product', 'preparation of'}): return 'product' return role
python
def standardize_role(role): """Convert role text into standardized form.""" role = role.lower() if any(c in role for c in {'synthesis', 'give', 'yield', 'afford', 'product', 'preparation of'}): return 'product' return role
['def', 'standardize_role', '(', 'role', ')', ':', 'role', '=', 'role', '.', 'lower', '(', ')', 'if', 'any', '(', 'c', 'in', 'role', 'for', 'c', 'in', '{', "'synthesis'", ',', "'give'", ',', "'yield'", ',', "'afford'", ',', "'product'", ',', "'preparation of'", '}', ')', ':', 'return', "'product'", 'return', 'role']
Convert role text into standardized form.
['Convert', 'role', 'text', 'into', 'standardized', 'form', '.']
train
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/parse/cem.py#L279-L284
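standardize_role above is a pure string check, so a couple of calls pin down its behaviour; the inputs are made up for illustration and the import path is the one shown in this row:

    from chemdataextractor.parse.cem import standardize_role

    standardize_role('Synthesis of 5')   # 'synthesis' is a trigger word -> 'product'
    standardize_role('Catalyst')         # no trigger word -> 'catalyst' (lowercased)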
5,213
Rapptz/discord.py
discord/client.py
Client.on_error
async def on_error(self, event_method, *args, **kwargs): """|coro| The default error handler provided by the client. By default this prints to :data:`sys.stderr` however it could be overridden to have a different implementation. Check :func:`discord.on_error` for more details. """ print('Ignoring exception in {}'.format(event_method), file=sys.stderr) traceback.print_exc()
python
async def on_error(self, event_method, *args, **kwargs): """|coro| The default error handler provided by the client. By default this prints to :data:`sys.stderr` however it could be overridden to have a different implementation. Check :func:`discord.on_error` for more details. """ print('Ignoring exception in {}'.format(event_method), file=sys.stderr) traceback.print_exc()
['async', 'def', 'on_error', '(', 'self', ',', 'event_method', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'print', '(', "'Ignoring exception in {}'", '.', 'format', '(', 'event_method', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'traceback', '.', 'print_exc', '(', ')']
|coro| The default error handler provided by the client. By default this prints to :data:`sys.stderr` however it could be overridden to have a different implementation. Check :func:`discord.on_error` for more details.
['|coro|']
train
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/client.py#L300-L310
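Since the docstring above says the default handler can be overridden, here is a minimal sketch of doing so in a Client subclass; the logging choice is only an example, not discord.py's recommended pattern:

    import logging
    import discord

    class MyClient(discord.Client):
        async def on_error(self, event_method, *args, **kwargs):
            # replace the default stderr print with application logging
            logging.exception('Unhandled exception in %s', event_method)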
5,214
marcomusy/vtkplotter
vtkplotter/utils.py
grep
def grep(filename, tag, firstOccurrence=False): """Greps the line that starts with a specific `tag` string from inside a file.""" import re try: afile = open(filename, "r") except: print("Error in utils.grep(): cannot open file", filename) exit() content = None for line in afile: if re.search(tag, line): content = line.split() if firstOccurrence: break if content: if len(content) == 2: content = content[1] else: content = content[1:] afile.close() return content
python
def grep(filename, tag, firstOccurrence=False): """Greps the line that starts with a specific `tag` string from inside a file.""" import re try: afile = open(filename, "r") except: print("Error in utils.grep(): cannot open file", filename) exit() content = None for line in afile: if re.search(tag, line): content = line.split() if firstOccurrence: break if content: if len(content) == 2: content = content[1] else: content = content[1:] afile.close() return content
['def', 'grep', '(', 'filename', ',', 'tag', ',', 'firstOccurrence', '=', 'False', ')', ':', 'import', 're', 'try', ':', 'afile', '=', 'open', '(', 'filename', ',', '"r"', ')', 'except', ':', 'print', '(', '"Error in utils.grep(): cannot open file"', ',', 'filename', ')', 'exit', '(', ')', 'content', '=', 'None', 'for', 'line', 'in', 'afile', ':', 'if', 're', '.', 'search', '(', 'tag', ',', 'line', ')', ':', 'content', '=', 'line', '.', 'split', '(', ')', 'if', 'firstOccurrence', ':', 'break', 'if', 'content', ':', 'if', 'len', '(', 'content', ')', '==', '2', ':', 'content', '=', 'content', '[', '1', ']', 'else', ':', 'content', '=', 'content', '[', '1', ':', ']', 'afile', '.', 'close', '(', ')', 'return', 'content']
Greps the line that starts with a specific `tag` string from inside a file.
['Greps', 'the', 'line', 'that', 'starts', 'with', 'a', 'specific', 'tag', 'string', 'from', 'inside', 'a', 'file', '.']
train
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/utils.py#L256-L277
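A usage sketch for grep above; the file and its contents are hypothetical, and the import path is the one shown in this row. Note that the match uses re.search, so the tag can appear anywhere in the line, and the returned value is the matched line split on whitespace with its first token dropped:

    from vtkplotter.utils import grep

    # settings.txt (hypothetical) contains the line:  resolution 1920 1080
    grep('settings.txt', 'resolution')
    # -> ['1920', '1080']; a two-token matched line would instead return a single string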
5,215
awslabs/aws-sam-cli
samcli/commands/local/lib/provider.py
LayerVersion._compute_layer_name
def _compute_layer_name(is_defined_within_template, arn): """ Computes a unique name based on the LayerVersion Arn Format: <Name of the LayerVersion>-<Version of the LayerVersion>-<sha256 of the arn> Parameters ---------- is_defined_within_template bool True if the resource is a Ref to a resource otherwise False arn str ARN of the Resource Returns ------- str A unique name that represents the LayerVersion """ # If the Layer is defined in the template, the arn will represent the LogicalId of the LayerVersion Resource, # which does not require creating a name based on the arn. if is_defined_within_template: return arn try: _, layer_name, layer_version = arn.rsplit(':', 2) except ValueError: raise InvalidLayerVersionArn(arn + " is an Invalid Layer Arn.") return LayerVersion.LAYER_NAME_DELIMETER.join([layer_name, layer_version, hashlib.sha256(arn.encode('utf-8')).hexdigest()[0:10]])
python
def _compute_layer_name(is_defined_within_template, arn): """ Computes a unique name based on the LayerVersion Arn Format: <Name of the LayerVersion>-<Version of the LayerVersion>-<sha256 of the arn> Parameters ---------- is_defined_within_template bool True if the resource is a Ref to a resource otherwise False arn str ARN of the Resource Returns ------- str A unique name that represents the LayerVersion """ # If the Layer is defined in the template, the arn will represent the LogicalId of the LayerVersion Resource, # which does not require creating a name based on the arn. if is_defined_within_template: return arn try: _, layer_name, layer_version = arn.rsplit(':', 2) except ValueError: raise InvalidLayerVersionArn(arn + " is an Invalid Layer Arn.") return LayerVersion.LAYER_NAME_DELIMETER.join([layer_name, layer_version, hashlib.sha256(arn.encode('utf-8')).hexdigest()[0:10]])
['def', '_compute_layer_name', '(', 'is_defined_within_template', ',', 'arn', ')', ':', '# If the Layer is defined in the template, the arn will represent the LogicalId of the LayerVersion Resource,', '# which does not require creating a name based on the arn.', 'if', 'is_defined_within_template', ':', 'return', 'arn', 'try', ':', '_', ',', 'layer_name', ',', 'layer_version', '=', 'arn', '.', 'rsplit', '(', "':'", ',', '2', ')', 'except', 'ValueError', ':', 'raise', 'InvalidLayerVersionArn', '(', 'arn', '+', '" is an Invalid Layer Arn."', ')', 'return', 'LayerVersion', '.', 'LAYER_NAME_DELIMETER', '.', 'join', '(', '[', 'layer_name', ',', 'layer_version', ',', 'hashlib', '.', 'sha256', '(', 'arn', '.', 'encode', '(', "'utf-8'", ')', ')', '.', 'hexdigest', '(', ')', '[', '0', ':', '10', ']', ']', ')']
Computes a unique name based on the LayerVersion Arn Format: <Name of the LayerVersion>-<Version of the LayerVersion>-<sha256 of the arn> Parameters ---------- is_defined_within_template bool True if the resource is a Ref to a resource otherwise False arn str ARN of the Resource Returns ------- str A unique name that represents the LayerVersion
['Computes', 'a', 'unique', 'name', 'based', 'on', 'the', 'LayerVersion', 'Arn']
train
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/provider.py#L102-L134
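A worked example of the rsplit step above, using a made-up ARN in the usual arn:aws:lambda:<region>:<account>:layer:<name>:<version> shape:

    arn = 'arn:aws:lambda:us-east-1:123456789012:layer:my-layer:3'   # hypothetical ARN
    _, layer_name, layer_version = arn.rsplit(':', 2)
    # layer_name == 'my-layer', layer_version == '3'
    # the computed name then joins 'my-layer', '3' and the first 10 hex chars of
    # sha256(arn) using LayerVersion.LAYER_NAME_DELIMETER (a constant not shown in this row)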
5,216
impact27/registrator
registrator/image.py
center_of_mass
def center_of_mass(X, Y): """Get center of mass Parameters ---------- X: 1d array X values Y: 1d array Y values Returns ------- res: number The position of the center of mass in X Notes ----- Uses least squares """ X = np.asarray(X) Y = np.asarray(Y) return (X * Y).sum() / Y.sum()
python
def center_of_mass(X, Y): """Get center of mass Parameters ---------- X: 1d array X values Y: 1d array Y values Returns ------- res: number The position of the center of mass in X Notes ----- Uses least squares """ X = np.asarray(X) Y = np.asarray(Y) return (X * Y).sum() / Y.sum()
['def', 'center_of_mass', '(', 'X', ',', 'Y', ')', ':', 'X', '=', 'np', '.', 'asarray', '(', 'X', ')', 'Y', '=', 'np', '.', 'asarray', '(', 'Y', ')', 'return', '(', 'X', '*', 'Y', ')', '.', 'sum', '(', ')', '/', 'Y', '.', 'sum', '(', ')']
Get center of mass Parameters ---------- X: 1d array X values Y: 1d array Y values Returns ------- res: number The position of the center of mass in X Notes ----- Uses least squares
['Get', 'center', 'of', 'mass']
train
https://github.com/impact27/registrator/blob/04c099d83e0466207dc5b2e40d9b03db020d4dad/registrator/image.py#L1019-L1040
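center_of_mass above is an intensity-weighted mean, so a tiny numeric check makes it concrete:

    import numpy as np

    X = np.array([0.0, 1.0, 2.0])
    Y = np.array([1.0, 1.0, 2.0])
    (X * Y).sum() / Y.sum()   # (0 + 1 + 4) / 4 = 1.25, the weighted centre along X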
5,217
senaite/senaite.core
bika/lims/content/worksheet.py
Worksheet.getNumberOfRegularSamples
def getNumberOfRegularSamples(self): """ Returns the number of regular samples. :returns: number of regular samples :rtype: integer """ analyses = self.getRegularAnalyses() samples = [a.getRequestUID() for a in analyses] # discarding any duplicate values return len(set(samples))
python
def getNumberOfRegularSamples(self): """ Returns the number of regular samples. :returns: number of regular samples :rtype: integer """ analyses = self.getRegularAnalyses() samples = [a.getRequestUID() for a in analyses] # discarding any duplicate values return len(set(samples))
['def', 'getNumberOfRegularSamples', '(', 'self', ')', ':', 'analyses', '=', 'self', '.', 'getRegularAnalyses', '(', ')', 'samples', '=', '[', 'a', '.', 'getRequestUID', '(', ')', 'for', 'a', 'in', 'analyses', ']', '# discarding any duplicate values', 'return', 'len', '(', 'set', '(', 'samples', ')', ')']
Returns the number of regular samples. :returns: number of regular samples :rtype: integer
['Returns', 'the', 'number', 'of', 'regular', 'samples', '.', ':', 'returns', ':', 'number', 'of', 'regular', 'samples', ':', 'rtype', ':', 'integer']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/worksheet.py#L1232-L1241
5,218
ejeschke/ginga
ginga/ImageView.py
ImageViewBase.clear
def clear(self): """Clear the displayed image.""" self._imgobj = None try: # See if there is an image on the canvas self.canvas.delete_object_by_tag(self._canvas_img_tag) self.redraw() except KeyError: pass
python
def clear(self): """Clear the displayed image.""" self._imgobj = None try: # See if there is an image on the canvas self.canvas.delete_object_by_tag(self._canvas_img_tag) self.redraw() except KeyError: pass
['def', 'clear', '(', 'self', ')', ':', 'self', '.', '_imgobj', '=', 'None', 'try', ':', '# See if there is an image on the canvas', 'self', '.', 'canvas', '.', 'delete_object_by_tag', '(', 'self', '.', '_canvas_img_tag', ')', 'self', '.', 'redraw', '(', ')', 'except', 'KeyError', ':', 'pass']
Clear the displayed image.
['Clear', 'the', 'displayed', 'image', '.']
train
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L1003-L1011
5,219
google/apitools
apitools/base/protorpclite/messages.py
_DefinitionClass.definition_name
def definition_name(cls): """Helper method for creating definition name. Names will be generated to include the classes package name, scope (if the class is nested in another definition) and class name. By default, the package name for a definition is derived from its module name. However, this value can be overriden by placing a 'package' attribute in the module that contains the definition class. For example: package = 'some.alternate.package' class MyMessage(Message): ... >>> MyMessage.definition_name() some.alternate.package.MyMessage Returns: Dot-separated fully qualified name of definition. """ outer_definition_name = cls.outer_definition_name() if outer_definition_name is None: return six.text_type(cls.__name__) return u'%s.%s' % (outer_definition_name, cls.__name__)
python
def definition_name(cls): """Helper method for creating definition name. Names will be generated to include the classes package name, scope (if the class is nested in another definition) and class name. By default, the package name for a definition is derived from its module name. However, this value can be overriden by placing a 'package' attribute in the module that contains the definition class. For example: package = 'some.alternate.package' class MyMessage(Message): ... >>> MyMessage.definition_name() some.alternate.package.MyMessage Returns: Dot-separated fully qualified name of definition. """ outer_definition_name = cls.outer_definition_name() if outer_definition_name is None: return six.text_type(cls.__name__) return u'%s.%s' % (outer_definition_name, cls.__name__)
['def', 'definition_name', '(', 'cls', ')', ':', 'outer_definition_name', '=', 'cls', '.', 'outer_definition_name', '(', ')', 'if', 'outer_definition_name', 'is', 'None', ':', 'return', 'six', '.', 'text_type', '(', 'cls', '.', '__name__', ')', 'return', "u'%s.%s'", '%', '(', 'outer_definition_name', ',', 'cls', '.', '__name__', ')']
Helper method for creating definition name. Names will be generated to include the classes package name, scope (if the class is nested in another definition) and class name. By default, the package name for a definition is derived from its module name. However, this value can be overriden by placing a 'package' attribute in the module that contains the definition class. For example: package = 'some.alternate.package' class MyMessage(Message): ... >>> MyMessage.definition_name() some.alternate.package.MyMessage Returns: Dot-separated fully qualified name of definition.
['Helper', 'method', 'for', 'creating', 'definition', 'name', '.']
train
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/messages.py#L227-L254
5,220
ZELLMECHANIK-DRESDEN/dclab
dclab/polygon_filter.py
PolygonFilter.copy
def copy(self, invert=False): """Return a copy of the current instance Parameters ---------- invert: bool The copy will be inverted w.r.t. the original """ if invert: inverted = not self.inverted else: inverted = self.inverted return PolygonFilter(axes=self.axes, points=self.points, name=self.name, inverted=inverted)
python
def copy(self, invert=False): """Return a copy of the current instance Parameters ---------- invert: bool The copy will be inverted w.r.t. the original """ if invert: inverted = not self.inverted else: inverted = self.inverted return PolygonFilter(axes=self.axes, points=self.points, name=self.name, inverted=inverted)
['def', 'copy', '(', 'self', ',', 'invert', '=', 'False', ')', ':', 'if', 'invert', ':', 'inverted', '=', 'not', 'self', '.', 'inverted', 'else', ':', 'inverted', '=', 'self', '.', 'inverted', 'return', 'PolygonFilter', '(', 'axes', '=', 'self', '.', 'axes', ',', 'points', '=', 'self', '.', 'points', ',', 'name', '=', 'self', '.', 'name', ',', 'inverted', '=', 'inverted', ')']
Return a copy of the current instance Parameters ---------- invert: bool The copy will be inverted w.r.t. the original
['Return', 'a', 'copy', 'of', 'the', 'current', 'instance']
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/polygon_filter.py#L179-L195
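A short sketch of calling the copy method above, assuming an existing PolygonFilter instance named pf:

    inverted_copy = pf.copy(invert=True)   # new filter with the inversion flag flipped
    plain_copy = pf.copy()                 # same inversion state as pf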
5,221
gem/oq-engine
openquake/calculators/views.py
view_portfolio_losses
def view_portfolio_losses(token, dstore): """ The losses for the full portfolio, for each realization and loss type, extracted from the event loss table. """ oq = dstore['oqparam'] loss_dt = oq.loss_dt() data = portfolio_loss(dstore).view(loss_dt)[:, 0] rlzids = [str(r) for r in range(len(data))] array = util.compose_arrays(numpy.array(rlzids), data, 'rlz') # this is very sensitive to rounding errors, so I am using a low precision return rst_table(array, fmt='%.5E')
python
def view_portfolio_losses(token, dstore): """ The losses for the full portfolio, for each realization and loss type, extracted from the event loss table. """ oq = dstore['oqparam'] loss_dt = oq.loss_dt() data = portfolio_loss(dstore).view(loss_dt)[:, 0] rlzids = [str(r) for r in range(len(data))] array = util.compose_arrays(numpy.array(rlzids), data, 'rlz') # this is very sensitive to rounding errors, so I am using a low precision return rst_table(array, fmt='%.5E')
['def', 'view_portfolio_losses', '(', 'token', ',', 'dstore', ')', ':', 'oq', '=', 'dstore', '[', "'oqparam'", ']', 'loss_dt', '=', 'oq', '.', 'loss_dt', '(', ')', 'data', '=', 'portfolio_loss', '(', 'dstore', ')', '.', 'view', '(', 'loss_dt', ')', '[', ':', ',', '0', ']', 'rlzids', '=', '[', 'str', '(', 'r', ')', 'for', 'r', 'in', 'range', '(', 'len', '(', 'data', ')', ')', ']', 'array', '=', 'util', '.', 'compose_arrays', '(', 'numpy', '.', 'array', '(', 'rlzids', ')', ',', 'data', ',', "'rlz'", ')', '# this is very sensitive to rounding errors, so I am using a low precision', 'return', 'rst_table', '(', 'array', ',', 'fmt', '=', "'%.5E'", ')']
The losses for the full portfolio, for each realization and loss type, extracted from the event loss table.
['The', 'losses', 'for', 'the', 'full', 'portfolio', 'for', 'each', 'realization', 'and', 'loss', 'type', 'extracted', 'from', 'the', 'event', 'loss', 'table', '.']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/views.py#L382-L393
5,222
ocadotechnology/django-closuretree
closuretree/models.py
ClosureModel.rebuildtable
def rebuildtable(cls): """Regenerate the entire closuretree.""" cls._closure_model.objects.all().delete() cls._closure_model.objects.bulk_create([cls._closure_model( parent_id=x['pk'], child_id=x['pk'], depth=0 ) for x in cls.objects.values("pk")]) for node in cls.objects.all(): node._closure_createlink()
python
def rebuildtable(cls): """Regenerate the entire closuretree.""" cls._closure_model.objects.all().delete() cls._closure_model.objects.bulk_create([cls._closure_model( parent_id=x['pk'], child_id=x['pk'], depth=0 ) for x in cls.objects.values("pk")]) for node in cls.objects.all(): node._closure_createlink()
['def', 'rebuildtable', '(', 'cls', ')', ':', 'cls', '.', '_closure_model', '.', 'objects', '.', 'all', '(', ')', '.', 'delete', '(', ')', 'cls', '.', '_closure_model', '.', 'objects', '.', 'bulk_create', '(', '[', 'cls', '.', '_closure_model', '(', 'parent_id', '=', 'x', '[', "'pk'", ']', ',', 'child_id', '=', 'x', '[', "'pk'", ']', ',', 'depth', '=', '0', ')', 'for', 'x', 'in', 'cls', '.', 'objects', '.', 'values', '(', '"pk"', ')', ']', ')', 'for', 'node', 'in', 'cls', '.', 'objects', '.', 'all', '(', ')', ':', 'node', '.', '_closure_createlink', '(', ')']
Regenerate the entire closuretree.
['Regenerate', 'the', 'entire', 'closuretree', '.']
train
https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L134-L143
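rebuildtable above is meant to be called on the concrete model class; a hedged sketch against a hypothetical ClosureModel subclass:

    # Category is a hypothetical Django model inheriting from ClosureModel
    Category.rebuildtable()   # deletes all closure rows, recreates depth-0 self-links,
                              # then rebuilds ancestor links node by node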
5,223
robertmartin8/PyPortfolioOpt
pypfopt/value_at_risk.py
CVAROpt.min_cvar
def min_cvar(self, s=10000, beta=0.95, random_state=None): """ Find the portfolio weights that minimises the CVaR, via Monte Carlo sampling from the return distribution. :param s: number of bootstrap draws, defaults to 10000 :type s: int, optional :param beta: "significance level" (i. 1 - q), defaults to 0.95 :type beta: float, optional :param random_state: seed for random sampling, defaults to None :type random_state: int, optional :return: asset weights for the Sharpe-maximising portfolio :rtype: dict """ args = (self.returns, s, beta, random_state) result = noisyopt.minimizeSPSA( objective_functions.negative_cvar, args=args, bounds=self.bounds, x0=self.initial_guess, niter=1000, paired=False, ) self.weights = self.normalize_weights(result["x"]) return dict(zip(self.tickers, self.weights))
python
def min_cvar(self, s=10000, beta=0.95, random_state=None): """ Find the portfolio weights that minimises the CVaR, via Monte Carlo sampling from the return distribution. :param s: number of bootstrap draws, defaults to 10000 :type s: int, optional :param beta: "significance level" (i. 1 - q), defaults to 0.95 :type beta: float, optional :param random_state: seed for random sampling, defaults to None :type random_state: int, optional :return: asset weights for the Sharpe-maximising portfolio :rtype: dict """ args = (self.returns, s, beta, random_state) result = noisyopt.minimizeSPSA( objective_functions.negative_cvar, args=args, bounds=self.bounds, x0=self.initial_guess, niter=1000, paired=False, ) self.weights = self.normalize_weights(result["x"]) return dict(zip(self.tickers, self.weights))
['def', 'min_cvar', '(', 'self', ',', 's', '=', '10000', ',', 'beta', '=', '0.95', ',', 'random_state', '=', 'None', ')', ':', 'args', '=', '(', 'self', '.', 'returns', ',', 's', ',', 'beta', ',', 'random_state', ')', 'result', '=', 'noisyopt', '.', 'minimizeSPSA', '(', 'objective_functions', '.', 'negative_cvar', ',', 'args', '=', 'args', ',', 'bounds', '=', 'self', '.', 'bounds', ',', 'x0', '=', 'self', '.', 'initial_guess', ',', 'niter', '=', '1000', ',', 'paired', '=', 'False', ',', ')', 'self', '.', 'weights', '=', 'self', '.', 'normalize_weights', '(', 'result', '[', '"x"', ']', ')', 'return', 'dict', '(', 'zip', '(', 'self', '.', 'tickers', ',', 'self', '.', 'weights', ')', ')']
Find the portfolio weights that minimises the CVaR, via Monte Carlo sampling from the return distribution. :param s: number of bootstrap draws, defaults to 10000 :type s: int, optional :param beta: "significance level" (i. 1 - q), defaults to 0.95 :type beta: float, optional :param random_state: seed for random sampling, defaults to None :type random_state: int, optional :return: asset weights for the Sharpe-maximising portfolio :rtype: dict
['Find', 'the', 'portfolio', 'weights', 'that', 'minimises', 'the', 'CVaR', 'via', 'Monte', 'Carlo', 'sampling', 'from', 'the', 'return', 'distribution', '.']
train
https://github.com/robertmartin8/PyPortfolioOpt/blob/dfad1256cb6995c7fbd7a025eedb54b1ca04b2fc/pypfopt/value_at_risk.py#L54-L78
5,224
Sanji-IO/sanji
sanji/model/__init__.py
Model.maxId
def maxId(self): """int: current max id of objects""" if len(self.model.db) == 0: return 0 return max(map(lambda obj: obj["id"], self.model.db))
python
def maxId(self): """int: current max id of objects""" if len(self.model.db) == 0: return 0 return max(map(lambda obj: obj["id"], self.model.db))
['def', 'maxId', '(', 'self', ')', ':', 'if', 'len', '(', 'self', '.', 'model', '.', 'db', ')', '==', '0', ':', 'return', '0', 'return', 'max', '(', 'map', '(', 'lambda', 'obj', ':', 'obj', '[', '"id"', ']', ',', 'self', '.', 'model', '.', 'db', ')', ')']
int: current max id of objects
['int', ':', 'current', 'max', 'id', 'of', 'objects']
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/model/__init__.py#L47-L52
5,225
toumorokoshi/sprinter
sprinter/formula/perforce.py
PerforceFormula.__write_p4settings
def __write_p4settings(self, config): """ write perforce settings """ self.logger.info("Writing p4settings...") root_dir = os.path.expanduser(config.get('root_path')) p4settings_path = os.path.join(root_dir, ".p4settings") if os.path.exists(p4settings_path): if self.target.get('overwrite_p4settings', False): self.logger.info("Overwriting existing p4settings...") os.remove(p4settings_path) else: return with open(p4settings_path, "w+") as p4settings_file: p4settings_file.write(p4settings_template % config.to_dict()) if config.get('write_password_p4settings', 'no'): p4settings_file.write("\nP4PASSWD=%s" % config['password'])
python
def __write_p4settings(self, config): """ write perforce settings """ self.logger.info("Writing p4settings...") root_dir = os.path.expanduser(config.get('root_path')) p4settings_path = os.path.join(root_dir, ".p4settings") if os.path.exists(p4settings_path): if self.target.get('overwrite_p4settings', False): self.logger.info("Overwriting existing p4settings...") os.remove(p4settings_path) else: return with open(p4settings_path, "w+") as p4settings_file: p4settings_file.write(p4settings_template % config.to_dict()) if config.get('write_password_p4settings', 'no'): p4settings_file.write("\nP4PASSWD=%s" % config['password'])
['def', '__write_p4settings', '(', 'self', ',', 'config', ')', ':', 'self', '.', 'logger', '.', 'info', '(', '"Writing p4settings..."', ')', 'root_dir', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'config', '.', 'get', '(', "'root_path'", ')', ')', 'p4settings_path', '=', 'os', '.', 'path', '.', 'join', '(', 'root_dir', ',', '".p4settings"', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'p4settings_path', ')', ':', 'if', 'self', '.', 'target', '.', 'get', '(', "'overwrite_p4settings'", ',', 'False', ')', ':', 'self', '.', 'logger', '.', 'info', '(', '"Overwriting existing p4settings..."', ')', 'os', '.', 'remove', '(', 'p4settings_path', ')', 'else', ':', 'return', 'with', 'open', '(', 'p4settings_path', ',', '"w+"', ')', 'as', 'p4settings_file', ':', 'p4settings_file', '.', 'write', '(', 'p4settings_template', '%', 'config', '.', 'to_dict', '(', ')', ')', 'if', 'config', '.', 'get', '(', "'write_password_p4settings'", ',', "'no'", ')', ':', 'p4settings_file', '.', 'write', '(', '"\\nP4PASSWD=%s"', '%', 'config', '[', "'password'", ']', ')']
write perforce settings
['write', 'perforce', 'settings']
train
https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/formula/perforce.py#L188-L202
5,226
maas/python-libmaas
maas/client/viscera/partitions.py
Partition.unformat
async def unformat(self): """Unformat this partition.""" self._data = await self._handler.unformat( system_id=self.block_device.node.system_id, device_id=self.block_device.id, id=self.id)
python
async def unformat(self): """Unformat this partition.""" self._data = await self._handler.unformat( system_id=self.block_device.node.system_id, device_id=self.block_device.id, id=self.id)
['async', 'def', 'unformat', '(', 'self', ')', ':', 'self', '.', '_data', '=', 'await', 'self', '.', '_handler', '.', 'unformat', '(', 'system_id', '=', 'self', '.', 'block_device', '.', 'node', '.', 'system_id', ',', 'device_id', '=', 'self', '.', 'block_device', '.', 'id', ',', 'id', '=', 'self', '.', 'id', ')']
Unformat this partition.
['Unformat', 'this', 'partition', '.']
train
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/partitions.py#L91-L95
5,227
jkitzes/macroeco
macroeco/empirical/_empirical.py
_product
def _product(*args, **kwds): """ Generates cartesian product of lists given as arguments From itertools.product documentation """ pools = map(tuple, args) * kwds.get('repeat', 1) result = [[]] for pool in pools: result = [x+[y] for x in result for y in pool] return result
python
def _product(*args, **kwds): """ Generates cartesian product of lists given as arguments From itertools.product documentation """ pools = map(tuple, args) * kwds.get('repeat', 1) result = [[]] for pool in pools: result = [x+[y] for x in result for y in pool] return result
['def', '_product', '(', '*', 'args', ',', '*', '*', 'kwds', ')', ':', 'pools', '=', 'map', '(', 'tuple', ',', 'args', ')', '*', 'kwds', '.', 'get', '(', "'repeat'", ',', '1', ')', 'result', '=', '[', '[', ']', ']', 'for', 'pool', 'in', 'pools', ':', 'result', '=', '[', 'x', '+', '[', 'y', ']', 'for', 'x', 'in', 'result', 'for', 'y', 'in', 'pool', ']', 'return', 'result']
Generates cartesian product of lists given as arguments From itertools.product documentation
['Generates', 'cartesian', 'product', 'of', 'lists', 'given', 'as', 'arguments']
train
https://github.com/jkitzes/macroeco/blob/ee5fac5560a2d64de3a64738b5bc6833e2d7ff2e/macroeco/empirical/_empirical.py#L1296-L1307
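_product above mirrors the documented pure-Python equivalent of itertools.product, returning lists instead of tuples; two quick checks, assuming _product as defined above is in scope (note the `map(tuple, args) * ...` line assumes Python 2, where map returns a list):

    _product([1, 2], [3, 4])
    # -> [[1, 3], [1, 4], [2, 3], [2, 4]]
    _product([0, 1], repeat=2)
    # -> [[0, 0], [0, 1], [1, 0], [1, 1]]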
5,228
bitprophet/releases
releases/models.py
Issue.default_spec
def default_spec(self, manager): """ Given the current release-lines structure, return a default Spec. Specifics: * For feature-like issues, only the highest major release is used, so given a ``manager`` with top level keys of ``[1, 2]``, this would return ``Spec(">=2")``. * When ``releases_always_forwardport_features`` is ``True``, that behavior is nullified, and this function always returns the empty ``Spec`` (which matches any and all versions/lines). * For bugfix-like issues, we only consider major release families which have actual releases already. * Thus the core difference here is that features are 'consumed' by upcoming major releases, and bugfixes are not. * When the ``unstable_prehistory`` setting is ``True``, the default spec starts at the oldest non-zero release line. (Otherwise, issues posted after prehistory ends would try being added to the 0.x part of the tree, which makes no sense in unstable-prehistory mode.) """ # TODO: I feel like this + the surrounding bits in add_to_manager() # could be consolidated & simplified... specstr = "" # Make sure truly-default spec skips 0.x if prehistory was unstable. stable_families = manager.stable_families if manager.config.releases_unstable_prehistory and stable_families: specstr = ">={}".format(min(stable_families)) if self.is_featurelike: # TODO: if app->config-><releases_always_forwardport_features or # w/e if True: specstr = ">={}".format(max(manager.keys())) else: # Can only meaningfully limit to minor release buckets if they # actually exist yet. buckets = self.minor_releases(manager) if buckets: specstr = ">={}".format(max(buckets)) return Spec(specstr) if specstr else Spec()
python
def default_spec(self, manager): """ Given the current release-lines structure, return a default Spec. Specifics: * For feature-like issues, only the highest major release is used, so given a ``manager`` with top level keys of ``[1, 2]``, this would return ``Spec(">=2")``. * When ``releases_always_forwardport_features`` is ``True``, that behavior is nullified, and this function always returns the empty ``Spec`` (which matches any and all versions/lines). * For bugfix-like issues, we only consider major release families which have actual releases already. * Thus the core difference here is that features are 'consumed' by upcoming major releases, and bugfixes are not. * When the ``unstable_prehistory`` setting is ``True``, the default spec starts at the oldest non-zero release line. (Otherwise, issues posted after prehistory ends would try being added to the 0.x part of the tree, which makes no sense in unstable-prehistory mode.) """ # TODO: I feel like this + the surrounding bits in add_to_manager() # could be consolidated & simplified... specstr = "" # Make sure truly-default spec skips 0.x if prehistory was unstable. stable_families = manager.stable_families if manager.config.releases_unstable_prehistory and stable_families: specstr = ">={}".format(min(stable_families)) if self.is_featurelike: # TODO: if app->config-><releases_always_forwardport_features or # w/e if True: specstr = ">={}".format(max(manager.keys())) else: # Can only meaningfully limit to minor release buckets if they # actually exist yet. buckets = self.minor_releases(manager) if buckets: specstr = ">={}".format(max(buckets)) return Spec(specstr) if specstr else Spec()
['def', 'default_spec', '(', 'self', ',', 'manager', ')', ':', '# TODO: I feel like this + the surrounding bits in add_to_manager()', '# could be consolidated & simplified...', 'specstr', '=', '""', '# Make sure truly-default spec skips 0.x if prehistory was unstable.', 'stable_families', '=', 'manager', '.', 'stable_families', 'if', 'manager', '.', 'config', '.', 'releases_unstable_prehistory', 'and', 'stable_families', ':', 'specstr', '=', '">={}"', '.', 'format', '(', 'min', '(', 'stable_families', ')', ')', 'if', 'self', '.', 'is_featurelike', ':', '# TODO: if app->config-><releases_always_forwardport_features or', '# w/e', 'if', 'True', ':', 'specstr', '=', '">={}"', '.', 'format', '(', 'max', '(', 'manager', '.', 'keys', '(', ')', ')', ')', 'else', ':', '# Can only meaningfully limit to minor release buckets if they', '# actually exist yet.', 'buckets', '=', 'self', '.', 'minor_releases', '(', 'manager', ')', 'if', 'buckets', ':', 'specstr', '=', '">={}"', '.', 'format', '(', 'max', '(', 'buckets', ')', ')', 'return', 'Spec', '(', 'specstr', ')', 'if', 'specstr', 'else', 'Spec', '(', ')']
Given the current release-lines structure, return a default Spec. Specifics: * For feature-like issues, only the highest major release is used, so given a ``manager`` with top level keys of ``[1, 2]``, this would return ``Spec(">=2")``. * When ``releases_always_forwardport_features`` is ``True``, that behavior is nullified, and this function always returns the empty ``Spec`` (which matches any and all versions/lines). * For bugfix-like issues, we only consider major release families which have actual releases already. * Thus the core difference here is that features are 'consumed' by upcoming major releases, and bugfixes are not. * When the ``unstable_prehistory`` setting is ``True``, the default spec starts at the oldest non-zero release line. (Otherwise, issues posted after prehistory ends would try being added to the 0.x part of the tree, which makes no sense in unstable-prehistory mode.)
['Given', 'the', 'current', 'release', '-', 'lines', 'structure', 'return', 'a', 'default', 'Spec', '.']
train
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/models.py#L82-L125
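The record above (releases.models, default_spec) describes how a default version Spec is chosen: feature-like issues attach to the highest major release family, bugfix-like issues to the highest minor bucket that already has releases, and unstable prehistory skips the 0.x line. A minimal standalone sketch of that selection logic, using plain ">=" strings instead of semantic_version Spec objects and explicit arguments in place of the manager/config objects (all names here are illustrative, not the library's API):

def default_specstr(is_featurelike, major_keys, minor_buckets,
                    unstable_prehistory=False, stable_families=()):
    # The empty string stands in for the empty Spec, which matches everything.
    specstr = ""
    # Skip the 0.x family entirely if prehistory was unstable.
    if unstable_prehistory and stable_families:
        specstr = ">={}".format(min(stable_families))
    if is_featurelike:
        # Features are consumed by the newest major release family.
        specstr = ">={}".format(max(major_keys))
    elif minor_buckets:
        # Bugfixes only target minor buckets that already have releases.
        specstr = ">={}".format(max(minor_buckets))
    return specstr

print(default_specstr(True, [1, 2], []))                  # '>=2'
print(default_specstr(False, [1, 2], ['1.14', '2.0']))    # '>=2.0'
print(default_specstr(False, [0, 1], [], True, [1]))      # '>=1'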
5,229
zimeon/iiif
iiif_cgi.py
IIIFRequestHandler.error_response
def error_response(self, code, content=''): """Construct and send error response.""" self.send_response(code) self.send_header('Content-Type', 'text/xml') self.add_compliance_header() self.end_headers() self.wfile.write(content)
python
def error_response(self, code, content=''): """Construct and send error response.""" self.send_response(code) self.send_header('Content-Type', 'text/xml') self.add_compliance_header() self.end_headers() self.wfile.write(content)
['def', 'error_response', '(', 'self', ',', 'code', ',', 'content', '=', "''", ')', ':', 'self', '.', 'send_response', '(', 'code', ')', 'self', '.', 'send_header', '(', "'Content-Type'", ',', "'text/xml'", ')', 'self', '.', 'add_compliance_header', '(', ')', 'self', '.', 'end_headers', '(', ')', 'self', '.', 'wfile', '.', 'write', '(', 'content', ')']
Construct and send error response.
['Construct', 'and', 'send', 'error', 'response', '.']
train
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif_cgi.py#L64-L70
5,230
neo4j-contrib/neomodel
neomodel/contrib/spatial_properties.py
PointProperty.inflate
def inflate(self, value): """ Handles the marshalling from Neo4J POINT to NeomodelPoint :param value: Value returned from the database :type value: Neo4J POINT :return: NeomodelPoint """ if not isinstance(value,neo4j.types.spatial.Point): raise TypeError('Invalid datatype to inflate. Expected POINT datatype, received {}'.format(type(value))) try: value_point_crs = SRID_TO_CRS[value.srid] except KeyError: raise ValueError('Invalid SRID to inflate. ' 'Expected one of {}, received {}'.format(SRID_TO_CRS.keys(), value.srid)) if self._crs != value_point_crs: raise ValueError('Invalid CRS. ' 'Expected POINT defined over {}, received {}'.format(self._crs, value_point_crs)) # cartesian if value.srid == 7203: return NeomodelPoint(x=value.x, y=value.y) # cartesian-3d elif value.srid == 9157: return NeomodelPoint(x=value.x, y=value.y, z=value.z) # wgs-84 elif value.srid == 4326: return NeomodelPoint(longitude=value.longitude, latitude=value.latitude) # wgs-83-3d elif value.srid == 4979: return NeomodelPoint(longitude=value.longitude, latitude=value.latitude, height=value.height)
python
def inflate(self, value): """ Handles the marshalling from Neo4J POINT to NeomodelPoint :param value: Value returned from the database :type value: Neo4J POINT :return: NeomodelPoint """ if not isinstance(value,neo4j.types.spatial.Point): raise TypeError('Invalid datatype to inflate. Expected POINT datatype, received {}'.format(type(value))) try: value_point_crs = SRID_TO_CRS[value.srid] except KeyError: raise ValueError('Invalid SRID to inflate. ' 'Expected one of {}, received {}'.format(SRID_TO_CRS.keys(), value.srid)) if self._crs != value_point_crs: raise ValueError('Invalid CRS. ' 'Expected POINT defined over {}, received {}'.format(self._crs, value_point_crs)) # cartesian if value.srid == 7203: return NeomodelPoint(x=value.x, y=value.y) # cartesian-3d elif value.srid == 9157: return NeomodelPoint(x=value.x, y=value.y, z=value.z) # wgs-84 elif value.srid == 4326: return NeomodelPoint(longitude=value.longitude, latitude=value.latitude) # wgs-83-3d elif value.srid == 4979: return NeomodelPoint(longitude=value.longitude, latitude=value.latitude, height=value.height)
['def', 'inflate', '(', 'self', ',', 'value', ')', ':', 'if', 'not', 'isinstance', '(', 'value', ',', 'neo4j', '.', 'types', '.', 'spatial', '.', 'Point', ')', ':', 'raise', 'TypeError', '(', "'Invalid datatype to inflate. Expected POINT datatype, received {}'", '.', 'format', '(', 'type', '(', 'value', ')', ')', ')', 'try', ':', 'value_point_crs', '=', 'SRID_TO_CRS', '[', 'value', '.', 'srid', ']', 'except', 'KeyError', ':', 'raise', 'ValueError', '(', "'Invalid SRID to inflate. '", "'Expected one of {}, received {}'", '.', 'format', '(', 'SRID_TO_CRS', '.', 'keys', '(', ')', ',', 'value', '.', 'srid', ')', ')', 'if', 'self', '.', '_crs', '!=', 'value_point_crs', ':', 'raise', 'ValueError', '(', "'Invalid CRS. '", "'Expected POINT defined over {}, received {}'", '.', 'format', '(', 'self', '.', '_crs', ',', 'value_point_crs', ')', ')', '# cartesian', 'if', 'value', '.', 'srid', '==', '7203', ':', 'return', 'NeomodelPoint', '(', 'x', '=', 'value', '.', 'x', ',', 'y', '=', 'value', '.', 'y', ')', '# cartesian-3d', 'elif', 'value', '.', 'srid', '==', '9157', ':', 'return', 'NeomodelPoint', '(', 'x', '=', 'value', '.', 'x', ',', 'y', '=', 'value', '.', 'y', ',', 'z', '=', 'value', '.', 'z', ')', '# wgs-84', 'elif', 'value', '.', 'srid', '==', '4326', ':', 'return', 'NeomodelPoint', '(', 'longitude', '=', 'value', '.', 'longitude', ',', 'latitude', '=', 'value', '.', 'latitude', ')', '# wgs-83-3d', 'elif', 'value', '.', 'srid', '==', '4979', ':', 'return', 'NeomodelPoint', '(', 'longitude', '=', 'value', '.', 'longitude', ',', 'latitude', '=', 'value', '.', 'latitude', ',', 'height', '=', 'value', '.', 'height', ')']
Handles the marshalling from Neo4J POINT to NeomodelPoint :param value: Value returned from the database :type value: Neo4J POINT :return: NeomodelPoint
['Handles', 'the', 'marshalling', 'from', 'Neo4J', 'POINT', 'to', 'NeomodelPoint']
train
https://github.com/neo4j-contrib/neomodel/blob/cca5de4c4e90998293558b871b1b529095c91a38/neomodel/contrib/spatial_properties.py#L280-L311
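PointProperty.inflate in the record above dispatches on the Neo4j SRID (7203 and 9157 for cartesian points, 4326 and 4979 for WGS-84) and rejects points whose CRS does not match the property's. A small sketch of the same SRID dispatch with plain dicts and tuples rather than the neomodel/neo4j driver classes (the function and return types are made up for illustration):

SRID_TO_CRS = {7203: 'cartesian', 9157: 'cartesian-3d',
               4326: 'wgs-84', 4979: 'wgs-84-3d'}

def inflate_point(srid, expected_crs, **coords):
    # Unknown SRIDs are rejected outright.
    try:
        crs = SRID_TO_CRS[srid]
    except KeyError:
        raise ValueError('unknown SRID {}'.format(srid))
    # The property is bound to a single CRS, so a mismatch is an error.
    if crs != expected_crs:
        raise ValueError('expected {}, received {}'.format(expected_crs, crs))
    if srid == 7203:
        return (coords['x'], coords['y'])
    if srid == 9157:
        return (coords['x'], coords['y'], coords['z'])
    if srid == 4326:
        return (coords['longitude'], coords['latitude'])
    return (coords['longitude'], coords['latitude'], coords['height'])

print(inflate_point(4326, 'wgs-84', longitude=12.49, latitude=41.90))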
5,231
erinn/comodo_api
comodo_api/comodo_api.py
ComodoTLSService.get_cert_types
def get_cert_types(self): """ Collect the certificate types that are available to the customer. :return: A list of dictionaries of certificate types :rtype: list """ result = self.client.service.getCustomerCertTypes(authData=self.auth) if result.statusCode == 0: return jsend.success({'cert_types': result.types}) else: return self._create_error(result.statusCode)
python
def get_cert_types(self): """ Collect the certificate types that are available to the customer. :return: A list of dictionaries of certificate types :rtype: list """ result = self.client.service.getCustomerCertTypes(authData=self.auth) if result.statusCode == 0: return jsend.success({'cert_types': result.types}) else: return self._create_error(result.statusCode)
['def', 'get_cert_types', '(', 'self', ')', ':', 'result', '=', 'self', '.', 'client', '.', 'service', '.', 'getCustomerCertTypes', '(', 'authData', '=', 'self', '.', 'auth', ')', 'if', 'result', '.', 'statusCode', '==', '0', ':', 'return', 'jsend', '.', 'success', '(', '{', "'cert_types'", ':', 'result', '.', 'types', '}', ')', 'else', ':', 'return', 'self', '.', '_create_error', '(', 'result', '.', 'statusCode', ')']
Collect the certificate types that are available to the customer. :return: A list of dictionaries of certificate types :rtype: list
['Collect', 'the', 'certificate', 'types', 'that', 'are', 'available', 'to', 'the', 'customer', '.']
train
https://github.com/erinn/comodo_api/blob/ddc2f0b487cab27cf6af1ad6a2deee17da11a1dd/comodo_api/comodo_api.py#L133-L145
5,232
edx/edx-django-release-util
scripts/update_repos_version.py
bump_repos_version
def bump_repos_version(module_name, new_version, local_only): """ Changes the pinned version number in the requirements files of all repos which have the specified Python module as a dependency. This script assumes that GITHUB_TOKEN is set for GitHub authentication. """ # Make the cloning directory and change directories into it. tmp_dir = tempfile.mkdtemp(dir=os.getcwd()) # Iterate through each repository. for owner, repo_name in REPOS_TO_CHANGE: repo_url = REPO_URL_FORMAT.format(owner, repo_name) gh = GitHubApiUtils(owner, repo_name) os.chdir(tmp_dir) # Clone the repo. ret_code = subprocess.call(['git', 'clone', '{}.git'.format(repo_url)]) if ret_code: logging.error('Failed to clone repo {}'.format(repo_url)) continue # Change into the cloned repo dir. os.chdir(repo_name) # Create a branch, using the version number. branch_name = '{}/{}'.format(module_name, new_version) ret_code = subprocess.call(['git', 'checkout', '-b', branch_name]) if ret_code: logging.error('Failed to create branch in repo {}'.format(repo_url)) continue # Search through all TXT files to find all lines with the module name, changing the pinned version. files_changed = False for root, _dirs, files in os.walk('.'): for file in files: if file.endswith('.txt') and (('requirements' in file) or ('requirements' in root)): found = False filepath = os.path.join(root, file) with open(filepath) as f: if '{}=='.format(module_name) in f.read(): found = True if found: files_changed = True # Change the file in-place. for line in fileinput.input(filepath, inplace=True): if '{}=='.format(module_name) in line: print '{}=={}'.format(module_name, new_version) else: print line, if not files_changed: # Module name wasn't found in the requirements files. logging.info("Module name '{}' not found in repo {} - skipping.".format(module_name, repo_url)) continue # Add/commit the files. ret_code = subprocess.call(['git', 'commit', '-am', 'Updating {} requirement to version {}'.format(module_name, new_version)]) if ret_code: logging.error("Failed to add and commit changed files to repo {}".format(repo_url)) continue if local_only: # For local_only, don't push the branch to the remote and create the PR - leave all changes local for review. continue # Push the branch. ret_code = subprocess.call(['git', 'push', '--set-upstream', 'origin', branch_name]) if ret_code: logging.error("Failed to push branch {} upstream for repo {}".format(branch_name, repo_url)) continue # Create a PR with an automated message. rollback_branch_push = False try: # The GitHub "mention" below does not work via the API - unfortunately... response = gh.create_pull( title='Change {} version.'.format(module_name), body='Change the required version of {} to {}.\n\n@edx-ops/pipeline-team Please review and tag appropriate parties.'.format(module_name, new_version), head=branch_name, base='master' ) except: logging.error('Failed to create PR for repo {} - did you set GITHUB_TOKEN?'.format(repo_url)) rollback_branch_push = True else: logging.info('Created PR #{} for repo {}: {}'.format(response.number, repo_url, response.html_url)) if rollback_branch_push: # Since the PR creation failed, delete the branch in the remote repo as well. ret_code = subprocess.call(['git', 'push', 'origin', '--delete', branch_name]) if ret_code: logging.error("ROLLBACK: Failed to delete upstream branch {} for repo {}".format(branch_name, repo_url)) if not local_only: # Remove the temp directory containing all the cloned repos. shutil.rmtree(tmp_dir)
python
def bump_repos_version(module_name, new_version, local_only): """ Changes the pinned version number in the requirements files of all repos which have the specified Python module as a dependency. This script assumes that GITHUB_TOKEN is set for GitHub authentication. """ # Make the cloning directory and change directories into it. tmp_dir = tempfile.mkdtemp(dir=os.getcwd()) # Iterate through each repository. for owner, repo_name in REPOS_TO_CHANGE: repo_url = REPO_URL_FORMAT.format(owner, repo_name) gh = GitHubApiUtils(owner, repo_name) os.chdir(tmp_dir) # Clone the repo. ret_code = subprocess.call(['git', 'clone', '{}.git'.format(repo_url)]) if ret_code: logging.error('Failed to clone repo {}'.format(repo_url)) continue # Change into the cloned repo dir. os.chdir(repo_name) # Create a branch, using the version number. branch_name = '{}/{}'.format(module_name, new_version) ret_code = subprocess.call(['git', 'checkout', '-b', branch_name]) if ret_code: logging.error('Failed to create branch in repo {}'.format(repo_url)) continue # Search through all TXT files to find all lines with the module name, changing the pinned version. files_changed = False for root, _dirs, files in os.walk('.'): for file in files: if file.endswith('.txt') and (('requirements' in file) or ('requirements' in root)): found = False filepath = os.path.join(root, file) with open(filepath) as f: if '{}=='.format(module_name) in f.read(): found = True if found: files_changed = True # Change the file in-place. for line in fileinput.input(filepath, inplace=True): if '{}=='.format(module_name) in line: print '{}=={}'.format(module_name, new_version) else: print line, if not files_changed: # Module name wasn't found in the requirements files. logging.info("Module name '{}' not found in repo {} - skipping.".format(module_name, repo_url)) continue # Add/commit the files. ret_code = subprocess.call(['git', 'commit', '-am', 'Updating {} requirement to version {}'.format(module_name, new_version)]) if ret_code: logging.error("Failed to add and commit changed files to repo {}".format(repo_url)) continue if local_only: # For local_only, don't push the branch to the remote and create the PR - leave all changes local for review. continue # Push the branch. ret_code = subprocess.call(['git', 'push', '--set-upstream', 'origin', branch_name]) if ret_code: logging.error("Failed to push branch {} upstream for repo {}".format(branch_name, repo_url)) continue # Create a PR with an automated message. rollback_branch_push = False try: # The GitHub "mention" below does not work via the API - unfortunately... response = gh.create_pull( title='Change {} version.'.format(module_name), body='Change the required version of {} to {}.\n\n@edx-ops/pipeline-team Please review and tag appropriate parties.'.format(module_name, new_version), head=branch_name, base='master' ) except: logging.error('Failed to create PR for repo {} - did you set GITHUB_TOKEN?'.format(repo_url)) rollback_branch_push = True else: logging.info('Created PR #{} for repo {}: {}'.format(response.number, repo_url, response.html_url)) if rollback_branch_push: # Since the PR creation failed, delete the branch in the remote repo as well. ret_code = subprocess.call(['git', 'push', 'origin', '--delete', branch_name]) if ret_code: logging.error("ROLLBACK: Failed to delete upstream branch {} for repo {}".format(branch_name, repo_url)) if not local_only: # Remove the temp directory containing all the cloned repos. shutil.rmtree(tmp_dir)
['def', 'bump_repos_version', '(', 'module_name', ',', 'new_version', ',', 'local_only', ')', ':', '# Make the cloning directory and change directories into it.', 'tmp_dir', '=', 'tempfile', '.', 'mkdtemp', '(', 'dir', '=', 'os', '.', 'getcwd', '(', ')', ')', '# Iterate through each repository.', 'for', 'owner', ',', 'repo_name', 'in', 'REPOS_TO_CHANGE', ':', 'repo_url', '=', 'REPO_URL_FORMAT', '.', 'format', '(', 'owner', ',', 'repo_name', ')', 'gh', '=', 'GitHubApiUtils', '(', 'owner', ',', 'repo_name', ')', 'os', '.', 'chdir', '(', 'tmp_dir', ')', '# Clone the repo.', 'ret_code', '=', 'subprocess', '.', 'call', '(', '[', "'git'", ',', "'clone'", ',', "'{}.git'", '.', 'format', '(', 'repo_url', ')', ']', ')', 'if', 'ret_code', ':', 'logging', '.', 'error', '(', "'Failed to clone repo {}'", '.', 'format', '(', 'repo_url', ')', ')', 'continue', '# Change into the cloned repo dir.', 'os', '.', 'chdir', '(', 'repo_name', ')', '# Create a branch, using the version number.', 'branch_name', '=', "'{}/{}'", '.', 'format', '(', 'module_name', ',', 'new_version', ')', 'ret_code', '=', 'subprocess', '.', 'call', '(', '[', "'git'", ',', "'checkout'", ',', "'-b'", ',', 'branch_name', ']', ')', 'if', 'ret_code', ':', 'logging', '.', 'error', '(', "'Failed to create branch in repo {}'", '.', 'format', '(', 'repo_url', ')', ')', 'continue', '# Search through all TXT files to find all lines with the module name, changing the pinned version.', 'files_changed', '=', 'False', 'for', 'root', ',', '_dirs', ',', 'files', 'in', 'os', '.', 'walk', '(', "'.'", ')', ':', 'for', 'file', 'in', 'files', ':', 'if', 'file', '.', 'endswith', '(', "'.txt'", ')', 'and', '(', '(', "'requirements'", 'in', 'file', ')', 'or', '(', "'requirements'", 'in', 'root', ')', ')', ':', 'found', '=', 'False', 'filepath', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'file', ')', 'with', 'open', '(', 'filepath', ')', 'as', 'f', ':', 'if', "'{}=='", '.', 'format', '(', 'module_name', ')', 'in', 'f', '.', 'read', '(', ')', ':', 'found', '=', 'True', 'if', 'found', ':', 'files_changed', '=', 'True', '# Change the file in-place.', 'for', 'line', 'in', 'fileinput', '.', 'input', '(', 'filepath', ',', 'inplace', '=', 'True', ')', ':', 'if', "'{}=='", '.', 'format', '(', 'module_name', ')', 'in', 'line', ':', 'print', "'{}=={}'", '.', 'format', '(', 'module_name', ',', 'new_version', ')', 'else', ':', 'print', 'line', ',', 'if', 'not', 'files_changed', ':', "# Module name wasn't found in the requirements files.", 'logging', '.', 'info', '(', '"Module name \'{}\' not found in repo {} - skipping."', '.', 'format', '(', 'module_name', ',', 'repo_url', ')', ')', 'continue', '# Add/commit the files.', 'ret_code', '=', 'subprocess', '.', 'call', '(', '[', "'git'", ',', "'commit'", ',', "'-am'", ',', "'Updating {} requirement to version {}'", '.', 'format', '(', 'module_name', ',', 'new_version', ')', ']', ')', 'if', 'ret_code', ':', 'logging', '.', 'error', '(', '"Failed to add and commit changed files to repo {}"', '.', 'format', '(', 'repo_url', ')', ')', 'continue', 'if', 'local_only', ':', "# For local_only, don't push the branch to the remote and create the PR - leave all changes local for review.", 'continue', '# Push the branch.', 'ret_code', '=', 'subprocess', '.', 'call', '(', '[', "'git'", ',', "'push'", ',', "'--set-upstream'", ',', "'origin'", ',', 'branch_name', ']', ')', 'if', 'ret_code', ':', 'logging', '.', 'error', '(', '"Failed to push branch {} upstream for repo {}"', '.', 'format', '(', 'branch_name', ',', 'repo_url', 
')', ')', 'continue', '# Create a PR with an automated message.', 'rollback_branch_push', '=', 'False', 'try', ':', '# The GitHub "mention" below does not work via the API - unfortunately...', 'response', '=', 'gh', '.', 'create_pull', '(', 'title', '=', "'Change {} version.'", '.', 'format', '(', 'module_name', ')', ',', 'body', '=', "'Change the required version of {} to {}.\\n\\n@edx-ops/pipeline-team Please review and tag appropriate parties.'", '.', 'format', '(', 'module_name', ',', 'new_version', ')', ',', 'head', '=', 'branch_name', ',', 'base', '=', "'master'", ')', 'except', ':', 'logging', '.', 'error', '(', "'Failed to create PR for repo {} - did you set GITHUB_TOKEN?'", '.', 'format', '(', 'repo_url', ')', ')', 'rollback_branch_push', '=', 'True', 'else', ':', 'logging', '.', 'info', '(', "'Created PR #{} for repo {}: {}'", '.', 'format', '(', 'response', '.', 'number', ',', 'repo_url', ',', 'response', '.', 'html_url', ')', ')', 'if', 'rollback_branch_push', ':', '# Since the PR creation failed, delete the branch in the remote repo as well.', 'ret_code', '=', 'subprocess', '.', 'call', '(', '[', "'git'", ',', "'push'", ',', "'origin'", ',', "'--delete'", ',', 'branch_name', ']', ')', 'if', 'ret_code', ':', 'logging', '.', 'error', '(', '"ROLLBACK: Failed to delete upstream branch {} for repo {}"', '.', 'format', '(', 'branch_name', ',', 'repo_url', ')', ')', 'if', 'not', 'local_only', ':', '# Remove the temp directory containing all the cloned repos.', 'shutil', '.', 'rmtree', '(', 'tmp_dir', ')']
Changes the pinned version number in the requirements files of all repos which have the specified Python module as a dependency. This script assumes that GITHUB_TOKEN is set for GitHub authentication.
['Changes', 'the', 'pinned', 'version', 'number', 'in', 'the', 'requirements', 'files', 'of', 'all', 'repos', 'which', 'have', 'the', 'specified', 'Python', 'module', 'as', 'a', 'dependency', '.']
train
https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/scripts/update_repos_version.py#L55-L153
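The script in the record above is Python 2 (bare print statements, `except Exception, e`) and rewrites `package==version` pins across many cloned repositories. A Python 3 sketch of just the in-place pin rewrite step, assuming requirements files follow the `name==version` convention shown in the record (the file path in the commented call is hypothetical):

import fileinput
import re

def repin(path, module_name, new_version):
    """Rewrite 'module_name==x.y' lines in a requirements file in place."""
    pattern = re.compile(r'^{}=='.format(re.escape(module_name)))
    changed = False
    for line in fileinput.input(path, inplace=True):
        # With inplace=True, anything printed replaces the original line.
        if pattern.match(line):
            print('{}=={}'.format(module_name, new_version))
            changed = True
        else:
            print(line, end='')
    return changed

# repin('requirements/base.txt', 'edx-django-release-util', '0.5.1')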
5,233
numberoverzero/bloop
bloop/stream/coordinator.py
Coordinator.advance_shards
def advance_shards(self): """Poll active shards for records and insert them into the buffer. Rotate exhausted shards. Returns immediately if the buffer isn't empty. """ # Don't poll shards when there are pending records. if self.buffer: return # 0) Collect new records from all active shards. record_shard_pairs = [] for shard in self.active: records = next(shard) if records: record_shard_pairs.extend((record, shard) for record in records) self.buffer.push_all(record_shard_pairs) self.migrate_closed_shards()
python
def advance_shards(self): """Poll active shards for records and insert them into the buffer. Rotate exhausted shards. Returns immediately if the buffer isn't empty. """ # Don't poll shards when there are pending records. if self.buffer: return # 0) Collect new records from all active shards. record_shard_pairs = [] for shard in self.active: records = next(shard) if records: record_shard_pairs.extend((record, shard) for record in records) self.buffer.push_all(record_shard_pairs) self.migrate_closed_shards()
['def', 'advance_shards', '(', 'self', ')', ':', "# Don't poll shards when there are pending records.", 'if', 'self', '.', 'buffer', ':', 'return', '# 0) Collect new records from all active shards.', 'record_shard_pairs', '=', '[', ']', 'for', 'shard', 'in', 'self', '.', 'active', ':', 'records', '=', 'next', '(', 'shard', ')', 'if', 'records', ':', 'record_shard_pairs', '.', 'extend', '(', '(', 'record', ',', 'shard', ')', 'for', 'record', 'in', 'records', ')', 'self', '.', 'buffer', '.', 'push_all', '(', 'record_shard_pairs', ')', 'self', '.', 'migrate_closed_shards', '(', ')']
Poll active shards for records and insert them into the buffer. Rotate exhausted shards. Returns immediately if the buffer isn't empty.
['Poll', 'active', 'shards', 'for', 'records', 'and', 'insert', 'them', 'into', 'the', 'buffer', '.', 'Rotate', 'exhausted', 'shards', '.']
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/coordinator.py#L77-L94
5,234
cltk/cltk
cltk/text_reuse/automata.py
DeterministicFiniteAutomaton.complete_automaton
def complete_automaton(self): """ Adds missing transition states such that δ(q, u) is defined for every state q and any u ∈ S """ self.term_state = object() self.Q.add(self.term_state) for tv in self.Q: for u in self.S: try: self.transition[tv][u] except: self.add_transition(tv, u, self.term_state) for u in self.S: self.add_transition(self.term_state, u, self.term_state)
python
def complete_automaton(self): """ Adds missing transition states such that δ(q, u) is defined for every state q and any u ∈ S """ self.term_state = object() self.Q.add(self.term_state) for tv in self.Q: for u in self.S: try: self.transition[tv][u] except: self.add_transition(tv, u, self.term_state) for u in self.S: self.add_transition(self.term_state, u, self.term_state)
['def', 'complete_automaton', '(', 'self', ')', ':', 'self', '.', 'term_state', '=', 'object', '(', ')', 'self', '.', 'Q', '.', 'add', '(', 'self', '.', 'term_state', ')', 'for', 'tv', 'in', 'self', '.', 'Q', ':', 'for', 'u', 'in', 'self', '.', 'S', ':', 'try', ':', 'self', '.', 'transition', '[', 'tv', ']', '[', 'u', ']', 'except', ':', 'self', '.', 'add_transition', '(', 'tv', ',', 'u', ',', 'self', '.', 'term_state', ')', 'for', 'u', 'in', 'self', '.', 'S', ':', 'self', '.', 'add_transition', '(', 'self', '.', 'term_state', ',', 'u', ',', 'self', '.', 'term_state', ')']
Adds missing transition states such that δ(q, u) is defined for every state q and any u ∈ S
['Adds', 'missing', 'transition', 'states', 'such', 'that', 'δ', '(', 'q', 'u', ')', 'is', 'defined', 'for', 'every', 'state', 'q', 'and', 'any', 'u', '∈', 'S']
train
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/text_reuse/automata.py#L240-L257
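complete_automaton in the record above makes a DFA total by adding a sink state so that δ(q, u) is defined for every state q and symbol u. A self-contained sketch of the same idea with a dict-of-dicts transition table (an illustrative rewrite, not the cltk class):

def complete(states, alphabet, transition):
    """Add a sink state so every (state, symbol) pair has a transition."""
    sink = object()
    states = set(states) | {sink}
    for q in states:
        row = transition.setdefault(q, {})
        for u in alphabet:
            # Missing transitions are redirected to the sink, which
            # (being included in `states`) loops to itself on every symbol.
            row.setdefault(u, sink)
    return states, transition

states, table = complete({'q0', 'q1'}, {'a', 'b'}, {'q0': {'a': 'q1'}})
print(len(states))        # 3: q0, q1 and the sink
print(table['q0']['a'])   # q1 is untouched; only the gaps were filled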
5,235
pywbem/pywbem
attic/twisted_client.py
WBEMClient.connectionMade
def connectionMade(self): """Send a HTTP POST command with the appropriate CIM over HTTP headers and payload.""" self.factory.request_xml = str(self.factory.payload) self.sendCommand('POST', '/cimom') self.sendHeader('Host', '%s:%d' % (self.transport.addr[0], self.transport.addr[1])) self.sendHeader('User-Agent', 'pywbem/twisted') self.sendHeader('Content-length', len(self.factory.payload)) self.sendHeader('Content-type', 'application/xml') if self.factory.creds: auth = base64.b64encode('%s:%s' % (self.factory.creds[0], self.factory.creds[1])) self.sendHeader('Authorization', 'Basic %s' % auth) self.sendHeader('CIMOperation', str(self.factory.operation)) self.sendHeader('CIMMethod', str(self.factory.method)) self.sendHeader('CIMObject', str(self.factory.object)) self.endHeaders() # TODO: Figure out why twisted doesn't support unicode. An # exception should be thrown by the str() call if the payload # can't be converted to the current codepage. self.transport.write(str(self.factory.payload))
python
def connectionMade(self): """Send a HTTP POST command with the appropriate CIM over HTTP headers and payload.""" self.factory.request_xml = str(self.factory.payload) self.sendCommand('POST', '/cimom') self.sendHeader('Host', '%s:%d' % (self.transport.addr[0], self.transport.addr[1])) self.sendHeader('User-Agent', 'pywbem/twisted') self.sendHeader('Content-length', len(self.factory.payload)) self.sendHeader('Content-type', 'application/xml') if self.factory.creds: auth = base64.b64encode('%s:%s' % (self.factory.creds[0], self.factory.creds[1])) self.sendHeader('Authorization', 'Basic %s' % auth) self.sendHeader('CIMOperation', str(self.factory.operation)) self.sendHeader('CIMMethod', str(self.factory.method)) self.sendHeader('CIMObject', str(self.factory.object)) self.endHeaders() # TODO: Figure out why twisted doesn't support unicode. An # exception should be thrown by the str() call if the payload # can't be converted to the current codepage. self.transport.write(str(self.factory.payload))
['def', 'connectionMade', '(', 'self', ')', ':', 'self', '.', 'factory', '.', 'request_xml', '=', 'str', '(', 'self', '.', 'factory', '.', 'payload', ')', 'self', '.', 'sendCommand', '(', "'POST'", ',', "'/cimom'", ')', 'self', '.', 'sendHeader', '(', "'Host'", ',', "'%s:%d'", '%', '(', 'self', '.', 'transport', '.', 'addr', '[', '0', ']', ',', 'self', '.', 'transport', '.', 'addr', '[', '1', ']', ')', ')', 'self', '.', 'sendHeader', '(', "'User-Agent'", ',', "'pywbem/twisted'", ')', 'self', '.', 'sendHeader', '(', "'Content-length'", ',', 'len', '(', 'self', '.', 'factory', '.', 'payload', ')', ')', 'self', '.', 'sendHeader', '(', "'Content-type'", ',', "'application/xml'", ')', 'if', 'self', '.', 'factory', '.', 'creds', ':', 'auth', '=', 'base64', '.', 'b64encode', '(', "'%s:%s'", '%', '(', 'self', '.', 'factory', '.', 'creds', '[', '0', ']', ',', 'self', '.', 'factory', '.', 'creds', '[', '1', ']', ')', ')', 'self', '.', 'sendHeader', '(', "'Authorization'", ',', "'Basic %s'", '%', 'auth', ')', 'self', '.', 'sendHeader', '(', "'CIMOperation'", ',', 'str', '(', 'self', '.', 'factory', '.', 'operation', ')', ')', 'self', '.', 'sendHeader', '(', "'CIMMethod'", ',', 'str', '(', 'self', '.', 'factory', '.', 'method', ')', ')', 'self', '.', 'sendHeader', '(', "'CIMObject'", ',', 'str', '(', 'self', '.', 'factory', '.', 'object', ')', ')', 'self', '.', 'endHeaders', '(', ')', "# TODO: Figure out why twisted doesn't support unicode. An", '# exception should be thrown by the str() call if the payload', "# can't be converted to the current codepage.", 'self', '.', 'transport', '.', 'write', '(', 'str', '(', 'self', '.', 'factory', '.', 'payload', ')', ')']
Send a HTTP POST command with the appropriate CIM over HTTP headers and payload.
['Send', 'a', 'HTTP', 'POST', 'command', 'with', 'the', 'appropriate', 'CIM', 'over', 'HTTP', 'headers', 'and', 'payload', '.']
train
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/twisted_client.py#L58-L88
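connectionMade above hand-writes a CIM-over-HTTP POST (Basic auth plus CIMOperation/CIMMethod/CIMObject headers) on a Twisted transport. A rough standard-library equivalent using http.client is sketched below; it is not the pywbem API, the header values and host/port are placeholders, and it needs a real CIM server listening to return anything:

import base64
import http.client

def post_cim(host, port, payload, creds=None,
             operation='MethodCall', method='EnumerateInstances',
             object_path='root/cimv2'):
    conn = http.client.HTTPConnection(host, port)
    headers = {
        'Content-type': 'application/xml',
        'User-Agent': 'example-cim-client',
        'CIMOperation': operation,
        'CIMMethod': method,
        'CIMObject': object_path,
    }
    if creds:
        token = base64.b64encode('{}:{}'.format(*creds).encode()).decode()
        headers['Authorization'] = 'Basic ' + token
    # http.client fills in Host and Content-Length automatically.
    conn.request('POST', '/cimom', body=payload.encode(), headers=headers)
    return conn.getresponse()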
5,236
plotly/octogrid
octogrid/generator/generator.py
generate_network
def generate_network(user=None, reset=False): """ Assemble the network connections for a given user """ token = collect_token() try: gh = login(token=token) root_user = gh.user(user) except Exception, e: # Failed to login using the token, github3.models.GitHubError raise e graph_nodes = [] graph_edges = [] username = user if user is not None else root_user.login if not is_cached(username_to_file(username)) or reset: graph_nodes.append(username) # @TODO: take care of the 'rate limit exceeding' if imposed try: for person in gh.iter_following(username): graph_nodes.append(str(person)) graph_edges.append((root_user.login, str(person))) for i in range(1, root_user.following): user = gh.user(graph_nodes[i]) user_following_edges = [(user.login, str(person)) for person in gh.iter_following( user) if str(person) in graph_nodes] graph_edges += user_following_edges except Exception, e: raise e generate_gml(username, graph_nodes, graph_edges, True) else: reuse_gml(username) return username
python
def generate_network(user=None, reset=False): """ Assemble the network connections for a given user """ token = collect_token() try: gh = login(token=token) root_user = gh.user(user) except Exception, e: # Failed to login using the token, github3.models.GitHubError raise e graph_nodes = [] graph_edges = [] username = user if user is not None else root_user.login if not is_cached(username_to_file(username)) or reset: graph_nodes.append(username) # @TODO: take care of the 'rate limit exceeding' if imposed try: for person in gh.iter_following(username): graph_nodes.append(str(person)) graph_edges.append((root_user.login, str(person))) for i in range(1, root_user.following): user = gh.user(graph_nodes[i]) user_following_edges = [(user.login, str(person)) for person in gh.iter_following( user) if str(person) in graph_nodes] graph_edges += user_following_edges except Exception, e: raise e generate_gml(username, graph_nodes, graph_edges, True) else: reuse_gml(username) return username
['def', 'generate_network', '(', 'user', '=', 'None', ',', 'reset', '=', 'False', ')', ':', 'token', '=', 'collect_token', '(', ')', 'try', ':', 'gh', '=', 'login', '(', 'token', '=', 'token', ')', 'root_user', '=', 'gh', '.', 'user', '(', 'user', ')', 'except', 'Exception', ',', 'e', ':', '# Failed to login using the token, github3.models.GitHubError', 'raise', 'e', 'graph_nodes', '=', '[', ']', 'graph_edges', '=', '[', ']', 'username', '=', 'user', 'if', 'user', 'is', 'not', 'None', 'else', 'root_user', '.', 'login', 'if', 'not', 'is_cached', '(', 'username_to_file', '(', 'username', ')', ')', 'or', 'reset', ':', 'graph_nodes', '.', 'append', '(', 'username', ')', "# @TODO: take care of the 'rate limit exceeding' if imposed", 'try', ':', 'for', 'person', 'in', 'gh', '.', 'iter_following', '(', 'username', ')', ':', 'graph_nodes', '.', 'append', '(', 'str', '(', 'person', ')', ')', 'graph_edges', '.', 'append', '(', '(', 'root_user', '.', 'login', ',', 'str', '(', 'person', ')', ')', ')', 'for', 'i', 'in', 'range', '(', '1', ',', 'root_user', '.', 'following', ')', ':', 'user', '=', 'gh', '.', 'user', '(', 'graph_nodes', '[', 'i', ']', ')', 'user_following_edges', '=', '[', '(', 'user', '.', 'login', ',', 'str', '(', 'person', ')', ')', 'for', 'person', 'in', 'gh', '.', 'iter_following', '(', 'user', ')', 'if', 'str', '(', 'person', ')', 'in', 'graph_nodes', ']', 'graph_edges', '+=', 'user_following_edges', 'except', 'Exception', ',', 'e', ':', 'raise', 'e', 'generate_gml', '(', 'username', ',', 'graph_nodes', ',', 'graph_edges', ',', 'True', ')', 'else', ':', 'reuse_gml', '(', 'username', ')', 'return', 'username']
Assemble the network connections for a given user
['Assemble', 'the', 'network', 'connections', 'for', 'a', 'given', 'user']
train
https://github.com/plotly/octogrid/blob/46237a72c79765fe5a48af7065049c692e6457a7/octogrid/generator/generator.py#L29-L69
5,237
CI-WATER/gsshapy
gsshapy/orm/prj.py
ProjectFile._readXputMaps
def _readXputMaps(self, mapCards, directory, session, spatial=False, spatialReferenceID=4236, replaceParamFile=None): """ GSSHA Project Read Map Files from File Method """ if self.mapType in self.MAP_TYPES_SUPPORTED: for card in self.projectCards: if (card.name in mapCards) and self._noneOrNumValue(card.value): filename = card.value.strip('"') # Invoke read method on each map self._invokeRead(fileIO=RasterMapFile, directory=directory, filename=filename, session=session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile) else: for card in self.projectCards: if (card.name in mapCards) and self._noneOrNumValue(card.value): filename = card.value.strip('"') fileExtension = filename.split('.')[1] if fileExtension in self.ALWAYS_READ_AND_WRITE_MAPS: # Invoke read method on each map self._invokeRead(fileIO=RasterMapFile, directory=directory, filename=filename, session=session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile) log.warning('Could not read map files. ' 'MAP_TYPE {0} not supported.'.format(self.mapType))
python
def _readXputMaps(self, mapCards, directory, session, spatial=False, spatialReferenceID=4236, replaceParamFile=None): """ GSSHA Project Read Map Files from File Method """ if self.mapType in self.MAP_TYPES_SUPPORTED: for card in self.projectCards: if (card.name in mapCards) and self._noneOrNumValue(card.value): filename = card.value.strip('"') # Invoke read method on each map self._invokeRead(fileIO=RasterMapFile, directory=directory, filename=filename, session=session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile) else: for card in self.projectCards: if (card.name in mapCards) and self._noneOrNumValue(card.value): filename = card.value.strip('"') fileExtension = filename.split('.')[1] if fileExtension in self.ALWAYS_READ_AND_WRITE_MAPS: # Invoke read method on each map self._invokeRead(fileIO=RasterMapFile, directory=directory, filename=filename, session=session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile) log.warning('Could not read map files. ' 'MAP_TYPE {0} not supported.'.format(self.mapType))
['def', '_readXputMaps', '(', 'self', ',', 'mapCards', ',', 'directory', ',', 'session', ',', 'spatial', '=', 'False', ',', 'spatialReferenceID', '=', '4236', ',', 'replaceParamFile', '=', 'None', ')', ':', 'if', 'self', '.', 'mapType', 'in', 'self', '.', 'MAP_TYPES_SUPPORTED', ':', 'for', 'card', 'in', 'self', '.', 'projectCards', ':', 'if', '(', 'card', '.', 'name', 'in', 'mapCards', ')', 'and', 'self', '.', '_noneOrNumValue', '(', 'card', '.', 'value', ')', ':', 'filename', '=', 'card', '.', 'value', '.', 'strip', '(', '\'"\'', ')', '# Invoke read method on each map', 'self', '.', '_invokeRead', '(', 'fileIO', '=', 'RasterMapFile', ',', 'directory', '=', 'directory', ',', 'filename', '=', 'filename', ',', 'session', '=', 'session', ',', 'spatial', '=', 'spatial', ',', 'spatialReferenceID', '=', 'spatialReferenceID', ',', 'replaceParamFile', '=', 'replaceParamFile', ')', 'else', ':', 'for', 'card', 'in', 'self', '.', 'projectCards', ':', 'if', '(', 'card', '.', 'name', 'in', 'mapCards', ')', 'and', 'self', '.', '_noneOrNumValue', '(', 'card', '.', 'value', ')', ':', 'filename', '=', 'card', '.', 'value', '.', 'strip', '(', '\'"\'', ')', 'fileExtension', '=', 'filename', '.', 'split', '(', "'.'", ')', '[', '1', ']', 'if', 'fileExtension', 'in', 'self', '.', 'ALWAYS_READ_AND_WRITE_MAPS', ':', '# Invoke read method on each map', 'self', '.', '_invokeRead', '(', 'fileIO', '=', 'RasterMapFile', ',', 'directory', '=', 'directory', ',', 'filename', '=', 'filename', ',', 'session', '=', 'session', ',', 'spatial', '=', 'spatial', ',', 'spatialReferenceID', '=', 'spatialReferenceID', ',', 'replaceParamFile', '=', 'replaceParamFile', ')', 'log', '.', 'warning', '(', "'Could not read map files. '", "'MAP_TYPE {0} not supported.'", '.', 'format', '(', 'self', '.', 'mapType', ')', ')']
GSSHA Project Read Map Files from File Method
['GSSHA', 'Project', 'Read', 'Map', 'Files', 'from', 'File', 'Method']
train
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1468-L1501
5,238
fedora-infra/fedora-messaging
fedora_messaging/message.py
get_class
def get_class(schema_name): """ Retrieve the message class associated with the schema name. If no match is found, the default schema is returned and a warning is logged. Args: schema_name (six.text_type): The name of the :class:`Message` sub-class; this is typically the Python path. Returns: Message: A sub-class of :class:`Message` to create the message from. """ global _registry_loaded if not _registry_loaded: load_message_classes() try: return _schema_name_to_class[schema_name] except KeyError: _log.warning( 'The schema "%s" is not in the schema registry! Either install ' "the package with its schema definition or define a schema. " "Falling back to the default schema...", schema_name, ) return Message
python
def get_class(schema_name): """ Retrieve the message class associated with the schema name. If no match is found, the default schema is returned and a warning is logged. Args: schema_name (six.text_type): The name of the :class:`Message` sub-class; this is typically the Python path. Returns: Message: A sub-class of :class:`Message` to create the message from. """ global _registry_loaded if not _registry_loaded: load_message_classes() try: return _schema_name_to_class[schema_name] except KeyError: _log.warning( 'The schema "%s" is not in the schema registry! Either install ' "the package with its schema definition or define a schema. " "Falling back to the default schema...", schema_name, ) return Message
['def', 'get_class', '(', 'schema_name', ')', ':', 'global', '_registry_loaded', 'if', 'not', '_registry_loaded', ':', 'load_message_classes', '(', ')', 'try', ':', 'return', '_schema_name_to_class', '[', 'schema_name', ']', 'except', 'KeyError', ':', '_log', '.', 'warning', '(', '\'The schema "%s" is not in the schema registry! Either install \'', '"the package with its schema definition or define a schema. "', '"Falling back to the default schema..."', ',', 'schema_name', ',', ')', 'return', 'Message']
Retrieve the message class associated with the schema name. If no match is found, the default schema is returned and a warning is logged. Args: schema_name (six.text_type): The name of the :class:`Message` sub-class; this is typically the Python path. Returns: Message: A sub-class of :class:`Message` to create the message from.
['Retrieve', 'the', 'message', 'class', 'associated', 'with', 'the', 'schema', 'name', '.']
train
https://github.com/fedora-infra/fedora-messaging/blob/be3e88534e2b15d579bcd24f9c4b7e795cb7e0b7/fedora_messaging/message.py#L74-L100
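get_class in the record above resolves a schema name through a registry and falls back to a default class with a warning when the schema is unknown. A minimal registry-with-fallback sketch (the registry, class and schema names are invented for illustration):

import logging

log = logging.getLogger(__name__)

class Message:
    """Default class used when a schema name is not registered."""

_schema_name_to_class = {}

def register(name, cls):
    _schema_name_to_class[name] = cls

def get_class(schema_name):
    try:
        return _schema_name_to_class[schema_name]
    except KeyError:
        # Unknown schemas degrade gracefully to the base class.
        log.warning('schema %r not registered, falling back to default', schema_name)
        return Message

register('example.v1', type('ExampleV1', (Message,), {}))
print(get_class('example.v1').__name__)   # ExampleV1
print(get_class('missing.v1').__name__)   # Message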
5,239
idlesign/uwsgiconf
uwsgiconf/utils.py
ConfModule.load
def load(cls, fpath): """Loads a module and returns its object. :param str|unicode fpath: :rtype: module """ module_name = os.path.splitext(os.path.basename(fpath))[0] sys.path.insert(0, os.path.dirname(fpath)) try: module = import_module(module_name) finally: sys.path = sys.path[1:] return module
python
def load(cls, fpath): """Loads a module and returns its object. :param str|unicode fpath: :rtype: module """ module_name = os.path.splitext(os.path.basename(fpath))[0] sys.path.insert(0, os.path.dirname(fpath)) try: module = import_module(module_name) finally: sys.path = sys.path[1:] return module
['def', 'load', '(', 'cls', ',', 'fpath', ')', ':', 'module_name', '=', 'os', '.', 'path', '.', 'splitext', '(', 'os', '.', 'path', '.', 'basename', '(', 'fpath', ')', ')', '[', '0', ']', 'sys', '.', 'path', '.', 'insert', '(', '0', ',', 'os', '.', 'path', '.', 'dirname', '(', 'fpath', ')', ')', 'try', ':', 'module', '=', 'import_module', '(', 'module_name', ')', 'finally', ':', 'sys', '.', 'path', '=', 'sys', '.', 'path', '[', '1', ':', ']', 'return', 'module']
Loads a module and returns its object. :param str|unicode fpath: :rtype: module
['Loads', 'a', 'module', 'and', 'returns', 'its', 'object', '.']
train
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/utils.py#L129-L144
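ConfModule.load above imports a module from an arbitrary file path by temporarily prepending its directory to sys.path and calling import_module. On Python 3 the same effect can be achieved without touching sys.path via importlib.util; a sketch (not the uwsgiconf API, and the commented path is hypothetical):

import importlib.util
import os

def load_module(fpath):
    """Load a Python source file as a module object, leaving sys.path alone."""
    name = os.path.splitext(os.path.basename(fpath))[0]
    spec = importlib.util.spec_from_file_location(name, fpath)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# module = load_module('/path/to/uwsgicfg.py')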
5,240
tilezen/mapbox-vector-tile
mapbox_vector_tile/optimise.py
_decode_lines
def _decode_lines(geom): """ Decode a linear MVT geometry into a list of Lines. Each individual linestring in the MVT is extracted to a separate entry in the list of lines. """ lines = [] current_line = [] current_moveto = None # to keep track of the position. we'll adapt the move-to commands to all # be relative to 0,0 at the beginning of each linestring. x = 0 y = 0 end = len(geom) i = 0 while i < end: header = geom[i] cmd = header & 7 run_length = header // 8 if cmd == 1: # move to # flush previous line. if current_moveto: lines.append(Line(current_moveto, EndsAt(x, y), current_line)) current_line = [] assert run_length == 1 x += unzigzag(geom[i+1]) y += unzigzag(geom[i+2]) i += 3 current_moveto = MoveTo(x, y) elif cmd == 2: # line to assert current_moveto # we just copy this run, since it's encoding isn't going to change next_i = i + 1 + run_length * 2 current_line.extend(geom[i:next_i]) # but we still need to decode it to figure out where each move-to # command is in absolute space. for j in xrange(0, run_length): dx = unzigzag(geom[i + 1 + 2 * j]) dy = unzigzag(geom[i + 2 + 2 * j]) x += dx y += dy i = next_i else: raise ValueError('Unhandled command: %d' % cmd) if current_line: assert current_moveto lines.append(Line(current_moveto, EndsAt(x, y), current_line)) return lines
python
def _decode_lines(geom): """ Decode a linear MVT geometry into a list of Lines. Each individual linestring in the MVT is extracted to a separate entry in the list of lines. """ lines = [] current_line = [] current_moveto = None # to keep track of the position. we'll adapt the move-to commands to all # be relative to 0,0 at the beginning of each linestring. x = 0 y = 0 end = len(geom) i = 0 while i < end: header = geom[i] cmd = header & 7 run_length = header // 8 if cmd == 1: # move to # flush previous line. if current_moveto: lines.append(Line(current_moveto, EndsAt(x, y), current_line)) current_line = [] assert run_length == 1 x += unzigzag(geom[i+1]) y += unzigzag(geom[i+2]) i += 3 current_moveto = MoveTo(x, y) elif cmd == 2: # line to assert current_moveto # we just copy this run, since it's encoding isn't going to change next_i = i + 1 + run_length * 2 current_line.extend(geom[i:next_i]) # but we still need to decode it to figure out where each move-to # command is in absolute space. for j in xrange(0, run_length): dx = unzigzag(geom[i + 1 + 2 * j]) dy = unzigzag(geom[i + 2 + 2 * j]) x += dx y += dy i = next_i else: raise ValueError('Unhandled command: %d' % cmd) if current_line: assert current_moveto lines.append(Line(current_moveto, EndsAt(x, y), current_line)) return lines
['def', '_decode_lines', '(', 'geom', ')', ':', 'lines', '=', '[', ']', 'current_line', '=', '[', ']', 'current_moveto', '=', 'None', "# to keep track of the position. we'll adapt the move-to commands to all", '# be relative to 0,0 at the beginning of each linestring.', 'x', '=', '0', 'y', '=', '0', 'end', '=', 'len', '(', 'geom', ')', 'i', '=', '0', 'while', 'i', '<', 'end', ':', 'header', '=', 'geom', '[', 'i', ']', 'cmd', '=', 'header', '&', '7', 'run_length', '=', 'header', '//', '8', 'if', 'cmd', '==', '1', ':', '# move to', '# flush previous line.', 'if', 'current_moveto', ':', 'lines', '.', 'append', '(', 'Line', '(', 'current_moveto', ',', 'EndsAt', '(', 'x', ',', 'y', ')', ',', 'current_line', ')', ')', 'current_line', '=', '[', ']', 'assert', 'run_length', '==', '1', 'x', '+=', 'unzigzag', '(', 'geom', '[', 'i', '+', '1', ']', ')', 'y', '+=', 'unzigzag', '(', 'geom', '[', 'i', '+', '2', ']', ')', 'i', '+=', '3', 'current_moveto', '=', 'MoveTo', '(', 'x', ',', 'y', ')', 'elif', 'cmd', '==', '2', ':', '# line to', 'assert', 'current_moveto', "# we just copy this run, since it's encoding isn't going to change", 'next_i', '=', 'i', '+', '1', '+', 'run_length', '*', '2', 'current_line', '.', 'extend', '(', 'geom', '[', 'i', ':', 'next_i', ']', ')', '# but we still need to decode it to figure out where each move-to', '# command is in absolute space.', 'for', 'j', 'in', 'xrange', '(', '0', ',', 'run_length', ')', ':', 'dx', '=', 'unzigzag', '(', 'geom', '[', 'i', '+', '1', '+', '2', '*', 'j', ']', ')', 'dy', '=', 'unzigzag', '(', 'geom', '[', 'i', '+', '2', '+', '2', '*', 'j', ']', ')', 'x', '+=', 'dx', 'y', '+=', 'dy', 'i', '=', 'next_i', 'else', ':', 'raise', 'ValueError', '(', "'Unhandled command: %d'", '%', 'cmd', ')', 'if', 'current_line', ':', 'assert', 'current_moveto', 'lines', '.', 'append', '(', 'Line', '(', 'current_moveto', ',', 'EndsAt', '(', 'x', ',', 'y', ')', ',', 'current_line', ')', ')', 'return', 'lines']
Decode a linear MVT geometry into a list of Lines. Each individual linestring in the MVT is extracted to a separate entry in the list of lines.
['Decode', 'a', 'linear', 'MVT', 'geometry', 'into', 'a', 'list', 'of', 'Lines', '.']
train
https://github.com/tilezen/mapbox-vector-tile/blob/7327b8cff0aa2de1d5233e556bf00429ba2126a0/mapbox_vector_tile/optimise.py#L85-L146
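_decode_lines above walks Mapbox Vector Tile geometry, where each command header packs the command id in the low 3 bits and a run length above them, and coordinates are zigzag-encoded deltas. A tiny sketch of just the header and zigzag arithmetic it relies on:

def zigzag(n):
    """Map signed to unsigned: 0, -1, 1, -2, 2 -> 0, 1, 2, 3, 4."""
    return (n << 1) if n >= 0 else ((-n) << 1) - 1

def unzigzag(z):
    return (z >> 1) if not (z & 1) else -((z + 1) >> 1)

def command(cmd_id, run_length):
    """Pack an MVT command header: id in the low 3 bits, count above."""
    return (run_length << 3) | cmd_id

# A MoveTo(+3, -4) followed by a single LineTo(+2, +2):
geom = [command(1, 1), zigzag(3), zigzag(-4),
        command(2, 1), zigzag(2), zigzag(2)]
header = geom[0]
print(header & 7, header // 8)                # 1 1  (command id, run length)
print(unzigzag(geom[1]), unzigzag(geom[2]))   # 3 -4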
5,241
iotile/coretools
transport_plugins/awsiot/iotile_transport_awsiot/connection_manager.py
ConnectionManager._get_connection_state
def _get_connection_state(self, conn_or_int_id): """Get a connection's state by either conn_id or internal_id This routine must only be called from the internal worker thread. Args: conn_or_int_id (int, string): The external integer connection id or and internal string connection id """ key = conn_or_int_id if isinstance(key, str): table = self._int_connections elif isinstance(key, int): table = self._connections else: raise ArgumentError("You must supply either an int connection id or a string internal id to _get_connection_state", id=key) if key not in table: return self.Disconnected data = table[key] return data['state']
python
def _get_connection_state(self, conn_or_int_id): """Get a connection's state by either conn_id or internal_id This routine must only be called from the internal worker thread. Args: conn_or_int_id (int, string): The external integer connection id or and internal string connection id """ key = conn_or_int_id if isinstance(key, str): table = self._int_connections elif isinstance(key, int): table = self._connections else: raise ArgumentError("You must supply either an int connection id or a string internal id to _get_connection_state", id=key) if key not in table: return self.Disconnected data = table[key] return data['state']
['def', '_get_connection_state', '(', 'self', ',', 'conn_or_int_id', ')', ':', 'key', '=', 'conn_or_int_id', 'if', 'isinstance', '(', 'key', ',', 'str', ')', ':', 'table', '=', 'self', '.', '_int_connections', 'elif', 'isinstance', '(', 'key', ',', 'int', ')', ':', 'table', '=', 'self', '.', '_connections', 'else', ':', 'raise', 'ArgumentError', '(', '"You must supply either an int connection id or a string internal id to _get_connection_state"', ',', 'id', '=', 'key', ')', 'if', 'key', 'not', 'in', 'table', ':', 'return', 'self', '.', 'Disconnected', 'data', '=', 'table', '[', 'key', ']', 'return', 'data', '[', "'state'", ']']
Get a connection's state by either conn_id or internal_id This routine must only be called from the internal worker thread. Args: conn_or_int_id (int, string): The external integer connection id or and internal string connection id
['Get', 'a', 'connection', 's', 'state', 'by', 'either', 'conn_id', 'or', 'internal_id']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/connection_manager.py#L236-L258
5,242
drslump/pyshould
pyshould/matchers.py
lookup
def lookup(alias): """ Tries to find a matcher callable associated to the given alias. If an exact match does not exists it will try normalizing it and even removing underscores to find one. """ if alias in matchers: return matchers[alias] else: norm = normalize(alias) if norm in normalized: alias = normalized[norm] return matchers[alias] # Check without snake case if -1 != alias.find('_'): norm = normalize(alias).replace('_', '') return lookup(norm) return None
python
def lookup(alias): """ Tries to find a matcher callable associated to the given alias. If an exact match does not exists it will try normalizing it and even removing underscores to find one. """ if alias in matchers: return matchers[alias] else: norm = normalize(alias) if norm in normalized: alias = normalized[norm] return matchers[alias] # Check without snake case if -1 != alias.find('_'): norm = normalize(alias).replace('_', '') return lookup(norm) return None
['def', 'lookup', '(', 'alias', ')', ':', 'if', 'alias', 'in', 'matchers', ':', 'return', 'matchers', '[', 'alias', ']', 'else', ':', 'norm', '=', 'normalize', '(', 'alias', ')', 'if', 'norm', 'in', 'normalized', ':', 'alias', '=', 'normalized', '[', 'norm', ']', 'return', 'matchers', '[', 'alias', ']', '# Check without snake case', 'if', '-', '1', '!=', 'alias', '.', 'find', '(', "'_'", ')', ':', 'norm', '=', 'normalize', '(', 'alias', ')', '.', 'replace', '(', "'_'", ',', "''", ')', 'return', 'lookup', '(', 'norm', ')', 'return', 'None']
Tries to find a matcher callable associated to the given alias. If an exact match does not exists it will try normalizing it and even removing underscores to find one.
['Tries', 'to', 'find', 'a', 'matcher', 'callable', 'associated', 'to', 'the', 'given', 'alias', '.', 'If', 'an', 'exact', 'match', 'does', 'not', 'exists', 'it', 'will', 'try', 'normalizing', 'it', 'and', 'even', 'removing', 'underscores', 'to', 'find', 'one', '.']
train
https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/matchers.py#L105-L124
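lookup in the record above resolves matcher aliases in three passes: exact name, normalized name, then the normalized name with underscores stripped. A compact sketch of the same cascade with a plain dict and a naive stand-in for normalize() (the real normalization rules live elsewhere in the library):

matchers = {'be_equal': object()}
normalized = {'beequal': 'be_equal'}

def normalize(alias):
    # Stand-in only; pyshould applies its own normalization here.
    return alias.lower()

def lookup(alias):
    if alias in matchers:
        return matchers[alias]
    norm = normalize(alias)
    if norm in normalized:
        return matchers[normalized[norm]]
    # Last resort: retry with underscores removed.
    if '_' in alias:
        return lookup(normalize(alias).replace('_', ''))
    return None

print(lookup('Be_Equal') is lookup('be_equal'))   # True, via the fallbacks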
5,243
saltstack/salt
salt/modules/cron.py
_cron_matched
def _cron_matched(cron, cmd, identifier=None): '''Check if: - we find a cron with same cmd, old state behavior - but also be smart enough to remove states changed crons where we do not removed priorly by a cron.absent by matching on the provided identifier. We assure retrocompatibility by only checking on identifier if and only if an identifier was set on the serialized crontab ''' ret, id_matched = False, None cid = _cron_id(cron) if cid: if not identifier: identifier = SALT_CRON_NO_IDENTIFIER eidentifier = _ensure_string(identifier) # old style second round # after saving crontab, we must check that if # we have not the same command, but the default id # to not set that as a match if ( cron.get('cmd', None) != cmd and cid == SALT_CRON_NO_IDENTIFIER and eidentifier == SALT_CRON_NO_IDENTIFIER ): id_matched = False else: # on saving, be sure not to overwrite a cron # with specific identifier but also track # crons where command is the same # but with the default if that we gonna overwrite if ( cron.get('cmd', None) == cmd and cid == SALT_CRON_NO_IDENTIFIER and identifier ): cid = eidentifier id_matched = eidentifier == cid if ( ((id_matched is None) and cmd == cron.get('cmd', None)) or id_matched ): ret = True return ret
python
def _cron_matched(cron, cmd, identifier=None): '''Check if: - we find a cron with same cmd, old state behavior - but also be smart enough to remove states changed crons where we do not removed priorly by a cron.absent by matching on the provided identifier. We assure retrocompatibility by only checking on identifier if and only if an identifier was set on the serialized crontab ''' ret, id_matched = False, None cid = _cron_id(cron) if cid: if not identifier: identifier = SALT_CRON_NO_IDENTIFIER eidentifier = _ensure_string(identifier) # old style second round # after saving crontab, we must check that if # we have not the same command, but the default id # to not set that as a match if ( cron.get('cmd', None) != cmd and cid == SALT_CRON_NO_IDENTIFIER and eidentifier == SALT_CRON_NO_IDENTIFIER ): id_matched = False else: # on saving, be sure not to overwrite a cron # with specific identifier but also track # crons where command is the same # but with the default if that we gonna overwrite if ( cron.get('cmd', None) == cmd and cid == SALT_CRON_NO_IDENTIFIER and identifier ): cid = eidentifier id_matched = eidentifier == cid if ( ((id_matched is None) and cmd == cron.get('cmd', None)) or id_matched ): ret = True return ret
['def', '_cron_matched', '(', 'cron', ',', 'cmd', ',', 'identifier', '=', 'None', ')', ':', 'ret', ',', 'id_matched', '=', 'False', ',', 'None', 'cid', '=', '_cron_id', '(', 'cron', ')', 'if', 'cid', ':', 'if', 'not', 'identifier', ':', 'identifier', '=', 'SALT_CRON_NO_IDENTIFIER', 'eidentifier', '=', '_ensure_string', '(', 'identifier', ')', '# old style second round', '# after saving crontab, we must check that if', '# we have not the same command, but the default id', '# to not set that as a match', 'if', '(', 'cron', '.', 'get', '(', "'cmd'", ',', 'None', ')', '!=', 'cmd', 'and', 'cid', '==', 'SALT_CRON_NO_IDENTIFIER', 'and', 'eidentifier', '==', 'SALT_CRON_NO_IDENTIFIER', ')', ':', 'id_matched', '=', 'False', 'else', ':', '# on saving, be sure not to overwrite a cron', '# with specific identifier but also track', '# crons where command is the same', '# but with the default if that we gonna overwrite', 'if', '(', 'cron', '.', 'get', '(', "'cmd'", ',', 'None', ')', '==', 'cmd', 'and', 'cid', '==', 'SALT_CRON_NO_IDENTIFIER', 'and', 'identifier', ')', ':', 'cid', '=', 'eidentifier', 'id_matched', '=', 'eidentifier', '==', 'cid', 'if', '(', '(', '(', 'id_matched', 'is', 'None', ')', 'and', 'cmd', '==', 'cron', '.', 'get', '(', "'cmd'", ',', 'None', ')', ')', 'or', 'id_matched', ')', ':', 'ret', '=', 'True', 'return', 'ret']
Check if: - we find a cron with same cmd, old state behavior - but also be smart enough to remove states changed crons where we do not removed priorly by a cron.absent by matching on the provided identifier. We assure retrocompatibility by only checking on identifier if and only if an identifier was set on the serialized crontab
['Check', 'if', ':', '-', 'we', 'find', 'a', 'cron', 'with', 'same', 'cmd', 'old', 'state', 'behavior', '-', 'but', 'also', 'be', 'smart', 'enough', 'to', 'remove', 'states', 'changed', 'crons', 'where', 'we', 'do', 'not', 'removed', 'priorly', 'by', 'a', 'cron', '.', 'absent', 'by', 'matching', 'on', 'the', 'provided', 'identifier', '.', 'We', 'assure', 'retrocompatibility', 'by', 'only', 'checking', 'on', 'identifier', 'if', 'and', 'only', 'if', 'an', 'identifier', 'was', 'set', 'on', 'the', 'serialized', 'crontab']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cron.py#L63-L105
5,244
openego/eTraGo
etrago/tools/utilities.py
find_snapshots
def find_snapshots(network, carrier, maximum = True, minimum = True, n = 3): """ Function that returns snapshots with maximum and/or minimum feed-in of selected carrier. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA carrier: str Selected carrier of generators maximum: bool Choose if timestep of maximal feed-in is returned. minimum: bool Choose if timestep of minimal feed-in is returned. n: int Number of maximal/minimal snapshots Returns ------- calc_snapshots : 'pandas.core.indexes.datetimes.DatetimeIndex' List containing snapshots """ if carrier == 'residual load': power_plants = network.generators[network.generators.carrier. isin(['solar', 'wind', 'wind_onshore'])] power_plants_t = network.generators.p_nom[power_plants.index] * \ network.generators_t.p_max_pu[power_plants.index] load = network.loads_t.p_set.sum(axis=1) all_renew = power_plants_t.sum(axis=1) all_carrier = load - all_renew if carrier in ('solar', 'wind', 'wind_onshore', 'wind_offshore', 'run_of_river'): power_plants = network.generators[network.generators.carrier == carrier] power_plants_t = network.generators.p_nom[power_plants.index] * \ network.generators_t.p_max_pu[power_plants.index] all_carrier = power_plants_t.sum(axis=1) if maximum and not minimum: times = all_carrier.sort_values().head(n=n) if minimum and not maximum: times = all_carrier.sort_values().tail(n=n) if maximum and minimum: times = all_carrier.sort_values().head(n=n) times = times.append(all_carrier.sort_values().tail(n=n)) calc_snapshots = all_carrier.index[all_carrier.index.isin(times.index)] return calc_snapshots
python
def find_snapshots(network, carrier, maximum = True, minimum = True, n = 3): """ Function that returns snapshots with maximum and/or minimum feed-in of selected carrier. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA carrier: str Selected carrier of generators maximum: bool Choose if timestep of maximal feed-in is returned. minimum: bool Choose if timestep of minimal feed-in is returned. n: int Number of maximal/minimal snapshots Returns ------- calc_snapshots : 'pandas.core.indexes.datetimes.DatetimeIndex' List containing snapshots """ if carrier == 'residual load': power_plants = network.generators[network.generators.carrier. isin(['solar', 'wind', 'wind_onshore'])] power_plants_t = network.generators.p_nom[power_plants.index] * \ network.generators_t.p_max_pu[power_plants.index] load = network.loads_t.p_set.sum(axis=1) all_renew = power_plants_t.sum(axis=1) all_carrier = load - all_renew if carrier in ('solar', 'wind', 'wind_onshore', 'wind_offshore', 'run_of_river'): power_plants = network.generators[network.generators.carrier == carrier] power_plants_t = network.generators.p_nom[power_plants.index] * \ network.generators_t.p_max_pu[power_plants.index] all_carrier = power_plants_t.sum(axis=1) if maximum and not minimum: times = all_carrier.sort_values().head(n=n) if minimum and not maximum: times = all_carrier.sort_values().tail(n=n) if maximum and minimum: times = all_carrier.sort_values().head(n=n) times = times.append(all_carrier.sort_values().tail(n=n)) calc_snapshots = all_carrier.index[all_carrier.index.isin(times.index)] return calc_snapshots
['def', 'find_snapshots', '(', 'network', ',', 'carrier', ',', 'maximum', '=', 'True', ',', 'minimum', '=', 'True', ',', 'n', '=', '3', ')', ':', 'if', 'carrier', '==', "'residual load'", ':', 'power_plants', '=', 'network', '.', 'generators', '[', 'network', '.', 'generators', '.', 'carrier', '.', 'isin', '(', '[', "'solar'", ',', "'wind'", ',', "'wind_onshore'", ']', ')', ']', 'power_plants_t', '=', 'network', '.', 'generators', '.', 'p_nom', '[', 'power_plants', '.', 'index', ']', '*', 'network', '.', 'generators_t', '.', 'p_max_pu', '[', 'power_plants', '.', 'index', ']', 'load', '=', 'network', '.', 'loads_t', '.', 'p_set', '.', 'sum', '(', 'axis', '=', '1', ')', 'all_renew', '=', 'power_plants_t', '.', 'sum', '(', 'axis', '=', '1', ')', 'all_carrier', '=', 'load', '-', 'all_renew', 'if', 'carrier', 'in', '(', "'solar'", ',', "'wind'", ',', "'wind_onshore'", ',', "'wind_offshore'", ',', "'run_of_river'", ')', ':', 'power_plants', '=', 'network', '.', 'generators', '[', 'network', '.', 'generators', '.', 'carrier', '==', 'carrier', ']', 'power_plants_t', '=', 'network', '.', 'generators', '.', 'p_nom', '[', 'power_plants', '.', 'index', ']', '*', 'network', '.', 'generators_t', '.', 'p_max_pu', '[', 'power_plants', '.', 'index', ']', 'all_carrier', '=', 'power_plants_t', '.', 'sum', '(', 'axis', '=', '1', ')', 'if', 'maximum', 'and', 'not', 'minimum', ':', 'times', '=', 'all_carrier', '.', 'sort_values', '(', ')', '.', 'head', '(', 'n', '=', 'n', ')', 'if', 'minimum', 'and', 'not', 'maximum', ':', 'times', '=', 'all_carrier', '.', 'sort_values', '(', ')', '.', 'tail', '(', 'n', '=', 'n', ')', 'if', 'maximum', 'and', 'minimum', ':', 'times', '=', 'all_carrier', '.', 'sort_values', '(', ')', '.', 'head', '(', 'n', '=', 'n', ')', 'times', '=', 'times', '.', 'append', '(', 'all_carrier', '.', 'sort_values', '(', ')', '.', 'tail', '(', 'n', '=', 'n', ')', ')', 'calc_snapshots', '=', 'all_carrier', '.', 'index', '[', 'all_carrier', '.', 'index', '.', 'isin', '(', 'times', '.', 'index', ')', ']', 'return', 'calc_snapshots']
Function that returns snapshots with maximum and/or minimum feed-in of
selected carrier.

Parameters
----------
network : :class:`pypsa.Network
    Overall container of PyPSA
carrier: str
    Selected carrier of generators
maximum: bool
    Choose if timestep of maximal feed-in is returned.
minimum: bool
    Choose if timestep of minimal feed-in is returned.
n: int
    Number of maximal/minimal snapshots

Returns
-------
calc_snapshots : 'pandas.core.indexes.datetimes.DatetimeIndex'
    List containing snapshots
['Function', 'that', 'returns', 'snapshots', 'with', 'maximum', 'and', '/', 'or', 'minimum', 'feed', '-', 'in', 'of', 'selected', 'carrier', '.']
train
https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/tools/utilities.py#L1477-L1532
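Aside (not part of the dataset record): the selection logic above boils down to picking the n largest and n smallest timesteps of a time series. A minimal pandas-only sketch of that idea, with an invented feed-in series standing in for a PyPSA network:

import pandas as pd

# Hypothetical hourly feed-in series standing in for all_carrier above.
feed_in = pd.Series(
    [5.0, 42.0, 17.0, 3.0, 28.0, 11.0],
    index=pd.date_range("2011-01-01", periods=6, freq="H"),
)

n = 2
largest = feed_in.nlargest(n)    # n snapshots with maximal feed-in
smallest = feed_in.nsmallest(n)  # n snapshots with minimal feed-in

# Combine both selections and keep chronological order, mirroring the
# index-based filtering at the end of find_snapshots.
selected = pd.concat([largest, smallest])
calc_snapshots = feed_in.index[feed_in.index.isin(selected.index)]
print(calc_snapshots)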
5,245
gwpy/gwpy
gwpy/io/ligolw.py
write_tables
def write_tables(target, tables, append=False, overwrite=False, **kwargs):
    """Write an LIGO_LW table to file

    Parameters
    ----------
    target : `str`, `file`, :class:`~ligo.lw.ligolw.Document`
        the file or document to write into
    tables : `list`, `tuple` of :class:`~ligo.lw.table.Table`
        the tables to write
    append : `bool`, optional, default: `False`
        if `True`, append to an existing file/table, otherwise `overwrite`
    overwrite : `bool`, optional, default: `False`
        if `True`, delete an existing instance of the table type, otherwise
        append new rows
    **kwargs
        other keyword arguments to pass to
        :func:`~ligo.lw.utils.load_filename`, or
        :func:`~ligo.lw.utils.load_fileobj` as appropriate
    """
    from ligo.lw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler)
    from ligo.lw import utils as ligolw_utils

    # allow writing directly to XML
    if isinstance(target, (Document, LIGO_LW)):
        xmldoc = target
    # open existing document, if possible
    elif append:
        xmldoc = open_xmldoc(
            target, contenthandler=kwargs.pop('contenthandler',
                                              LIGOLWContentHandler))
    # fail on existing document and not overwriting
    elif (not overwrite and isinstance(target, string_types) and
          os.path.isfile(target)):
        raise IOError("File exists: {}".format(target))
    else:  # or create a new document
        xmldoc = Document()

    # convert table to format
    write_tables_to_document(xmldoc, tables, overwrite=overwrite)

    # write file
    if isinstance(target, string_types):
        kwargs.setdefault('gz', target.endswith('.gz'))
        ligolw_utils.write_filename(xmldoc, target, **kwargs)
    elif isinstance(target, FILE_LIKE):
        kwargs.setdefault('gz', target.name.endswith('.gz'))
        ligolw_utils.write_fileobj(xmldoc, target, **kwargs)
python
def write_tables(target, tables, append=False, overwrite=False, **kwargs):
    """Write an LIGO_LW table to file

    Parameters
    ----------
    target : `str`, `file`, :class:`~ligo.lw.ligolw.Document`
        the file or document to write into
    tables : `list`, `tuple` of :class:`~ligo.lw.table.Table`
        the tables to write
    append : `bool`, optional, default: `False`
        if `True`, append to an existing file/table, otherwise `overwrite`
    overwrite : `bool`, optional, default: `False`
        if `True`, delete an existing instance of the table type, otherwise
        append new rows
    **kwargs
        other keyword arguments to pass to
        :func:`~ligo.lw.utils.load_filename`, or
        :func:`~ligo.lw.utils.load_fileobj` as appropriate
    """
    from ligo.lw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler)
    from ligo.lw import utils as ligolw_utils

    # allow writing directly to XML
    if isinstance(target, (Document, LIGO_LW)):
        xmldoc = target
    # open existing document, if possible
    elif append:
        xmldoc = open_xmldoc(
            target, contenthandler=kwargs.pop('contenthandler',
                                              LIGOLWContentHandler))
    # fail on existing document and not overwriting
    elif (not overwrite and isinstance(target, string_types) and
          os.path.isfile(target)):
        raise IOError("File exists: {}".format(target))
    else:  # or create a new document
        xmldoc = Document()

    # convert table to format
    write_tables_to_document(xmldoc, tables, overwrite=overwrite)

    # write file
    if isinstance(target, string_types):
        kwargs.setdefault('gz', target.endswith('.gz'))
        ligolw_utils.write_filename(xmldoc, target, **kwargs)
    elif isinstance(target, FILE_LIKE):
        kwargs.setdefault('gz', target.name.endswith('.gz'))
        ligolw_utils.write_fileobj(xmldoc, target, **kwargs)
['def', 'write_tables', '(', 'target', ',', 'tables', ',', 'append', '=', 'False', ',', 'overwrite', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'from', 'ligo', '.', 'lw', '.', 'ligolw', 'import', '(', 'Document', ',', 'LIGO_LW', ',', 'LIGOLWContentHandler', ')', 'from', 'ligo', '.', 'lw', 'import', 'utils', 'as', 'ligolw_utils', '# allow writing directly to XML', 'if', 'isinstance', '(', 'target', ',', '(', 'Document', ',', 'LIGO_LW', ')', ')', ':', 'xmldoc', '=', 'target', '# open existing document, if possible', 'elif', 'append', ':', 'xmldoc', '=', 'open_xmldoc', '(', 'target', ',', 'contenthandler', '=', 'kwargs', '.', 'pop', '(', "'contenthandler'", ',', 'LIGOLWContentHandler', ')', ')', '# fail on existing document and not overwriting', 'elif', '(', 'not', 'overwrite', 'and', 'isinstance', '(', 'target', ',', 'string_types', ')', 'and', 'os', '.', 'path', '.', 'isfile', '(', 'target', ')', ')', ':', 'raise', 'IOError', '(', '"File exists: {}"', '.', 'format', '(', 'target', ')', ')', 'else', ':', '# or create a new document', 'xmldoc', '=', 'Document', '(', ')', '# convert table to format', 'write_tables_to_document', '(', 'xmldoc', ',', 'tables', ',', 'overwrite', '=', 'overwrite', ')', '# write file', 'if', 'isinstance', '(', 'target', ',', 'string_types', ')', ':', 'kwargs', '.', 'setdefault', '(', "'gz'", ',', 'target', '.', 'endswith', '(', "'.gz'", ')', ')', 'ligolw_utils', '.', 'write_filename', '(', 'xmldoc', ',', 'target', ',', '*', '*', 'kwargs', ')', 'elif', 'isinstance', '(', 'target', ',', 'FILE_LIKE', ')', ':', 'kwargs', '.', 'setdefault', '(', "'gz'", ',', 'target', '.', 'name', '.', 'endswith', '(', "'.gz'", ')', ')', 'ligolw_utils', '.', 'write_fileobj', '(', 'xmldoc', ',', 'target', ',', '*', '*', 'kwargs', ')']
Write an LIGO_LW table to file

Parameters
----------
target : `str`, `file`, :class:`~ligo.lw.ligolw.Document`
    the file or document to write into
tables : `list`, `tuple` of :class:`~ligo.lw.table.Table`
    the tables to write
append : `bool`, optional, default: `False`
    if `True`, append to an existing file/table, otherwise `overwrite`
overwrite : `bool`, optional, default: `False`
    if `True`, delete an existing instance of the table type, otherwise
    append new rows
**kwargs
    other keyword arguments to pass to
    :func:`~ligo.lw.utils.load_filename`, or
    :func:`~ligo.lw.utils.load_fileobj` as appropriate
['Write', 'an', 'LIGO_LW', 'table', 'to', 'file']
train
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L498-L548
5,246
thusoy/python-crypt
pcrypt.py
double_prompt_for_plaintext_password
def double_prompt_for_plaintext_password():
    """Get the desired password from the user through a double prompt."""
    password = 1
    password_repeat = 2
    while password != password_repeat:
        password = getpass.getpass('Enter password: ')
        password_repeat = getpass.getpass('Repeat password: ')
        if password != password_repeat:
            sys.stderr.write('Passwords do not match, try again.\n')
    return password
python
def double_prompt_for_plaintext_password():
    """Get the desired password from the user through a double prompt."""
    password = 1
    password_repeat = 2
    while password != password_repeat:
        password = getpass.getpass('Enter password: ')
        password_repeat = getpass.getpass('Repeat password: ')
        if password != password_repeat:
            sys.stderr.write('Passwords do not match, try again.\n')
    return password
['def', 'double_prompt_for_plaintext_password', '(', ')', ':', 'password', '=', '1', 'password_repeat', '=', '2', 'while', 'password', '!=', 'password_repeat', ':', 'password', '=', 'getpass', '.', 'getpass', '(', "'Enter password: '", ')', 'password_repeat', '=', 'getpass', '.', 'getpass', '(', "'Repeat password: '", ')', 'if', 'password', '!=', 'password_repeat', ':', 'sys', '.', 'stderr', '.', 'write', '(', "'Passwords do not match, try again.\\n'", ')', 'return', 'password']
Get the desired password from the user through a double prompt.
['Get', 'the', 'desired', 'password', 'from', 'the', 'user', 'through', 'a', 'double', 'prompt', '.']
train
https://github.com/thusoy/python-crypt/blob/0835f7568c14762890cea70b7605d04b3459e4a0/pcrypt.py#L286-L295
5,247
senseobservationsystems/commonsense-python-lib
senseapi.py
SenseAPI.DataProcessorsDelete
def DataProcessorsDelete(self, dataProcessorId):
    """
    Delete a data processor in CommonSense.

    @param dataProcessorId - The id of the data processor that will be deleted.

    @return (bool) - Boolean indicating whether GroupsPost was successful.
    """
    if self.__SenseApiCall__('/dataprocessors/{id}.json'.format(id = dataProcessorId), 'DELETE'):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False
python
def DataProcessorsDelete(self, dataProcessorId):
    """
    Delete a data processor in CommonSense.

    @param dataProcessorId - The id of the data processor that will be deleted.

    @return (bool) - Boolean indicating whether GroupsPost was successful.
    """
    if self.__SenseApiCall__('/dataprocessors/{id}.json'.format(id = dataProcessorId), 'DELETE'):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False
['def', 'DataProcessorsDelete', '(', 'self', ',', 'dataProcessorId', ')', ':', 'if', 'self', '.', '__SenseApiCall__', '(', "'/dataprocessors/{id}.json'", '.', 'format', '(', 'id', '=', 'dataProcessorId', ')', ',', "'DELETE'", ')', ':', 'return', 'True', 'else', ':', 'self', '.', '__error__', '=', '"api call unsuccessful"', 'return', 'False']
Delete a data processor in CommonSense.

@param dataProcessorId - The id of the data processor that will be deleted.

@return (bool) - Boolean indicating whether GroupsPost was successful.
['Delete', 'a', 'data', 'processor', 'in', 'CommonSense', '.']
train
https://github.com/senseobservationsystems/commonsense-python-lib/blob/aac59a1751ef79eb830b3ca1fab6ef2c83931f87/senseapi.py#L1830-L1842
5,248
ninuxorg/nodeshot
nodeshot/community/profiles/serializers.py
ResetPasswordKeySerializer.update
def update(self, instance, validated_data):
    """ change password """
    instance.user.set_password(validated_data["password1"])
    instance.user.full_clean()
    instance.user.save()
    # mark password reset object as reset
    instance.reset = True
    instance.full_clean()
    instance.save()
    return instance
python
def update(self, instance, validated_data):
    """ change password """
    instance.user.set_password(validated_data["password1"])
    instance.user.full_clean()
    instance.user.save()
    # mark password reset object as reset
    instance.reset = True
    instance.full_clean()
    instance.save()
    return instance
['def', 'update', '(', 'self', ',', 'instance', ',', 'validated_data', ')', ':', 'instance', '.', 'user', '.', 'set_password', '(', 'validated_data', '[', '"password1"', ']', ')', 'instance', '.', 'user', '.', 'full_clean', '(', ')', 'instance', '.', 'user', '.', 'save', '(', ')', '# mark password reset object as reset', 'instance', '.', 'reset', '=', 'True', 'instance', '.', 'full_clean', '(', ')', 'instance', '.', 'save', '(', ')', 'return', 'instance']
change password
['change', 'password']
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/profiles/serializers.py#L312-L321
5,249
mcs07/ChemDataExtractor
chemdataextractor/cli/pos.py
train
def train(ctx, output, corpus, clusters):
    """Train POS Tagger."""
    click.echo('chemdataextractor.pos.train')
    click.echo('Output: %s' % output)
    click.echo('Corpus: %s' % corpus)
    click.echo('Clusters: %s' % clusters)

    wsj_sents = []
    genia_sents = []

    if corpus == 'wsj' or corpus == 'wsj+genia':
        wsj_sents = list(wsj_training.tagged_sents())
        # For WSJ, remove all tokens with -NONE- tag
        for i, wsj_sent in enumerate(wsj_sents):
            wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']

    if corpus == 'genia' or corpus == 'wsj+genia':
        genia_sents = list(genia_training.tagged_sents())
        # Translate GENIA
        for i, genia_sent in enumerate(genia_sents):
            for j, (token, tag) in enumerate(genia_sent):
                if tag == '(':
                    genia_sents[i][j] = (token, '-LRB-')  # ( to -RLB- (also do for evaluation)
                elif tag == ')':
                    genia_sents[i][j] = (token, '-RRB-')  # ) to -RRB- (also do for evaluation)
                elif tag == 'CT':
                    genia_sents[i][j] = (token, 'DT')  # Typo?
                elif tag == 'XT':
                    genia_sents[i][j] = (token, 'DT')  # Typo?
                elif tag == '-':
                    genia_sents[i][j] = (token, ':')  # Single hyphen character for dash
                elif tag == 'N':
                    genia_sents[i][j] = (token, 'NN')  # Typo?
                elif tag == 'PP':
                    genia_sents[i][j] = (token, 'PRP')  # Typo?
                elif tag == '' and token == ')':
                    genia_sents[i][j] = (token, '-RRB-')  # Typo?
                elif tag == '' and token == 'IFN-gamma':
                    genia_sents[i][j] = (token, 'NN')  # Typo?
                elif '|' in tag:
                    genia_sents[i][j] = (token, tag.split('|')[0])  # If contains |, choose first part
            # Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
            genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]

    if corpus == 'wsj':
        training_corpus = wsj_sents
    elif corpus == 'genia':
        training_corpus = genia_sents
    elif corpus == 'wsj+genia':
        training_corpus = wsj_sents + genia_sents
    else:
        raise click.ClickException('Invalid corpus')

    tagger = ChemCrfPosTagger(clusters=clusters)
    tagger.train(training_corpus, output)
python
def train(ctx, output, corpus, clusters):
    """Train POS Tagger."""
    click.echo('chemdataextractor.pos.train')
    click.echo('Output: %s' % output)
    click.echo('Corpus: %s' % corpus)
    click.echo('Clusters: %s' % clusters)

    wsj_sents = []
    genia_sents = []

    if corpus == 'wsj' or corpus == 'wsj+genia':
        wsj_sents = list(wsj_training.tagged_sents())
        # For WSJ, remove all tokens with -NONE- tag
        for i, wsj_sent in enumerate(wsj_sents):
            wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']

    if corpus == 'genia' or corpus == 'wsj+genia':
        genia_sents = list(genia_training.tagged_sents())
        # Translate GENIA
        for i, genia_sent in enumerate(genia_sents):
            for j, (token, tag) in enumerate(genia_sent):
                if tag == '(':
                    genia_sents[i][j] = (token, '-LRB-')  # ( to -RLB- (also do for evaluation)
                elif tag == ')':
                    genia_sents[i][j] = (token, '-RRB-')  # ) to -RRB- (also do for evaluation)
                elif tag == 'CT':
                    genia_sents[i][j] = (token, 'DT')  # Typo?
                elif tag == 'XT':
                    genia_sents[i][j] = (token, 'DT')  # Typo?
                elif tag == '-':
                    genia_sents[i][j] = (token, ':')  # Single hyphen character for dash
                elif tag == 'N':
                    genia_sents[i][j] = (token, 'NN')  # Typo?
                elif tag == 'PP':
                    genia_sents[i][j] = (token, 'PRP')  # Typo?
                elif tag == '' and token == ')':
                    genia_sents[i][j] = (token, '-RRB-')  # Typo?
                elif tag == '' and token == 'IFN-gamma':
                    genia_sents[i][j] = (token, 'NN')  # Typo?
                elif '|' in tag:
                    genia_sents[i][j] = (token, tag.split('|')[0])  # If contains |, choose first part
            # Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
            genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]

    if corpus == 'wsj':
        training_corpus = wsj_sents
    elif corpus == 'genia':
        training_corpus = genia_sents
    elif corpus == 'wsj+genia':
        training_corpus = wsj_sents + genia_sents
    else:
        raise click.ClickException('Invalid corpus')

    tagger = ChemCrfPosTagger(clusters=clusters)
    tagger.train(training_corpus, output)
['def', 'train', '(', 'ctx', ',', 'output', ',', 'corpus', ',', 'clusters', ')', ':', 'click', '.', 'echo', '(', "'chemdataextractor.pos.train'", ')', 'click', '.', 'echo', '(', "'Output: %s'", '%', 'output', ')', 'click', '.', 'echo', '(', "'Corpus: %s'", '%', 'corpus', ')', 'click', '.', 'echo', '(', "'Clusters: %s'", '%', 'clusters', ')', 'wsj_sents', '=', '[', ']', 'genia_sents', '=', '[', ']', 'if', 'corpus', '==', "'wsj'", 'or', 'corpus', '==', "'wsj+genia'", ':', 'wsj_sents', '=', 'list', '(', 'wsj_training', '.', 'tagged_sents', '(', ')', ')', '# For WSJ, remove all tokens with -NONE- tag', 'for', 'i', ',', 'wsj_sent', 'in', 'enumerate', '(', 'wsj_sents', ')', ':', 'wsj_sents', '[', 'i', ']', '=', '[', 't', 'for', 't', 'in', 'wsj_sent', 'if', 'not', 't', '[', '1', ']', '==', "'-NONE-'", ']', 'if', 'corpus', '==', "'genia'", 'or', 'corpus', '==', "'wsj+genia'", ':', 'genia_sents', '=', 'list', '(', 'genia_training', '.', 'tagged_sents', '(', ')', ')', '# Translate GENIA', 'for', 'i', ',', 'genia_sent', 'in', 'enumerate', '(', 'genia_sents', ')', ':', 'for', 'j', ',', '(', 'token', ',', 'tag', ')', 'in', 'enumerate', '(', 'genia_sent', ')', ':', 'if', 'tag', '==', "'('", ':', 'genia_sents', '[', 'i', ']', '[', 'j', ']', '=', '(', 'token', ',', "'-LRB-'", ')', '# ( to -RLB- (also do for evaluation)', 'elif', 'tag', '==', "')'", ':', 'genia_sents', '[', 'i', ']', '[', 'j', ']', '=', '(', 'token', ',', "'-RRB-'", ')', '# ) to -RRB- (also do for evaluation)', 'elif', 'tag', '==', "'CT'", ':', 'genia_sents', '[', 'i', ']', '[', 'j', ']', '=', '(', 'token', ',', "'DT'", ')', '# Typo?', 'elif', 'tag', '==', "'XT'", ':', 'genia_sents', '[', 'i', ']', '[', 'j', ']', '=', '(', 'token', ',', "'DT'", ')', '# Typo?', 'elif', 'tag', '==', "'-'", ':', 'genia_sents', '[', 'i', ']', '[', 'j', ']', '=', '(', 'token', ',', "':'", ')', '# Single hyphen character for dash', 'elif', 'tag', '==', "'N'", ':', 'genia_sents', '[', 'i', ']', '[', 'j', ']', '=', '(', 'token', ',', "'NN'", ')', '# Typo?', 'elif', 'tag', '==', "'PP'", ':', 'genia_sents', '[', 'i', ']', '[', 'j', ']', '=', '(', 'token', ',', "'PRP'", ')', '# Typo?', 'elif', 'tag', '==', "''", 'and', 'token', '==', "')'", ':', 'genia_sents', '[', 'i', ']', '[', 'j', ']', '=', '(', 'token', ',', "'-RRB-'", ')', '# Typo?', 'elif', 'tag', '==', "''", 'and', 'token', '==', "'IFN-gamma'", ':', 'genia_sents', '[', 'i', ']', '[', 'j', ']', '=', '(', 'token', ',', "'NN'", ')', '# Typo?', 'elif', "'|'", 'in', 'tag', ':', 'genia_sents', '[', 'i', ']', '[', 'j', ']', '=', '(', 'token', ',', 'tag', '.', 'split', '(', "'|'", ')', '[', '0', ']', ')', '# If contains |, choose first part', "# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)", 'genia_sents', '[', 'i', ']', '=', '[', 't', 'for', 't', 'in', 'genia_sent', 'if', 't', '[', '1', ']', 'in', 'TAGS', ']', 'if', 'corpus', '==', "'wsj'", ':', 'training_corpus', '=', 'wsj_sents', 'elif', 'corpus', '==', "'genia'", ':', 'training_corpus', '=', 'genia_sents', 'elif', 'corpus', '==', "'wsj+genia'", ':', 'training_corpus', '=', 'wsj_sents', '+', 'genia_sents', 'else', ':', 'raise', 'click', '.', 'ClickException', '(', "'Invalid corpus'", ')', 'tagger', '=', 'ChemCrfPosTagger', '(', 'clusters', '=', 'clusters', ')', 'tagger', '.', 'train', '(', 'training_corpus', ',', 'output', ')']
Train POS Tagger.
['Train', 'POS', 'Tagger', '.']
train
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/pos.py#L73-L127
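Aside (not part of the dataset record): the GENIA branch above is essentially a tag-translation pass over (token, tag) pairs. A small standalone sketch of that idea, using a made-up mapping table rather than the exact GENIA rules:

# Hypothetical mapping from non-standard tags to Penn Treebank tags.
TAG_FIXES = {'(': '-LRB-', ')': '-RRB-', 'CT': 'DT', 'XT': 'DT', '-': ':'}

def normalize_tags(sentence):
    """Return a copy of a (token, tag) sentence with known tag fixes applied."""
    fixed = []
    for token, tag in sentence:
        tag = TAG_FIXES.get(tag, tag)
        if '|' in tag:          # ambiguous tag: keep the first alternative
            tag = tag.split('|')[0]
        fixed.append((token, tag))
    return fixed

print(normalize_tags([('IL-2', 'NN'), ('(', '('), ('gene', 'NN|JJ'), (')', ')')]))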
5,250
barrust/mediawiki
mediawiki/mediawiki.py
MediaWiki._get_response
def _get_response(self, params):
    """ wrap the call to the requests package """
    return self._session.get(
        self._api_url, params=params, timeout=self._timeout
    ).json(encoding="utf8")
python
def _get_response(self, params):
    """ wrap the call to the requests package """
    return self._session.get(
        self._api_url, params=params, timeout=self._timeout
    ).json(encoding="utf8")
['def', '_get_response', '(', 'self', ',', 'params', ')', ':', 'return', 'self', '.', '_session', '.', 'get', '(', 'self', '.', '_api_url', ',', 'params', '=', 'params', ',', 'timeout', '=', 'self', '.', '_timeout', ')', '.', 'json', '(', 'encoding', '=', '"utf8"', ')']
wrap the call to the requests package
['wrap', 'the', 'call', 'to', 'the', 'requests', 'package']
train
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L897-L901
5,251
COLORFULBOARD/revision
revision/client.py
Client.dest_path
def dest_path(self):
    """
    :return: The destination path.
    :rtype: str
    """
    if os.path.isabs(self.config.local_path):
        return self.config.local_path
    else:
        return os.path.normpath(os.path.join(
            os.getcwd(),
            self.config.local_path
        ))
python
def dest_path(self):
    """
    :return: The destination path.
    :rtype: str
    """
    if os.path.isabs(self.config.local_path):
        return self.config.local_path
    else:
        return os.path.normpath(os.path.join(
            os.getcwd(),
            self.config.local_path
        ))
['def', 'dest_path', '(', 'self', ')', ':', 'if', 'os', '.', 'path', '.', 'isabs', '(', 'self', '.', 'config', '.', 'local_path', ')', ':', 'return', 'self', '.', 'config', '.', 'local_path', 'else', ':', 'return', 'os', '.', 'path', '.', 'normpath', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'getcwd', '(', ')', ',', 'self', '.', 'config', '.', 'local_path', ')', ')']
:return: The destination path.
:rtype: str
[':', 'return', ':', 'The', 'destination', 'path', '.', ':', 'rtype', ':', 'str']
train
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/client.py#L211-L222
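Aside (not part of the dataset record): a tiny runnable sketch of the absolute-vs-relative path handling used by dest_path; the function name and example paths here are invented for illustration:

import os

def resolve_local_path(local_path):
    """Return local_path unchanged if absolute, else anchor it at the current directory."""
    if os.path.isabs(local_path):
        return local_path
    return os.path.normpath(os.path.join(os.getcwd(), local_path))

print(resolve_local_path("/tmp/data"))      # absolute: returned as-is
print(resolve_local_path("build/../data"))  # relative: normalised under os.getcwd()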
5,252
bykof/billomapy
billomapy/billomapy.py
Billomapy._create_put_request
def _create_put_request(self, resource, billomat_id, command=None, send_data=None):
    """
    Creates a put request and return the response data
    """
    assert (isinstance(resource, str))
    if isinstance(billomat_id, int):
        billomat_id = str(billomat_id)

    if not command:
        command = ''
    else:
        command = '/' + command

    response = self.session.put(
        url=self.api_url + resource + '/' + billomat_id + command,
        data=json.dumps(send_data),
    )

    return self._handle_response(response)
python
def _create_put_request(self, resource, billomat_id, command=None, send_data=None):
    """
    Creates a put request and return the response data
    """
    assert (isinstance(resource, str))
    if isinstance(billomat_id, int):
        billomat_id = str(billomat_id)

    if not command:
        command = ''
    else:
        command = '/' + command

    response = self.session.put(
        url=self.api_url + resource + '/' + billomat_id + command,
        data=json.dumps(send_data),
    )

    return self._handle_response(response)
['def', '_create_put_request', '(', 'self', ',', 'resource', ',', 'billomat_id', ',', 'command', '=', 'None', ',', 'send_data', '=', 'None', ')', ':', 'assert', '(', 'isinstance', '(', 'resource', ',', 'str', ')', ')', 'if', 'isinstance', '(', 'billomat_id', ',', 'int', ')', ':', 'billomat_id', '=', 'str', '(', 'billomat_id', ')', 'if', 'not', 'command', ':', 'command', '=', "''", 'else', ':', 'command', '=', "'/'", '+', 'command', 'response', '=', 'self', '.', 'session', '.', 'put', '(', 'url', '=', 'self', '.', 'api_url', '+', 'resource', '+', "'/'", '+', 'billomat_id', '+', 'command', ',', 'data', '=', 'json', '.', 'dumps', '(', 'send_data', ')', ',', ')', 'return', 'self', '.', '_handle_response', '(', 'response', ')']
Creates a put request and return the response data
['Creates', 'a', 'put', 'request', 'and', 'return', 'the', 'response', 'data']
train
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L87-L106
5,253
spencerahill/aospy
aospy/utils/times.py
extract_months
def extract_months(time, months):
    """Extract times within specified months of the year.

    Parameters
    ----------
    time : xarray.DataArray
         Array of times that can be represented by numpy.datetime64 objects
         (i.e. the year is between 1678 and 2262).
    months : Desired months of the year to include

    Returns
    -------
    xarray.DataArray of the desired times
    """
    inds = _month_conditional(time, months)
    return time.sel(time=inds)
python
def extract_months(time, months):
    """Extract times within specified months of the year.

    Parameters
    ----------
    time : xarray.DataArray
         Array of times that can be represented by numpy.datetime64 objects
         (i.e. the year is between 1678 and 2262).
    months : Desired months of the year to include

    Returns
    -------
    xarray.DataArray of the desired times
    """
    inds = _month_conditional(time, months)
    return time.sel(time=inds)
['def', 'extract_months', '(', 'time', ',', 'months', ')', ':', 'inds', '=', '_month_conditional', '(', 'time', ',', 'months', ')', 'return', 'time', '.', 'sel', '(', 'time', '=', 'inds', ')']
Extract times within specified months of the year.

Parameters
----------
time : xarray.DataArray
     Array of times that can be represented by numpy.datetime64 objects
     (i.e. the year is between 1678 and 2262).
months : Desired months of the year to include

Returns
-------
xarray.DataArray of the desired times
['Extract', 'times', 'within', 'specified', 'months', 'of', 'the', 'year', '.']
train
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/utils/times.py#L292-L307
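Aside (not part of the dataset record): a minimal sketch of month-based selection with xarray, using a plain dt.month condition in place of the package's internal _month_conditional helper; the dates and month choice are made up:

import pandas as pd
import xarray as xr

dates = pd.date_range("2000-01-01", "2000-12-31", freq="MS")
time = xr.DataArray(dates, dims="time", coords={"time": dates})

# Keep only the times falling in the requested months (here: June-August),
# mirroring the time.sel(time=inds) call in the record above.
months = [6, 7, 8]
inds = time.dt.month.isin(months)
print(time.sel(time=inds).values)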
5,254
DLR-RM/RAFCON
source/rafcon/core/library_manager.py
LibraryManager._get_library_root_key_for_os_path
def _get_library_root_key_for_os_path(self, path):
    """Return library root key if path is within library root paths"""
    path = os.path.realpath(path)
    library_root_key = None
    for library_root_key, library_root_path in self._library_root_paths.items():
        rel_path = os.path.relpath(path, library_root_path)
        if rel_path.startswith('..'):
            library_root_key = None
            continue
        else:
            break
    return library_root_key
python
def _get_library_root_key_for_os_path(self, path):
    """Return library root key if path is within library root paths"""
    path = os.path.realpath(path)
    library_root_key = None
    for library_root_key, library_root_path in self._library_root_paths.items():
        rel_path = os.path.relpath(path, library_root_path)
        if rel_path.startswith('..'):
            library_root_key = None
            continue
        else:
            break
    return library_root_key
['def', '_get_library_root_key_for_os_path', '(', 'self', ',', 'path', ')', ':', 'path', '=', 'os', '.', 'path', '.', 'realpath', '(', 'path', ')', 'library_root_key', '=', 'None', 'for', 'library_root_key', ',', 'library_root_path', 'in', 'self', '.', '_library_root_paths', '.', 'items', '(', ')', ':', 'rel_path', '=', 'os', '.', 'path', '.', 'relpath', '(', 'path', ',', 'library_root_path', ')', 'if', 'rel_path', '.', 'startswith', '(', "'..'", ')', ':', 'library_root_key', '=', 'None', 'continue', 'else', ':', 'break', 'return', 'library_root_key']
Return library root key if path is within library root paths
['Return', 'library', 'root', 'key', 'if', 'path', 'is', 'within', 'library', 'root', 'paths']
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/library_manager.py#L326-L337
5,255
BerkeleyAutomation/perception
perception/cnn.py
AlexNet._parse_config
def _parse_config(self, config):
    """ Parses a tensorflow configuration """
    self._batch_size = config['batch_size']
    self._im_height = config['im_height']
    self._im_width = config['im_width']
    self._num_channels = config['channels']
    self._output_layer = config['out_layer']
    self._feature_layer = config['feature_layer']
    self._out_size = None
    if 'out_size' in config.keys():
        self._out_size = config['out_size']
    self._input_arr = np.zeros([self._batch_size, self._im_height,
                                self._im_width, self._num_channels])
    if self._model_dir is None:
        self._net_data = np.load(config['caffe_weights']).item()
        self._mean = np.load(config['mean_file'])
        self._model_filename = None
    else:
        self._net_data = None
        self._mean = np.load(os.path.join(self._model_dir, 'mean.npy'))
        self._model_filename = os.path.join(self._model_dir, 'model.ckpt')
python
def _parse_config(self, config):
    """ Parses a tensorflow configuration """
    self._batch_size = config['batch_size']
    self._im_height = config['im_height']
    self._im_width = config['im_width']
    self._num_channels = config['channels']
    self._output_layer = config['out_layer']
    self._feature_layer = config['feature_layer']
    self._out_size = None
    if 'out_size' in config.keys():
        self._out_size = config['out_size']
    self._input_arr = np.zeros([self._batch_size, self._im_height,
                                self._im_width, self._num_channels])
    if self._model_dir is None:
        self._net_data = np.load(config['caffe_weights']).item()
        self._mean = np.load(config['mean_file'])
        self._model_filename = None
    else:
        self._net_data = None
        self._mean = np.load(os.path.join(self._model_dir, 'mean.npy'))
        self._model_filename = os.path.join(self._model_dir, 'model.ckpt')
['def', '_parse_config', '(', 'self', ',', 'config', ')', ':', 'self', '.', '_batch_size', '=', 'config', '[', "'batch_size'", ']', 'self', '.', '_im_height', '=', 'config', '[', "'im_height'", ']', 'self', '.', '_im_width', '=', 'config', '[', "'im_width'", ']', 'self', '.', '_num_channels', '=', 'config', '[', "'channels'", ']', 'self', '.', '_output_layer', '=', 'config', '[', "'out_layer'", ']', 'self', '.', '_feature_layer', '=', 'config', '[', "'feature_layer'", ']', 'self', '.', '_out_size', '=', 'None', 'if', "'out_size'", 'in', 'config', '.', 'keys', '(', ')', ':', 'self', '.', '_out_size', '=', 'config', '[', "'out_size'", ']', 'self', '.', '_input_arr', '=', 'np', '.', 'zeros', '(', '[', 'self', '.', '_batch_size', ',', 'self', '.', '_im_height', ',', 'self', '.', '_im_width', ',', 'self', '.', '_num_channels', ']', ')', 'if', 'self', '.', '_model_dir', 'is', 'None', ':', 'self', '.', '_net_data', '=', 'np', '.', 'load', '(', 'config', '[', "'caffe_weights'", ']', ')', '.', 'item', '(', ')', 'self', '.', '_mean', '=', 'np', '.', 'load', '(', 'config', '[', "'mean_file'", ']', ')', 'self', '.', '_model_filename', '=', 'None', 'else', ':', 'self', '.', '_net_data', '=', 'None', 'self', '.', '_mean', '=', 'np', '.', 'load', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', '_model_dir', ',', "'mean.npy'", ')', ')', 'self', '.', '_model_filename', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', '_model_dir', ',', "'model.ckpt'", ')']
Parses a tensorflow configuration
['Parses', 'a', 'tensorflow', 'configuration']
train
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/cnn.py#L72-L93
5,256
mcocdawc/chemcoord
src/chemcoord/utilities/_print_versions.py
get_sys_info
def get_sys_info():
    "Returns system information as a dict"

    blob = []

    # commit = cc._git_hash
    # blob.append(('commit', commit))

    try:
        (sysname, nodename, release, version, machine, processor) = platform.uname()
        blob.extend([
            ("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
            ("python-bits", struct.calcsize("P") * 8),
            ("OS", "%s" % (sysname)),
            ("OS-release", "%s" % (release)),
            # ("Version", "%s" % (version)),
            ("machine", "%s" % (machine)),
            ("processor", "%s" % (processor)),
            # ("byteorder", "%s" % sys.byteorder),
            ("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
            ("LANG", "%s" % os.environ.get('LANG', "None")),
            ("LOCALE", "%s.%s" % locale.getlocale()),
        ])
    except Exception:
        pass

    return blob
python
def get_sys_info():
    "Returns system information as a dict"

    blob = []

    # commit = cc._git_hash
    # blob.append(('commit', commit))

    try:
        (sysname, nodename, release, version, machine, processor) = platform.uname()
        blob.extend([
            ("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
            ("python-bits", struct.calcsize("P") * 8),
            ("OS", "%s" % (sysname)),
            ("OS-release", "%s" % (release)),
            # ("Version", "%s" % (version)),
            ("machine", "%s" % (machine)),
            ("processor", "%s" % (processor)),
            # ("byteorder", "%s" % sys.byteorder),
            ("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
            ("LANG", "%s" % os.environ.get('LANG', "None")),
            ("LOCALE", "%s.%s" % locale.getlocale()),
        ])
    except Exception:
        pass

    return blob
['def', 'get_sys_info', '(', ')', ':', 'blob', '=', '[', ']', '# commit = cc._git_hash', "# blob.append(('commit', commit))", 'try', ':', '(', 'sysname', ',', 'nodename', ',', 'release', ',', 'version', ',', 'machine', ',', 'processor', ')', '=', 'platform', '.', 'uname', '(', ')', 'blob', '.', 'extend', '(', '[', '(', '"python"', ',', '"%d.%d.%d.%s.%s"', '%', 'sys', '.', 'version_info', '[', ':', ']', ')', ',', '(', '"python-bits"', ',', 'struct', '.', 'calcsize', '(', '"P"', ')', '*', '8', ')', ',', '(', '"OS"', ',', '"%s"', '%', '(', 'sysname', ')', ')', ',', '(', '"OS-release"', ',', '"%s"', '%', '(', 'release', ')', ')', ',', '# ("Version", "%s" % (version)),', '(', '"machine"', ',', '"%s"', '%', '(', 'machine', ')', ')', ',', '(', '"processor"', ',', '"%s"', '%', '(', 'processor', ')', ')', ',', '# ("byteorder", "%s" % sys.byteorder),', '(', '"LC_ALL"', ',', '"%s"', '%', 'os', '.', 'environ', '.', 'get', '(', "'LC_ALL'", ',', '"None"', ')', ')', ',', '(', '"LANG"', ',', '"%s"', '%', 'os', '.', 'environ', '.', 'get', '(', "'LANG'", ',', '"None"', ')', ')', ',', '(', '"LOCALE"', ',', '"%s.%s"', '%', 'locale', '.', 'getlocale', '(', ')', ')', ',', ']', ')', 'except', 'Exception', ':', 'pass', 'return', 'blob']
Returns system information as a dict
['Returns', 'system', 'information', 'as', 'a', 'dict']
train
https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/utilities/_print_versions.py#L12-L39
5,257
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/subscribe/subscribe.py
RoutingBase.remove_routes
def remove_routes(self, item, routes):
    """Removes item from matching routes"""
    for route in routes:
        items = self._routes.get(route)
        try:
            items.remove(item)
            LOG.debug('removed item from route %s', route)
        except ValueError:
            pass
        if not items:
            self._routes.pop(route)
            LOG.debug('removed route %s', route)
python
def remove_routes(self, item, routes):
    """Removes item from matching routes"""
    for route in routes:
        items = self._routes.get(route)
        try:
            items.remove(item)
            LOG.debug('removed item from route %s', route)
        except ValueError:
            pass
        if not items:
            self._routes.pop(route)
            LOG.debug('removed route %s', route)
['def', 'remove_routes', '(', 'self', ',', 'item', ',', 'routes', ')', ':', 'for', 'route', 'in', 'routes', ':', 'items', '=', 'self', '.', '_routes', '.', 'get', '(', 'route', ')', 'try', ':', 'items', '.', 'remove', '(', 'item', ')', 'LOG', '.', 'debug', '(', "'removed item from route %s'", ',', 'route', ')', 'except', 'ValueError', ':', 'pass', 'if', 'not', 'items', ':', 'self', '.', '_routes', '.', 'pop', '(', 'route', ')', 'LOG', '.', 'debug', '(', "'removed route %s'", ',', 'route', ')']
Removes item from matching routes
['Removes', 'item', 'from', 'matching', 'routes']
train
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/subscribe/subscribe.py#L91-L102
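Aside (not part of the dataset record): an illustrative, self-contained version of the same routing-table clean-up, with the logger replaced by a print and invented route and subscriber names:

routes = {'notifications': ['sub-1', 'sub-2'], 'registrations': ['sub-1']}

def remove_item(routes, item, route_names):
    """Drop item from each named route and discard routes left empty."""
    for name in route_names:
        subscribers = routes.get(name, [])
        if item in subscribers:
            subscribers.remove(item)
        if not subscribers:
            routes.pop(name, None)

remove_item(routes, 'sub-1', ['notifications', 'registrations'])
print(routes)  # {'notifications': ['sub-2']}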
5,258
collectiveacuity/labPack
labpack/storage/aws/s3.py
s3Client._import
def _import(self, record_key, record_data, overwrite=True, encryption='', last_modified=0.0, **kwargs):
    ''' a helper method for other storage clients to import into s3

    :param record_key: string with key for record
    :param record_data: byte data for body of record
    :param overwrite: [optional] boolean to overwrite existing records
    :param encryption: [optional] string with encryption type add to metadata
    :param kwargs: [optional] keyword arguments from other import methods
    :return: boolean indicating whether record was imported
    '''

    # define keyword arguments
    from time import time
    create_kwargs = {
        'bucket_name': self.bucket_name,
        'record_key': record_key,
        'record_data': record_data,
        'overwrite': overwrite,
        'record_metadata': {
            'last_modified': str(time())
        }
    }

    # add encryption and last_modified
    if encryption:
        create_kwargs['record_metadata']['encryption'] = encryption
    if last_modified:
        create_kwargs['record_metadata']['last_modified'] = str(last_modified)

    # add record mimetype and encoding
    import mimetypes
    guess_mimetype, guess_encoding = mimetypes.guess_type(record_key)
    if not guess_mimetype:
        if record_key.find('.yaml') or record_key.find('.yml'):
            guess_mimetype = 'application/x-yaml'
        if record_key.find('.drep'):
            guess_mimetype = 'application/x-drep'
    if guess_mimetype:
        create_kwargs['record_mimetype'] = guess_mimetype
    if guess_encoding:
        create_kwargs['record_encoding'] = guess_encoding

    # create record
    try:
        self.s3.create_record(**create_kwargs)
    except ValueError as err:
        if str(err).find('already contains') > -1:
            self.s3.iam.printer('%s already exists in %s collection. Skipping.' % (record_key, self.bucket_name))
            return False
        # elif str(err).find('exceeds maximum record') > -1:
        #     self.s3.iam.printer('%s exceeds the maximum size for files on S3. Skipping.' % record_key)
        else:
            raise
    except:
        raise

    return True
python
def _import(self, record_key, record_data, overwrite=True, encryption='', last_modified=0.0, **kwargs):
    ''' a helper method for other storage clients to import into s3

    :param record_key: string with key for record
    :param record_data: byte data for body of record
    :param overwrite: [optional] boolean to overwrite existing records
    :param encryption: [optional] string with encryption type add to metadata
    :param kwargs: [optional] keyword arguments from other import methods
    :return: boolean indicating whether record was imported
    '''

    # define keyword arguments
    from time import time
    create_kwargs = {
        'bucket_name': self.bucket_name,
        'record_key': record_key,
        'record_data': record_data,
        'overwrite': overwrite,
        'record_metadata': {
            'last_modified': str(time())
        }
    }

    # add encryption and last_modified
    if encryption:
        create_kwargs['record_metadata']['encryption'] = encryption
    if last_modified:
        create_kwargs['record_metadata']['last_modified'] = str(last_modified)

    # add record mimetype and encoding
    import mimetypes
    guess_mimetype, guess_encoding = mimetypes.guess_type(record_key)
    if not guess_mimetype:
        if record_key.find('.yaml') or record_key.find('.yml'):
            guess_mimetype = 'application/x-yaml'
        if record_key.find('.drep'):
            guess_mimetype = 'application/x-drep'
    if guess_mimetype:
        create_kwargs['record_mimetype'] = guess_mimetype
    if guess_encoding:
        create_kwargs['record_encoding'] = guess_encoding

    # create record
    try:
        self.s3.create_record(**create_kwargs)
    except ValueError as err:
        if str(err).find('already contains') > -1:
            self.s3.iam.printer('%s already exists in %s collection. Skipping.' % (record_key, self.bucket_name))
            return False
        # elif str(err).find('exceeds maximum record') > -1:
        #     self.s3.iam.printer('%s exceeds the maximum size for files on S3. Skipping.' % record_key)
        else:
            raise
    except:
        raise

    return True
['def', '_import', '(', 'self', ',', 'record_key', ',', 'record_data', ',', 'overwrite', '=', 'True', ',', 'encryption', '=', "''", ',', 'last_modified', '=', '0.0', ',', '*', '*', 'kwargs', ')', ':', '# define keyword arguments', 'from', 'time', 'import', 'time', 'create_kwargs', '=', '{', "'bucket_name'", ':', 'self', '.', 'bucket_name', ',', "'record_key'", ':', 'record_key', ',', "'record_data'", ':', 'record_data', ',', "'overwrite'", ':', 'overwrite', ',', "'record_metadata'", ':', '{', "'last_modified'", ':', 'str', '(', 'time', '(', ')', ')', '}', '}', '# add encryption and last_modified', 'if', 'encryption', ':', 'create_kwargs', '[', "'record_metadata'", ']', '[', "'encryption'", ']', '=', 'encryption', 'if', 'last_modified', ':', 'create_kwargs', '[', "'record_metadata'", ']', '[', "'last_modified'", ']', '=', 'str', '(', 'last_modified', ')', '# add record mimetype and encoding', 'import', 'mimetypes', 'guess_mimetype', ',', 'guess_encoding', '=', 'mimetypes', '.', 'guess_type', '(', 'record_key', ')', 'if', 'not', 'guess_mimetype', ':', 'if', 'record_key', '.', 'find', '(', "'.yaml'", ')', 'or', 'record_key', '.', 'find', '(', "'.yml'", ')', ':', 'guess_mimetype', '=', "'application/x-yaml'", 'if', 'record_key', '.', 'find', '(', "'.drep'", ')', ':', 'guess_mimetype', '=', "'application/x-drep'", 'if', 'guess_mimetype', ':', 'create_kwargs', '[', "'record_mimetype'", ']', '=', 'guess_mimetype', 'if', 'guess_encoding', ':', 'create_kwargs', '[', "'record_encoding'", ']', '=', 'guess_encoding', '# create record', 'try', ':', 'self', '.', 's3', '.', 'create_record', '(', '*', '*', 'create_kwargs', ')', 'except', 'ValueError', 'as', 'err', ':', 'if', 'str', '(', 'err', ')', '.', 'find', '(', "'already contains'", ')', '>', '-', '1', ':', 'self', '.', 's3', '.', 'iam', '.', 'printer', '(', "'%s already exists in %s collection. Skipping.'", '%', '(', 'record_key', ',', 'self', '.', 'bucket_name', ')', ')', 'return', 'False', "# elif str(err).find('exceeds maximum record') > -1:", "# self.s3.iam.printer('%s exceeds the maximum size for files on S3. Skipping.' % record_key)", 'else', ':', 'raise', 'except', ':', 'raise', 'return', 'True']
a helper method for other storage clients to import into s3

:param record_key: string with key for record
:param record_data: byte data for body of record
:param overwrite: [optional] boolean to overwrite existing records
:param encryption: [optional] string with encryption type add to metadata
:param kwargs: [optional] keyword arguments from other import methods
:return: boolean indicating whether record was imported
['a', 'helper', 'method', 'for', 'other', 'storage', 'clients', 'to', 'import', 'into', 's3', ':', 'param', 'record_key', ':', 'string', 'with', 'key', 'for', 'record', ':', 'param', 'record_data', ':', 'byte', 'data', 'for', 'body', 'of', 'record', ':', 'param', 'overwrite', ':', '[', 'optional', ']', 'boolean', 'to', 'overwrite', 'existing', 'records', ':', 'param', 'encryption', ':', '[', 'optional', ']', 'string', 'with', 'encryption', 'type', 'add', 'to', 'metadata', ':', 'param', 'kwargs', ':', '[', 'optional', ']', 'keyword', 'arguments', 'from', 'other', 'import', 'methods', ':', 'return', ':', 'boolean', 'indicating', 'whether', 'record', 'was', 'imported']
train
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L1639-L1695
5,259
kalefranz/auxlib
auxlib/decorators.py
memoize
def memoize(func):
    """
    Decorator to cause a function to cache it's results for each combination of
    inputs and return the cached result on subsequent calls.  Does not support
    named arguments or arg values that are not hashable.

    >>> @memoize
    ... def foo(x):
    ...     print('running function with', x)
    ...     return x+3
    ...
    >>> foo(10)
    running function with 10
    13
    >>> foo(10)
    13
    >>> foo(11)
    running function with 11
    14
    >>> @memoize
    ... def range_tuple(limit):
    ...     print('running function')
    ...     return tuple(i for i in range(limit))
    ...
    >>> range_tuple(3)
    running function
    (0, 1, 2)
    >>> range_tuple(3)
    (0, 1, 2)
    >>> @memoize
    ... def range_iter(limit):
    ...     print('running function')
    ...     return (i for i in range(limit))
    ...
    >>> range_iter(3)
    Traceback (most recent call last):
    TypeError: Can't memoize a generator or non-hashable object!
    """
    func._result_cache = {}  # pylint: disable-msg=W0212

    @wraps(func)
    def _memoized_func(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        if key in func._result_cache:  # pylint: disable-msg=W0212
            return func._result_cache[key]  # pylint: disable-msg=W0212
        else:
            result = func(*args, **kwargs)
            if isinstance(result, GeneratorType) or not isinstance(result, Hashable):
                raise TypeError("Can't memoize a generator or non-hashable object!")
            func._result_cache[key] = result  # pylint: disable-msg=W0212
            return result

    return _memoized_func
python
def memoize(func):
    """
    Decorator to cause a function to cache it's results for each combination of
    inputs and return the cached result on subsequent calls.  Does not support
    named arguments or arg values that are not hashable.

    >>> @memoize
    ... def foo(x):
    ...     print('running function with', x)
    ...     return x+3
    ...
    >>> foo(10)
    running function with 10
    13
    >>> foo(10)
    13
    >>> foo(11)
    running function with 11
    14
    >>> @memoize
    ... def range_tuple(limit):
    ...     print('running function')
    ...     return tuple(i for i in range(limit))
    ...
    >>> range_tuple(3)
    running function
    (0, 1, 2)
    >>> range_tuple(3)
    (0, 1, 2)
    >>> @memoize
    ... def range_iter(limit):
    ...     print('running function')
    ...     return (i for i in range(limit))
    ...
    >>> range_iter(3)
    Traceback (most recent call last):
    TypeError: Can't memoize a generator or non-hashable object!
    """
    func._result_cache = {}  # pylint: disable-msg=W0212

    @wraps(func)
    def _memoized_func(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        if key in func._result_cache:  # pylint: disable-msg=W0212
            return func._result_cache[key]  # pylint: disable-msg=W0212
        else:
            result = func(*args, **kwargs)
            if isinstance(result, GeneratorType) or not isinstance(result, Hashable):
                raise TypeError("Can't memoize a generator or non-hashable object!")
            func._result_cache[key] = result  # pylint: disable-msg=W0212
            return result

    return _memoized_func
['def', 'memoize', '(', 'func', ')', ':', 'func', '.', '_result_cache', '=', '{', '}', '# pylint: disable-msg=W0212', '@', 'wraps', '(', 'func', ')', 'def', '_memoized_func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'key', '=', '(', 'args', ',', 'tuple', '(', 'sorted', '(', 'kwargs', '.', 'items', '(', ')', ')', ')', ')', 'if', 'key', 'in', 'func', '.', '_result_cache', ':', '# pylint: disable-msg=W0212', 'return', 'func', '.', '_result_cache', '[', 'key', ']', '# pylint: disable-msg=W0212', 'else', ':', 'result', '=', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'isinstance', '(', 'result', ',', 'GeneratorType', ')', 'or', 'not', 'isinstance', '(', 'result', ',', 'Hashable', ')', ':', 'raise', 'TypeError', '(', '"Can\'t memoize a generator or non-hashable object!"', ')', 'func', '.', '_result_cache', '[', 'key', ']', '=', 'result', '# pylint: disable-msg=W0212', 'return', 'result', 'return', '_memoized_func']
Decorator to cause a function to cache it's results for each combination of
inputs and return the cached result on subsequent calls.  Does not support
named arguments or arg values that are not hashable.

>>> @memoize
... def foo(x):
...     print('running function with', x)
...     return x+3
...
>>> foo(10)
running function with 10
13
>>> foo(10)
13
>>> foo(11)
running function with 11
14
>>> @memoize
... def range_tuple(limit):
...     print('running function')
...     return tuple(i for i in range(limit))
...
>>> range_tuple(3)
running function
(0, 1, 2)
>>> range_tuple(3)
(0, 1, 2)
>>> @memoize
... def range_iter(limit):
...     print('running function')
...     return (i for i in range(limit))
...
>>> range_iter(3)
Traceback (most recent call last):
TypeError: Can't memoize a generator or non-hashable object!
['Decorator', 'to', 'cause', 'a', 'function', 'to', 'cache', 'it', 's', 'results', 'for', 'each', 'combination', 'of', 'inputs', 'and', 'return', 'the', 'cached', 'result', 'on', 'subsequent', 'calls', '.', 'Does', 'not', 'support', 'named', 'arguments', 'or', 'arg', 'values', 'that', 'are', 'not', 'hashable', '.']
train
https://github.com/kalefranz/auxlib/blob/6ff2d6b57d128d0b9ed8f01ad83572e938da064f/auxlib/decorators.py#L10-L62
5,260
LuqueDaniel/pybooru
pybooru/api_moebooru.py
MoebooruApi_Mixin.pool_create
def pool_create(self, name, description, is_public):
    """Function to create a pool (Require login) (UNTESTED).

    Parameters:
        name (str): The name.
        description (str): A description of the pool.
        is_public (int): 1 or 0, whether or not the pool is public.
    """
    params = {'pool[name]': name, 'pool[description]': description,
              'pool[is_public]': is_public}
    return self._get('pool/create', params, method='POST')
python
def pool_create(self, name, description, is_public):
    """Function to create a pool (Require login) (UNTESTED).

    Parameters:
        name (str): The name.
        description (str): A description of the pool.
        is_public (int): 1 or 0, whether or not the pool is public.
    """
    params = {'pool[name]': name, 'pool[description]': description,
              'pool[is_public]': is_public}
    return self._get('pool/create', params, method='POST')
['def', 'pool_create', '(', 'self', ',', 'name', ',', 'description', ',', 'is_public', ')', ':', 'params', '=', '{', "'pool[name]'", ':', 'name', ',', "'pool[description]'", ':', 'description', ',', "'pool[is_public]'", ':', 'is_public', '}', 'return', 'self', '.', '_get', '(', "'pool/create'", ',', 'params', ',', 'method', '=', "'POST'", ')']
Function to create a pool (Require login) (UNTESTED).

Parameters:
    name (str): The name.
    description (str): A description of the pool.
    is_public (int): 1 or 0, whether or not the pool is public.
['Function', 'to', 'create', 'a', 'pool', '(', 'Require', 'login', ')', '(', 'UNTESTED', ')', '.']
train
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_moebooru.py#L516-L526
5,261
nickw444/wtforms-webwidgets
wtforms_webwidgets/bootstrap/util.py
render_field_description
def render_field_description(field):
    """
    Render a field description as HTML.
    """
    if hasattr(field, 'description') and field.description != '':
        html = """<p class="help-block">{field.description}</p>"""
        html = html.format(
            field=field
        )
        return HTMLString(html)
    return ''
python
def render_field_description(field):
    """
    Render a field description as HTML.
    """
    if hasattr(field, 'description') and field.description != '':
        html = """<p class="help-block">{field.description}</p>"""
        html = html.format(
            field=field
        )
        return HTMLString(html)
    return ''
['def', 'render_field_description', '(', 'field', ')', ':', 'if', 'hasattr', '(', 'field', ',', "'description'", ')', 'and', 'field', '.', 'description', '!=', "''", ':', 'html', '=', '"""<p class="help-block">{field.description}</p>"""', 'html', '=', 'html', '.', 'format', '(', 'field', '=', 'field', ')', 'return', 'HTMLString', '(', 'html', ')', 'return', "''"]
Render a field description as HTML.
['Render', 'a', 'field', 'description', 'as', 'HTML', '.']
train
https://github.com/nickw444/wtforms-webwidgets/blob/88f224b68c0b0f4f5c97de39fe1428b96e12f8db/wtforms_webwidgets/bootstrap/util.py#L19-L30
5,262
rsmuc/health_monitoring_plugins
health_monitoring_plugins/check_snmp_apc_ups/check_snmp_apc_ups.py
check_runtime_remaining
def check_runtime_remaining(the_session, the_helper, the_snmp_value):
    """
    OID .1.3.6.1.4.1.318.1.1.1.2.2.3.0
    MIB excerpt
    The UPS battery run time remaining before battery
    exhaustion.
    SNMP value is in TimeTicks aka hundredths of a second
    """
    a_minute_value = calc_minutes_from_ticks(the_snmp_value)
    the_helper.add_metric(
        label=the_helper.options.type,
        value=a_minute_value,
        warn=the_helper.options.warning,
        crit=the_helper.options.critical,
        uom="Minutes")

    the_helper.check_all_metrics()
    the_helper.set_summary("Remaining runtime on battery is {} minutes".format(a_minute_value))
python
def check_runtime_remaining(the_session, the_helper, the_snmp_value):
    """
    OID .1.3.6.1.4.1.318.1.1.1.2.2.3.0
    MIB excerpt
    The UPS battery run time remaining before battery
    exhaustion.
    SNMP value is in TimeTicks aka hundredths of a second
    """
    a_minute_value = calc_minutes_from_ticks(the_snmp_value)
    the_helper.add_metric(
        label=the_helper.options.type,
        value=a_minute_value,
        warn=the_helper.options.warning,
        crit=the_helper.options.critical,
        uom="Minutes")

    the_helper.check_all_metrics()
    the_helper.set_summary("Remaining runtime on battery is {} minutes".format(a_minute_value))
['def', 'check_runtime_remaining', '(', 'the_session', ',', 'the_helper', ',', 'the_snmp_value', ')', ':', 'a_minute_value', '=', 'calc_minutes_from_ticks', '(', 'the_snmp_value', ')', 'the_helper', '.', 'add_metric', '(', 'label', '=', 'the_helper', '.', 'options', '.', 'type', ',', 'value', '=', 'a_minute_value', ',', 'warn', '=', 'the_helper', '.', 'options', '.', 'warning', ',', 'crit', '=', 'the_helper', '.', 'options', '.', 'critical', ',', 'uom', '=', '"Minutes"', ')', 'the_helper', '.', 'check_all_metrics', '(', ')', 'the_helper', '.', 'set_summary', '(', '"Remaining runtime on battery is {} minutes"', '.', 'format', '(', 'a_minute_value', ')', ')']
OID .1.3.6.1.4.1.318.1.1.1.2.2.3.0
MIB excerpt
The UPS battery run time remaining before battery
exhaustion.
SNMP value is in TimeTicks aka hundredths of a second
['OID', '.', '1', '.', '3', '.', '6', '.', '1', '.', '4', '.', '1', '.', '318', '.', '1', '.', '1', '.', '1', '.', '2', '.', '2', '.', '3', '.', '0', 'MIB', 'excerpt', 'The', 'UPS', 'battery', 'run', 'time', 'remaining', 'before', 'battery', 'exhaustion', '.', 'SNMP', 'value', 'is', 'in', 'TimeTicks', 'aka', 'hundredths', 'of', 'a', 'second']
train
https://github.com/rsmuc/health_monitoring_plugins/blob/7ac29dfb9fe46c055b018cb72ad0d7d8065589b9/health_monitoring_plugins/check_snmp_apc_ups/check_snmp_apc_ups.py#L160-L176
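Aside (not part of the dataset record): calc_minutes_from_ticks is not included in this record; a plausible stand-in for the conversion it performs (SNMP TimeTicks are hundredths of a second) would be:

def minutes_from_timeticks(ticks):
    """Convert SNMP TimeTicks (hundredths of a second) to minutes."""
    return int(ticks) / 100.0 / 60.0

print(minutes_from_timeticks(432000))  # 4320 seconds on battery -> 72.0 minutes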
5,263
boriel/zxbasic
arch/zx48k/backend/__init__.py
_varx
def _varx(ins):
    """ Defines a memory space with a default CONSTANT expression
    1st parameter is the var name
    2nd parameter is the type-size (u8 or i8 for byte, u16 or i16 for word, etc)
    3rd parameter is the list of expressions. All of them will be converted to the
    type required.
    """
    output = []
    output.append('%s:' % ins.quad[1])
    q = eval(ins.quad[3])

    if ins.quad[2] in ('i8', 'u8'):
        size = 'B'
    elif ins.quad[2] in ('i16', 'u16'):
        size = 'W'
    elif ins.quad[2] in ('i32', 'u32'):
        size = 'W'
        z = list()
        for expr in q:
            z.extend(['(%s) & 0xFFFF' % expr, '(%s) >> 16' % expr])
        q = z
    else:
        raise InvalidIC(ins.quad, 'Unimplemented vard size: %s' % ins.quad[2])

    for x in q:
        output.append('DEF%s %s' % (size, x))

    return output
python
def _varx(ins):
    """ Defines a memory space with a default CONSTANT expression
    1st parameter is the var name
    2nd parameter is the type-size (u8 or i8 for byte, u16 or i16 for word, etc)
    3rd parameter is the list of expressions. All of them will be converted to the
    type required.
    """
    output = []
    output.append('%s:' % ins.quad[1])
    q = eval(ins.quad[3])

    if ins.quad[2] in ('i8', 'u8'):
        size = 'B'
    elif ins.quad[2] in ('i16', 'u16'):
        size = 'W'
    elif ins.quad[2] in ('i32', 'u32'):
        size = 'W'
        z = list()
        for expr in q:
            z.extend(['(%s) & 0xFFFF' % expr, '(%s) >> 16' % expr])
        q = z
    else:
        raise InvalidIC(ins.quad, 'Unimplemented vard size: %s' % ins.quad[2])

    for x in q:
        output.append('DEF%s %s' % (size, x))

    return output
['def', '_varx', '(', 'ins', ')', ':', 'output', '=', '[', ']', 'output', '.', 'append', '(', "'%s:'", '%', 'ins', '.', 'quad', '[', '1', ']', ')', 'q', '=', 'eval', '(', 'ins', '.', 'quad', '[', '3', ']', ')', 'if', 'ins', '.', 'quad', '[', '2', ']', 'in', '(', "'i8'", ',', "'u8'", ')', ':', 'size', '=', "'B'", 'elif', 'ins', '.', 'quad', '[', '2', ']', 'in', '(', "'i16'", ',', "'u16'", ')', ':', 'size', '=', "'W'", 'elif', 'ins', '.', 'quad', '[', '2', ']', 'in', '(', "'i32'", ',', "'u32'", ')', ':', 'size', '=', "'W'", 'z', '=', 'list', '(', ')', 'for', 'expr', 'in', 'q', ':', 'z', '.', 'extend', '(', '[', "'(%s) & 0xFFFF'", '%', 'expr', ',', "'(%s) >> 16'", '%', 'expr', ']', ')', 'q', '=', 'z', 'else', ':', 'raise', 'InvalidIC', '(', 'ins', '.', 'quad', ',', "'Unimplemented vard size: %s'", '%', 'ins', '.', 'quad', '[', '2', ']', ')', 'for', 'x', 'in', 'q', ':', 'output', '.', 'append', '(', "'DEF%s %s'", '%', '(', 'size', ',', 'x', ')', ')', 'return', 'output']
Defines a memory space with a default CONSTANT expression
1st parameter is the var name
2nd parameter is the type-size (u8 or i8 for byte, u16 or i16 for word, etc)
3rd parameter is the list of expressions. All of them will be converted to the
type required.
['Defines', 'a', 'memory', 'space', 'with', 'a', 'default', 'CONSTANT', 'expression', '1st', 'parameter', 'is', 'the', 'var', 'name', '2nd', 'parameter', 'is', 'the', 'type', '-', 'size', '(', 'u8', 'or', 'i8', 'for', 'byte', 'u16', 'or', 'i16', 'for', 'word', 'etc', ')', '3rd', 'parameter', 'is', 'the', 'list', 'of', 'expressions', '.', 'All', 'of', 'them', 'will', 'be', 'converted', 'to', 'the', 'type', 'required', '.']
train
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__init__.py#L497-L524
5,264
trombastic/PyScada
pyscada/visa/devices/DS1Z.py
Handler.read_data
def read_data(self, variable_instance): """ read values from the device """ if self.inst is None: return if variable_instance.visavariable.device_property.upper() == 'vrms_chan1': return self.parse_value(self.inst.query(':MEAS:ITEM? VRMS,CHAN1')) return None
python
def read_data(self, variable_instance): """ read values from the device """ if self.inst is None: return if variable_instance.visavariable.device_property.upper() == 'vrms_chan1': return self.parse_value(self.inst.query(':MEAS:ITEM? VRMS,CHAN1')) return None
['def', 'read_data', '(', 'self', ',', 'variable_instance', ')', ':', 'if', 'self', '.', 'inst', 'is', 'None', ':', 'return', 'if', 'variable_instance', '.', 'visavariable', '.', 'device_property', '.', 'upper', '(', ')', '==', "'vrms_chan1'", ':', 'return', 'self', '.', 'parse_value', '(', 'self', '.', 'inst', '.', 'query', '(', "':MEAS:ITEM? VRMS,CHAN1'", ')', ')', 'return', 'None']
read values from the device
['read', 'values', 'from', 'the', 'device']
train
https://github.com/trombastic/PyScada/blob/c5fc348a25f0df1340336f694ee9bc1aea62516a/pyscada/visa/devices/DS1Z.py#L11-L19
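One detail worth flagging in the handler above: `device_property.upper()` is compared against the lowercase literal `'vrms_chan1'`, so that branch can never match for a non-empty property name; the comparison needs matching case on both sides. A tiny standalone check (plain Python, not PyScada code) illustrates the point:

```python
prop = 'vrms_chan1'
print(prop.upper() == 'vrms_chan1')   # False: 'VRMS_CHAN1' != 'vrms_chan1'
print(prop.upper() == 'VRMS_CHAN1')   # True: uppercase compared to uppercase
```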
5,265
Projectplace/basepage
basepage/base_page.py
BasePage.get_visible_elements
def get_visible_elements(self, locator, params=None, timeout=None): """ Get elements both present AND visible in the DOM. If timeout is 0 (zero) return WebElement instance or None, else we wait and retry for timeout and raise TimeoutException should the element not be found. :param locator: locator tuple :param params: (optional) locator params :param timeout: (optional) time to wait for element (default: self._explicit_wait) :return: WebElement instance """ return self.get_present_elements(locator, params, timeout, True)
python
def get_visible_elements(self, locator, params=None, timeout=None): """ Get elements both present AND visible in the DOM. If timeout is 0 (zero) return WebElement instance or None, else we wait and retry for timeout and raise TimeoutException should the element not be found. :param locator: locator tuple :param params: (optional) locator params :param timeout: (optional) time to wait for element (default: self._explicit_wait) :return: WebElement instance """ return self.get_present_elements(locator, params, timeout, True)
['def', 'get_visible_elements', '(', 'self', ',', 'locator', ',', 'params', '=', 'None', ',', 'timeout', '=', 'None', ')', ':', 'return', 'self', '.', 'get_present_elements', '(', 'locator', ',', 'params', ',', 'timeout', ',', 'True', ')']
Get elements both present AND visible in the DOM. If timeout is 0 (zero) return WebElement instance or None, else we wait and retry for timeout and raise TimeoutException should the element not be found. :param locator: locator tuple :param params: (optional) locator params :param timeout: (optional) time to wait for element (default: self._explicit_wait) :return: WebElement instance
['Get', 'elements', 'both', 'present', 'AND', 'visible', 'in', 'the', 'DOM', '.']
train
https://github.com/Projectplace/basepage/blob/735476877eb100db0981590a6d12140e68652167/basepage/base_page.py#L450-L462
5,266
gwastro/pycbc
pycbc/conversions.py
mass1_from_mass2_eta
def mass1_from_mass2_eta(mass2, eta, force_real=True): """Returns the primary mass from the secondary mass and symmetric mass ratio. """ return mass_from_knownmass_eta(mass2, eta, known_is_secondary=True, force_real=force_real)
python
def mass1_from_mass2_eta(mass2, eta, force_real=True): """Returns the primary mass from the secondary mass and symmetric mass ratio. """ return mass_from_knownmass_eta(mass2, eta, known_is_secondary=True, force_real=force_real)
['def', 'mass1_from_mass2_eta', '(', 'mass2', ',', 'eta', ',', 'force_real', '=', 'True', ')', ':', 'return', 'mass_from_knownmass_eta', '(', 'mass2', ',', 'eta', ',', 'known_is_secondary', '=', 'True', ',', 'force_real', '=', 'force_real', ')']
Returns the primary mass from the secondary mass and symmetric mass ratio.
['Returns', 'the', 'primary', 'mass', 'from', 'the', 'secondary', 'mass', 'and', 'symmetric', 'mass', 'ratio', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/conversions.py#L279-L284
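The relation behind `mass1_from_mass2_eta` is eta = m1*m2 / (m1 + m2)^2, so given m2 and eta the primary mass is a root of a quadratic. A self-contained worked example (not the pycbc implementation, which delegates to `mass_from_knownmass_eta`) takes the larger root so that m1 >= m2:

```python
import numpy as np

# Solve eta*(m1 + m2)**2 = m1*m2 for the primary mass m1 (illustration only).
def primary_mass(m2, eta):
    a = eta
    b = 2.0 * eta * m2 - m2
    c = eta * m2 ** 2
    disc = np.sqrt(b ** 2 - 4.0 * a * c)
    return (-b + disc) / (2.0 * a)   # larger root, so m1 >= m2

print(primary_mass(5.0, 50.0 / 225.0))   # ~10.0 for an (m1, m2) = (10, 5) system
```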
5,267
googleapis/google-auth-library-python-oauthlib
google_auth_oauthlib/flow.py
Flow.from_client_config
def from_client_config(cls, client_config, scopes, **kwargs): """Creates a :class:`requests_oauthlib.OAuth2Session` from client configuration loaded from a Google-format client secrets file. Args: client_config (Mapping[str, Any]): The client configuration in the Google `client secrets`_ format. scopes (Sequence[str]): The list of scopes to request during the flow. kwargs: Any additional parameters passed to :class:`requests_oauthlib.OAuth2Session` Returns: Flow: The constructed Flow instance. Raises: ValueError: If the client configuration is not in the correct format. .. _client secrets: https://developers.google.com/api-client-library/python/guide /aaa_client_secrets """ if 'web' in client_config: client_type = 'web' elif 'installed' in client_config: client_type = 'installed' else: raise ValueError( 'Client secrets must be for a web or installed app.') session, client_config = ( google_auth_oauthlib.helpers.session_from_client_config( client_config, scopes, **kwargs)) redirect_uri = kwargs.get('redirect_uri', None) return cls(session, client_type, client_config, redirect_uri)
python
def from_client_config(cls, client_config, scopes, **kwargs): """Creates a :class:`requests_oauthlib.OAuth2Session` from client configuration loaded from a Google-format client secrets file. Args: client_config (Mapping[str, Any]): The client configuration in the Google `client secrets`_ format. scopes (Sequence[str]): The list of scopes to request during the flow. kwargs: Any additional parameters passed to :class:`requests_oauthlib.OAuth2Session` Returns: Flow: The constructed Flow instance. Raises: ValueError: If the client configuration is not in the correct format. .. _client secrets: https://developers.google.com/api-client-library/python/guide /aaa_client_secrets """ if 'web' in client_config: client_type = 'web' elif 'installed' in client_config: client_type = 'installed' else: raise ValueError( 'Client secrets must be for a web or installed app.') session, client_config = ( google_auth_oauthlib.helpers.session_from_client_config( client_config, scopes, **kwargs)) redirect_uri = kwargs.get('redirect_uri', None) return cls(session, client_type, client_config, redirect_uri)
['def', 'from_client_config', '(', 'cls', ',', 'client_config', ',', 'scopes', ',', '*', '*', 'kwargs', ')', ':', 'if', "'web'", 'in', 'client_config', ':', 'client_type', '=', "'web'", 'elif', "'installed'", 'in', 'client_config', ':', 'client_type', '=', "'installed'", 'else', ':', 'raise', 'ValueError', '(', "'Client secrets must be for a web or installed app.'", ')', 'session', ',', 'client_config', '=', '(', 'google_auth_oauthlib', '.', 'helpers', '.', 'session_from_client_config', '(', 'client_config', ',', 'scopes', ',', '*', '*', 'kwargs', ')', ')', 'redirect_uri', '=', 'kwargs', '.', 'get', '(', "'redirect_uri'", ',', 'None', ')', 'return', 'cls', '(', 'session', ',', 'client_type', ',', 'client_config', ',', 'redirect_uri', ')']
Creates a :class:`requests_oauthlib.OAuth2Session` from client configuration loaded from a Google-format client secrets file. Args: client_config (Mapping[str, Any]): The client configuration in the Google `client secrets`_ format. scopes (Sequence[str]): The list of scopes to request during the flow. kwargs: Any additional parameters passed to :class:`requests_oauthlib.OAuth2Session` Returns: Flow: The constructed Flow instance. Raises: ValueError: If the client configuration is not in the correct format. .. _client secrets: https://developers.google.com/api-client-library/python/guide /aaa_client_secrets
['Creates', 'a', ':', 'class', ':', 'requests_oauthlib', '.', 'OAuth2Session', 'from', 'client', 'configuration', 'loaded', 'from', 'a', 'Google', '-', 'format', 'client', 'secrets', 'file', '.']
train
https://github.com/googleapis/google-auth-library-python-oauthlib/blob/ba826565994cf20c073d79f534036747fdef2041/google_auth_oauthlib/flow.py#L118-L154
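A hedged usage sketch for `Flow.from_client_config`; the client id, secret and redirect URI below are placeholders, and a real `client_config` would normally be the parsed contents of the client secrets JSON downloaded from the Google API console:

```python
from google_auth_oauthlib.flow import Flow

client_config = {
    'installed': {
        'client_id': 'example-id.apps.googleusercontent.com',   # placeholder
        'client_secret': 'example-secret',                      # placeholder
        'auth_uri': 'https://accounts.google.com/o/oauth2/auth',
        'token_uri': 'https://oauth2.googleapis.com/token',
    }
}

flow = Flow.from_client_config(
    client_config,
    scopes=['https://www.googleapis.com/auth/drive.metadata.readonly'],
    redirect_uri='urn:ietf:wg:oauth:2.0:oob')
auth_url, state = flow.authorization_url()
print(auth_url)
```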
5,268
textX/textX
textx/scoping/providers.py
GlobalRepo.load_models_in_model_repo
def load_models_in_model_repo(self, global_model_repo=None, encoding='utf-8'): """ load all registered models (called explicitly from the user and not as an automatic activity). Normally this is done automatically while reference resolution of one loaded model. However, if you wish to load all models you can call this and get a model repository. The metamodels must be identifiable via the MetaModelProvider. Returns: a GlobalModelRepository with the loaded models """ import textx.scoping if not global_model_repo: global_model_repo = textx.scoping.GlobalModelRepository() for filename_pattern in self.filename_pattern_list: global_model_repo.load_models_using_filepattern( filename_pattern, model=None, glob_args=self.glob_args, is_main_model=True, encoding=encoding ) return global_model_repo
python
def load_models_in_model_repo(self, global_model_repo=None, encoding='utf-8'): """ load all registered models (called explicitly from the user and not as an automatic activity). Normally this is done automatically while reference resolution of one loaded model. However, if you wish to load all models you can call this and get a model repository. The metamodels must be identifiable via the MetaModelProvider. Returns: a GlobalModelRepository with the loaded models """ import textx.scoping if not global_model_repo: global_model_repo = textx.scoping.GlobalModelRepository() for filename_pattern in self.filename_pattern_list: global_model_repo.load_models_using_filepattern( filename_pattern, model=None, glob_args=self.glob_args, is_main_model=True, encoding=encoding ) return global_model_repo
['def', 'load_models_in_model_repo', '(', 'self', ',', 'global_model_repo', '=', 'None', ',', 'encoding', '=', "'utf-8'", ')', ':', 'import', 'textx', '.', 'scoping', 'if', 'not', 'global_model_repo', ':', 'global_model_repo', '=', 'textx', '.', 'scoping', '.', 'GlobalModelRepository', '(', ')', 'for', 'filename_pattern', 'in', 'self', '.', 'filename_pattern_list', ':', 'global_model_repo', '.', 'load_models_using_filepattern', '(', 'filename_pattern', ',', 'model', '=', 'None', ',', 'glob_args', '=', 'self', '.', 'glob_args', ',', 'is_main_model', '=', 'True', ',', 'encoding', '=', 'encoding', ')', 'return', 'global_model_repo']
load all registered models (called explicitly from the user and not as an automatic activity). Normally this is done automatically while reference resolution of one loaded model. However, if you wish to load all models you can call this and get a model repository. The metamodels must be identifiable via the MetaModelProvider. Returns: a GlobalModelRepository with the loaded models
['load', 'all', 'registered', 'models', '(', 'called', 'explicitly', 'from', 'the', 'user', 'and', 'not', 'as', 'an', 'automatic', 'activity', ')', '.', 'Normally', 'this', 'is', 'done', 'automatically', 'while', 'reference', 'resolution', 'of', 'one', 'loaded', 'model', '.']
train
https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/scoping/providers.py#L480-L504
5,269
joeferraro/mm
mm/sforce/base.py
SforceBaseClient.queryAll
def queryAll(self, queryString): ''' Retrieves data from specified objects, whether or not they have been deleted. ''' self._setHeaders('queryAll') return self._sforce.service.queryAll(queryString)
python
def queryAll(self, queryString): ''' Retrieves data from specified objects, whether or not they have been deleted. ''' self._setHeaders('queryAll') return self._sforce.service.queryAll(queryString)
['def', 'queryAll', '(', 'self', ',', 'queryString', ')', ':', 'self', '.', '_setHeaders', '(', "'queryAll'", ')', 'return', 'self', '.', '_sforce', '.', 'service', '.', 'queryAll', '(', 'queryString', ')']
Retrieves data from specified objects, whether or not they have been deleted.
['Retrieves', 'data', 'from', 'specified', 'objects', 'whether', 'or', 'not', 'they', 'have', 'been', 'deleted', '.']
train
https://github.com/joeferraro/mm/blob/43dce48a2249faab4d872c228ada9fbdbeec147b/mm/sforce/base.py#L531-L536
5,270
LonamiWebs/Telethon
telethon_examples/interactive_telegram_client.py
InteractiveTelegramClient.send_document
async def send_document(self, path, entity): """Sends the file located at path to the desired entity as a document""" await self.send_file( entity, path, force_document=True, progress_callback=self.upload_progress_callback ) print('Document sent!')
python
async def send_document(self, path, entity): """Sends the file located at path to the desired entity as a document""" await self.send_file( entity, path, force_document=True, progress_callback=self.upload_progress_callback ) print('Document sent!')
['async', 'def', 'send_document', '(', 'self', ',', 'path', ',', 'entity', ')', ':', 'await', 'self', '.', 'send_file', '(', 'entity', ',', 'path', ',', 'force_document', '=', 'True', ',', 'progress_callback', '=', 'self', '.', 'upload_progress_callback', ')', 'print', '(', "'Document sent!'", ')']
Sends the file located at path to the desired entity as a document
['Sends', 'the', 'file', 'located', 'at', 'path', 'to', 'the', 'desired', 'entity', 'as', 'a', 'document']
train
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_examples/interactive_telegram_client.py#L320-L327
5,271
joyent/python-manta
manta/client.py
RawMantaClient.list_directory
def list_directory(self, mdir, limit=None, marker=None): """ListDirectory https://apidocs.joyent.com/manta/api.html#ListDirectory @param mdir {str} A manta path, e.g. '/trent/stor/mydir'. @param limit {int} Limits the number of records to come back (default and max is 1000). @param marker {str} Key name at which to start the next listing. @returns Directory entries (dirents). E.g.: [{u'mtime': u'2012-12-11T01:54:07Z', u'name': u'play', u'type': u'directory'}, ...] """ res, dirents = self.list_directory2(mdir, limit=limit, marker=marker) return dirents
python
def list_directory(self, mdir, limit=None, marker=None): """ListDirectory https://apidocs.joyent.com/manta/api.html#ListDirectory @param mdir {str} A manta path, e.g. '/trent/stor/mydir'. @param limit {int} Limits the number of records to come back (default and max is 1000). @param marker {str} Key name at which to start the next listing. @returns Directory entries (dirents). E.g.: [{u'mtime': u'2012-12-11T01:54:07Z', u'name': u'play', u'type': u'directory'}, ...] """ res, dirents = self.list_directory2(mdir, limit=limit, marker=marker) return dirents
['def', 'list_directory', '(', 'self', ',', 'mdir', ',', 'limit', '=', 'None', ',', 'marker', '=', 'None', ')', ':', 'res', ',', 'dirents', '=', 'self', '.', 'list_directory2', '(', 'mdir', ',', 'limit', '=', 'limit', ',', 'marker', '=', 'marker', ')', 'return', 'dirents']
ListDirectory https://apidocs.joyent.com/manta/api.html#ListDirectory @param mdir {str} A manta path, e.g. '/trent/stor/mydir'. @param limit {int} Limits the number of records to come back (default and max is 1000). @param marker {str} Key name at which to start the next listing. @returns Directory entries (dirents). E.g.: [{u'mtime': u'2012-12-11T01:54:07Z', u'name': u'play', u'type': u'directory'}, ...]
['ListDirectory', 'https', ':', '//', 'apidocs', '.', 'joyent', '.', 'com', '/', 'manta', '/', 'api', '.', 'html#ListDirectory']
train
https://github.com/joyent/python-manta/blob/f68ef142bdbac058c981e3b28e18d77612f5b7c6/manta/client.py#L234-L247
5,272
jaraco/path.py
path/__init__.py
Path.in_place
def in_place( self, mode='r', buffering=-1, encoding=None, errors=None, newline=None, backup_extension=None, ): """ A context in which a file may be re-written in-place with new content. Yields a tuple of :samp:`({readable}, {writable})` file objects, where `writable` replaces `readable`. If an exception occurs, the old file is restored, removing the written data. Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only-modes are allowed. A :exc:`ValueError` is raised on invalid modes. For example, to add line numbers to a file:: p = Path(filename) assert p.isfile() with p.in_place() as (reader, writer): for number, line in enumerate(reader, 1): writer.write('{0:3}: '.format(number))) writer.write(line) Thereafter, the file at `filename` will have line numbers in it. """ import io if set(mode).intersection('wa+'): raise ValueError('Only read-only file modes can be used') # move existing file to backup, create new file with same permissions # borrowed extensively from the fileinput module backup_fn = self + (backup_extension or os.extsep + 'bak') try: os.unlink(backup_fn) except os.error: pass os.rename(self, backup_fn) readable = io.open( backup_fn, mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) try: perm = os.fstat(readable.fileno()).st_mode except OSError: writable = open( self, 'w' + mode.replace('r', ''), buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) else: os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC if hasattr(os, 'O_BINARY'): os_mode |= os.O_BINARY fd = os.open(self, os_mode, perm) writable = io.open( fd, "w" + mode.replace('r', ''), buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) try: if hasattr(os, 'chmod'): os.chmod(self, perm) except OSError: pass try: yield readable, writable except Exception: # move backup back readable.close() writable.close() try: os.unlink(self) except os.error: pass os.rename(backup_fn, self) raise else: readable.close() writable.close() finally: try: os.unlink(backup_fn) except os.error: pass
python
def in_place( self, mode='r', buffering=-1, encoding=None, errors=None, newline=None, backup_extension=None, ): """ A context in which a file may be re-written in-place with new content. Yields a tuple of :samp:`({readable}, {writable})` file objects, where `writable` replaces `readable`. If an exception occurs, the old file is restored, removing the written data. Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only-modes are allowed. A :exc:`ValueError` is raised on invalid modes. For example, to add line numbers to a file:: p = Path(filename) assert p.isfile() with p.in_place() as (reader, writer): for number, line in enumerate(reader, 1): writer.write('{0:3}: '.format(number))) writer.write(line) Thereafter, the file at `filename` will have line numbers in it. """ import io if set(mode).intersection('wa+'): raise ValueError('Only read-only file modes can be used') # move existing file to backup, create new file with same permissions # borrowed extensively from the fileinput module backup_fn = self + (backup_extension or os.extsep + 'bak') try: os.unlink(backup_fn) except os.error: pass os.rename(self, backup_fn) readable = io.open( backup_fn, mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) try: perm = os.fstat(readable.fileno()).st_mode except OSError: writable = open( self, 'w' + mode.replace('r', ''), buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) else: os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC if hasattr(os, 'O_BINARY'): os_mode |= os.O_BINARY fd = os.open(self, os_mode, perm) writable = io.open( fd, "w" + mode.replace('r', ''), buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) try: if hasattr(os, 'chmod'): os.chmod(self, perm) except OSError: pass try: yield readable, writable except Exception: # move backup back readable.close() writable.close() try: os.unlink(self) except os.error: pass os.rename(backup_fn, self) raise else: readable.close() writable.close() finally: try: os.unlink(backup_fn) except os.error: pass
['def', 'in_place', '(', 'self', ',', 'mode', '=', "'r'", ',', 'buffering', '=', '-', '1', ',', 'encoding', '=', 'None', ',', 'errors', '=', 'None', ',', 'newline', '=', 'None', ',', 'backup_extension', '=', 'None', ',', ')', ':', 'import', 'io', 'if', 'set', '(', 'mode', ')', '.', 'intersection', '(', "'wa+'", ')', ':', 'raise', 'ValueError', '(', "'Only read-only file modes can be used'", ')', '# move existing file to backup, create new file with same permissions', '# borrowed extensively from the fileinput module', 'backup_fn', '=', 'self', '+', '(', 'backup_extension', 'or', 'os', '.', 'extsep', '+', "'bak'", ')', 'try', ':', 'os', '.', 'unlink', '(', 'backup_fn', ')', 'except', 'os', '.', 'error', ':', 'pass', 'os', '.', 'rename', '(', 'self', ',', 'backup_fn', ')', 'readable', '=', 'io', '.', 'open', '(', 'backup_fn', ',', 'mode', ',', 'buffering', '=', 'buffering', ',', 'encoding', '=', 'encoding', ',', 'errors', '=', 'errors', ',', 'newline', '=', 'newline', ',', ')', 'try', ':', 'perm', '=', 'os', '.', 'fstat', '(', 'readable', '.', 'fileno', '(', ')', ')', '.', 'st_mode', 'except', 'OSError', ':', 'writable', '=', 'open', '(', 'self', ',', "'w'", '+', 'mode', '.', 'replace', '(', "'r'", ',', "''", ')', ',', 'buffering', '=', 'buffering', ',', 'encoding', '=', 'encoding', ',', 'errors', '=', 'errors', ',', 'newline', '=', 'newline', ',', ')', 'else', ':', 'os_mode', '=', 'os', '.', 'O_CREAT', '|', 'os', '.', 'O_WRONLY', '|', 'os', '.', 'O_TRUNC', 'if', 'hasattr', '(', 'os', ',', "'O_BINARY'", ')', ':', 'os_mode', '|=', 'os', '.', 'O_BINARY', 'fd', '=', 'os', '.', 'open', '(', 'self', ',', 'os_mode', ',', 'perm', ')', 'writable', '=', 'io', '.', 'open', '(', 'fd', ',', '"w"', '+', 'mode', '.', 'replace', '(', "'r'", ',', "''", ')', ',', 'buffering', '=', 'buffering', ',', 'encoding', '=', 'encoding', ',', 'errors', '=', 'errors', ',', 'newline', '=', 'newline', ',', ')', 'try', ':', 'if', 'hasattr', '(', 'os', ',', "'chmod'", ')', ':', 'os', '.', 'chmod', '(', 'self', ',', 'perm', ')', 'except', 'OSError', ':', 'pass', 'try', ':', 'yield', 'readable', ',', 'writable', 'except', 'Exception', ':', '# move backup back', 'readable', '.', 'close', '(', ')', 'writable', '.', 'close', '(', ')', 'try', ':', 'os', '.', 'unlink', '(', 'self', ')', 'except', 'os', '.', 'error', ':', 'pass', 'os', '.', 'rename', '(', 'backup_fn', ',', 'self', ')', 'raise', 'else', ':', 'readable', '.', 'close', '(', ')', 'writable', '.', 'close', '(', ')', 'finally', ':', 'try', ':', 'os', '.', 'unlink', '(', 'backup_fn', ')', 'except', 'os', '.', 'error', ':', 'pass']
A context in which a file may be re-written in-place with new content. Yields a tuple of :samp:`({readable}, {writable})` file objects, where `writable` replaces `readable`. If an exception occurs, the old file is restored, removing the written data. Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only-modes are allowed. A :exc:`ValueError` is raised on invalid modes. For example, to add line numbers to a file:: p = Path(filename) assert p.isfile() with p.in_place() as (reader, writer): for number, line in enumerate(reader, 1): writer.write('{0:3}: '.format(number))) writer.write(line) Thereafter, the file at `filename` will have line numbers in it.
['A', 'context', 'in', 'which', 'a', 'file', 'may', 'be', 're', '-', 'written', 'in', '-', 'place', 'with', 'new', 'content', '.']
train
https://github.com/jaraco/path.py/blob/bbe7d99e7a64a004f866ace9ec12bd9b296908f5/path/__init__.py#L1336-L1424
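The docstring example quoted above carries a stray closing parenthesis in its `writer.write(...)` line; a corrected, self-contained version of the same line-numbering idea is sketched below, assuming the `path` package is installed and imports as `from path import Path` (the import name has varied between releases):

```python
import os
import tempfile
from path import Path   # import name assumed; older "path.py" releases may differ

tmp = tempfile.NamedTemporaryFile('w', delete=False, suffix='.txt')
tmp.write('alpha\nbeta\n')
tmp.close()

p = Path(tmp.name)
with p.in_place() as (reader, writer):
    for number, line in enumerate(reader, 1):
        writer.write('{0:3}: '.format(number))   # balanced parentheses here
        writer.write(line)

print(open(tmp.name).read())   # "  1: alpha" / "  2: beta"
os.unlink(tmp.name)
```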
5,273
equinor/segyio
python/segyio/field.py
Field.update
def update(self, *args, **kwargs): """d.update([E, ]**F) -> None. Update D from mapping/iterable E and F. Overwrite the values in `d` with the keys from `E` and `F`. If any key in `value` is invalid in `d`, ``KeyError`` is raised. This method is atomic - either all values in `value` are set in `d`, or none are. ``update`` does not commit a partially-updated version to disk. For kwargs, Seismic Unix-style names are supported. `BinField` and `TraceField` are not, because there are name collisions between them, although this restriction may be lifted in the future. Notes ----- .. versionchanged:: 1.3 Support for common dict operations (update, keys, values) .. versionchanged:: 1.6 Atomicity guarantee .. versionchanged:: 1.6 `**kwargs` support Examples -------- >>> e = { 1: 10, 9: 5 } >>> d.update(e) >>> l = [ (105, 11), (169, 4) ] >>> d.update(l) >>> d.update(e, iline=189, xline=193, hour=5) >>> d.update(sx=7) """ if len(args) > 1: msg = 'update expected at most 1 non-keyword argument, got {}' raise TypeError(msg.format(len(args))) buf = bytearray(self.buf) # Implementation largely borrowed from collections.mapping # If E present and has a .keys() method: for k in E: D[k] = E[k] # If E present and lacks .keys() method: for (k, v) in E: D[k] = v # In either case, this is followed by: for k, v in F.items(): D[k] = v if len(args) == 1: other = args[0] if isinstance(other, collections.Mapping): for key in other: self.putfield(buf, int(key), other[key]) elif hasattr(other, "keys"): for key in other.keys(): self.putfield(buf, int(key), other[key]) else: for key, value in other: self.putfield(buf, int(key), value) for key, value in kwargs.items(): self.putfield(buf, int(self._kwargs[key]), value) self.buf = buf self.flush()
python
def update(self, *args, **kwargs): """d.update([E, ]**F) -> None. Update D from mapping/iterable E and F. Overwrite the values in `d` with the keys from `E` and `F`. If any key in `value` is invalid in `d`, ``KeyError`` is raised. This method is atomic - either all values in `value` are set in `d`, or none are. ``update`` does not commit a partially-updated version to disk. For kwargs, Seismic Unix-style names are supported. `BinField` and `TraceField` are not, because there are name collisions between them, although this restriction may be lifted in the future. Notes ----- .. versionchanged:: 1.3 Support for common dict operations (update, keys, values) .. versionchanged:: 1.6 Atomicity guarantee .. versionchanged:: 1.6 `**kwargs` support Examples -------- >>> e = { 1: 10, 9: 5 } >>> d.update(e) >>> l = [ (105, 11), (169, 4) ] >>> d.update(l) >>> d.update(e, iline=189, xline=193, hour=5) >>> d.update(sx=7) """ if len(args) > 1: msg = 'update expected at most 1 non-keyword argument, got {}' raise TypeError(msg.format(len(args))) buf = bytearray(self.buf) # Implementation largely borrowed from collections.mapping # If E present and has a .keys() method: for k in E: D[k] = E[k] # If E present and lacks .keys() method: for (k, v) in E: D[k] = v # In either case, this is followed by: for k, v in F.items(): D[k] = v if len(args) == 1: other = args[0] if isinstance(other, collections.Mapping): for key in other: self.putfield(buf, int(key), other[key]) elif hasattr(other, "keys"): for key in other.keys(): self.putfield(buf, int(key), other[key]) else: for key, value in other: self.putfield(buf, int(key), value) for key, value in kwargs.items(): self.putfield(buf, int(self._kwargs[key]), value) self.buf = buf self.flush()
['def', 'update', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'len', '(', 'args', ')', '>', '1', ':', 'msg', '=', "'update expected at most 1 non-keyword argument, got {}'", 'raise', 'TypeError', '(', 'msg', '.', 'format', '(', 'len', '(', 'args', ')', ')', ')', 'buf', '=', 'bytearray', '(', 'self', '.', 'buf', ')', '# Implementation largely borrowed from collections.mapping', '# If E present and has a .keys() method: for k in E: D[k] = E[k]', '# If E present and lacks .keys() method: for (k, v) in E: D[k] = v', '# In either case, this is followed by: for k, v in F.items(): D[k] = v', 'if', 'len', '(', 'args', ')', '==', '1', ':', 'other', '=', 'args', '[', '0', ']', 'if', 'isinstance', '(', 'other', ',', 'collections', '.', 'Mapping', ')', ':', 'for', 'key', 'in', 'other', ':', 'self', '.', 'putfield', '(', 'buf', ',', 'int', '(', 'key', ')', ',', 'other', '[', 'key', ']', ')', 'elif', 'hasattr', '(', 'other', ',', '"keys"', ')', ':', 'for', 'key', 'in', 'other', '.', 'keys', '(', ')', ':', 'self', '.', 'putfield', '(', 'buf', ',', 'int', '(', 'key', ')', ',', 'other', '[', 'key', ']', ')', 'else', ':', 'for', 'key', ',', 'value', 'in', 'other', ':', 'self', '.', 'putfield', '(', 'buf', ',', 'int', '(', 'key', ')', ',', 'value', ')', 'for', 'key', ',', 'value', 'in', 'kwargs', '.', 'items', '(', ')', ':', 'self', '.', 'putfield', '(', 'buf', ',', 'int', '(', 'self', '.', '_kwargs', '[', 'key', ']', ')', ',', 'value', ')', 'self', '.', 'buf', '=', 'buf', 'self', '.', 'flush', '(', ')']
d.update([E, ]**F) -> None. Update D from mapping/iterable E and F. Overwrite the values in `d` with the keys from `E` and `F`. If any key in `value` is invalid in `d`, ``KeyError`` is raised. This method is atomic - either all values in `value` are set in `d`, or none are. ``update`` does not commit a partially-updated version to disk. For kwargs, Seismic Unix-style names are supported. `BinField` and `TraceField` are not, because there are name collisions between them, although this restriction may be lifted in the future. Notes ----- .. versionchanged:: 1.3 Support for common dict operations (update, keys, values) .. versionchanged:: 1.6 Atomicity guarantee .. versionchanged:: 1.6 `**kwargs` support Examples -------- >>> e = { 1: 10, 9: 5 } >>> d.update(e) >>> l = [ (105, 11), (169, 4) ] >>> d.update(l) >>> d.update(e, iline=189, xline=193, hour=5) >>> d.update(sx=7)
['d', '.', 'update', '(', '[', 'E', ']', '**', 'F', ')', '-', '>', 'None', '.', 'Update', 'D', 'from', 'mapping', '/', 'iterable', 'E', 'and', 'F', '.']
train
https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/field.py#L453-L515
5,274
biosignalsnotebooks/biosignalsnotebooks
biosignalsnotebooks/build/lib/biosignalsnotebooks/external_packages/novainstrumentation/freq_analysis.py
max_frequency
def max_frequency (sig,FS): """Compute max frequency along the specified axes. Parameters ---------- sig: ndarray input from which max frequency is computed. FS: int sampling frequency Returns ------- f_max: int 0.95 of max_frequency using cumsum. """ f, fs = plotfft(sig, FS, doplot=False) t = cumsum(fs) ind_mag = find (t>t[-1]*0.95)[0] f_max=f[ind_mag] return f_max
python
def max_frequency (sig,FS): """Compute max frequency along the specified axes. Parameters ---------- sig: ndarray input from which max frequency is computed. FS: int sampling frequency Returns ------- f_max: int 0.95 of max_frequency using cumsum. """ f, fs = plotfft(sig, FS, doplot=False) t = cumsum(fs) ind_mag = find (t>t[-1]*0.95)[0] f_max=f[ind_mag] return f_max
['def', 'max_frequency', '(', 'sig', ',', 'FS', ')', ':', 'f', ',', 'fs', '=', 'plotfft', '(', 'sig', ',', 'FS', ',', 'doplot', '=', 'False', ')', 't', '=', 'cumsum', '(', 'fs', ')', 'ind_mag', '=', 'find', '(', 't', '>', 't', '[', '-', '1', ']', '*', '0.95', ')', '[', '0', ']', 'f_max', '=', 'f', '[', 'ind_mag', ']', 'return', 'f_max']
Compute max frequency along the specified axes. Parameters ---------- sig: ndarray input from which max frequency is computed. FS: int sampling frequency Returns ------- f_max: int 0.95 of max_frequency using cumsum.
['Compute', 'max', 'frequency', 'along', 'the', 'specified', 'axes', '.']
train
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/external_packages/novainstrumentation/freq_analysis.py#L52-L72
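The 0.95 cumulative-spectrum rule in `max_frequency` can be reproduced with a plain NumPy rFFT; `plotfft` from novainstrumentation is replaced here by `numpy.fft`, so this is only an illustration of the idea, not the package's own code:

```python
import numpy as np

FS = 1000.0                                   # sampling frequency in Hz
t = np.arange(0, 1.0, 1 / FS)
sig = np.sin(2 * np.pi * 50 * t) + 0.5 * np.sin(2 * np.pi * 120 * t)

f = np.fft.rfftfreq(len(sig), d=1 / FS)       # frequency axis
mag = np.abs(np.fft.rfft(sig))                # magnitude spectrum
cum = np.cumsum(mag)
f_max = f[np.nonzero(cum > cum[-1] * 0.95)[0][0]]
print(f_max)   # 120.0: first frequency where the cumulative magnitude exceeds 95%
```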
5,275
rgalanakis/goless
write_benchresults.py
stdout_to_results
def stdout_to_results(s): """Turns the multi-line output of a benchmark process into a sequence of BenchmarkResult instances.""" results = s.strip().split('\n') return [BenchmarkResult(*r.split()) for r in results]
python
def stdout_to_results(s): """Turns the multi-line output of a benchmark process into a sequence of BenchmarkResult instances.""" results = s.strip().split('\n') return [BenchmarkResult(*r.split()) for r in results]
['def', 'stdout_to_results', '(', 's', ')', ':', 'results', '=', 's', '.', 'strip', '(', ')', '.', 'split', '(', "'\\n'", ')', 'return', '[', 'BenchmarkResult', '(', '*', 'r', '.', 'split', '(', ')', ')', 'for', 'r', 'in', 'results', ']']
Turns the multi-line output of a benchmark process into a sequence of BenchmarkResult instances.
['Turns', 'the', 'multi', '-', 'line', 'output', 'of', 'a', 'benchmark', 'process', 'into', 'a', 'sequence', 'of', 'BenchmarkResult', 'instances', '.']
train
https://github.com/rgalanakis/goless/blob/286cd69482ae5a56c899a0c0d5d895772d96e83d/write_benchresults.py#L37-L41
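A small stand-alone illustration of `stdout_to_results`; the field names of `BenchmarkResult` below are hypothetical stand-ins (the real namedtuple is defined elsewhere in write_benchresults.py), the point is just the splitting of multi-line stdout into records:

```python
from collections import namedtuple

# Hypothetical stand-in for the real BenchmarkResult namedtuple.
BenchmarkResult = namedtuple('BenchmarkResult', 'platform backend seconds')

stdout = "CPython stackless 0.012\nCPython gevent 0.034\n"
results = [BenchmarkResult(*line.split()) for line in stdout.strip().split('\n')]
print(results[0])   # BenchmarkResult(platform='CPython', backend='stackless', seconds='0.012')
```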
5,276
thespacedoctor/qubits
qubits/universe.py
random_redshift_array
def random_redshift_array( log, sampleNumber, lowerRedshiftLimit, upperRedshiftLimit, redshiftResolution, pathToOutputPlotDirectory, plot=False): """ *Generate a NumPy array of random distances given a sample number and distance limit* **Key Arguments:** - ``log`` -- logger - ``sampleNumber`` -- the sample number, i.e. array size - ``lowerRedshiftLimit`` -- the lower redshift limit of the volume to be included - ``upperRedshiftLimit`` -- the upper redshift limit of the volume to be included - ``redshiftResolution`` -- the resolution of the redshift distribution - ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user) - ``plot`` -- generate plot? **Return:** - ``redshiftArray`` -- an array of random redshifts within the volume limit """ ################ > IMPORTS ################ ## STANDARD LIB ## ## THIRD PARTY ## import matplotlib.pyplot as plt import numpy as np import numpy.random as npr ## LOCAL APPLICATION ## import dryxPython.astrotools as da redshiftDistribution = np.arange( 0., upperRedshiftLimit, redshiftResolution) closestNumber = lambda n, l: min(l, key=lambda x: abs(x - n)) # GIVEN THE REDSHIFT LIMIT - DETERMINE THE VOLUME LIMIT distanceDictionary = da.convert_redshift_to_distance(upperRedshiftLimit) upperMpcLimit = distanceDictionary["dl_mpc"] upperVolumeLimit = (4. / 3.) * np.pi * upperMpcLimit ** 3 if lowerRedshiftLimit == 0.: lowerVolumeLimit = 0. else: distanceDictionary = da.convert_redshift_to_distance( lowerRedshiftLimit) lowerMpcLimit = distanceDictionary["dl_mpc"] lowerVolumeLimit = (4. / 3.) * np.pi * lowerMpcLimit ** 3 volumeShell = upperVolumeLimit - lowerVolumeLimit # GENERATE A LIST OF RANDOM DISTANCES redshiftList = [] for i in range(sampleNumber): randomVolume = lowerVolumeLimit + npr.random() * volumeShell randomDistance = (randomVolume * (3. / 4.) / np.pi) ** (1. / 3.) randomRedshift = da.convert_mpc_to_redshift(randomDistance) randomRedshift = closestNumber(randomRedshift, redshiftDistribution) # log.debug('randomDistance %s' % (randomDistance,)) redshiftList.append(randomRedshift) redshiftArray = np.array(redshiftList) # log.info('redshiftArray %s' % (redshiftArray,)) if plot: # FORCE SQUARE FIGURE AND SQUARE AXES LOOKS BETTER FOR POLAR fig = plt.figure( num=None, figsize=(8, 8), dpi=None, facecolor=None, edgecolor=None, frameon=True) ax = fig.add_axes( [0.1, 0.1, 0.8, 0.8], polar=True) thetaList = [] twoPi = 2. * np.pi for i in range(sampleNumber): thetaList.append(twoPi * npr.random()) thetaArray = np.array(thetaList) plt.scatter( thetaArray, redshiftArray, s=10, c='b', marker='o', cmap=None, norm=None, vmin=None, vmax=None, alpha=None, linewidths=None, edgecolor='w', verts=None, hold=None) title = "SN Redshift Distribution" plt.title(title) fileName = pathToOutputPlotDirectory + title.replace(" ", "_") + ".png" plt.savefig(fileName) plt.clf() # clear figure return redshiftArray
python
def random_redshift_array( log, sampleNumber, lowerRedshiftLimit, upperRedshiftLimit, redshiftResolution, pathToOutputPlotDirectory, plot=False): """ *Generate a NumPy array of random distances given a sample number and distance limit* **Key Arguments:** - ``log`` -- logger - ``sampleNumber`` -- the sample number, i.e. array size - ``lowerRedshiftLimit`` -- the lower redshift limit of the volume to be included - ``upperRedshiftLimit`` -- the upper redshift limit of the volume to be included - ``redshiftResolution`` -- the resolution of the redshift distribution - ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user) - ``plot`` -- generate plot? **Return:** - ``redshiftArray`` -- an array of random redshifts within the volume limit """ ################ > IMPORTS ################ ## STANDARD LIB ## ## THIRD PARTY ## import matplotlib.pyplot as plt import numpy as np import numpy.random as npr ## LOCAL APPLICATION ## import dryxPython.astrotools as da redshiftDistribution = np.arange( 0., upperRedshiftLimit, redshiftResolution) closestNumber = lambda n, l: min(l, key=lambda x: abs(x - n)) # GIVEN THE REDSHIFT LIMIT - DETERMINE THE VOLUME LIMIT distanceDictionary = da.convert_redshift_to_distance(upperRedshiftLimit) upperMpcLimit = distanceDictionary["dl_mpc"] upperVolumeLimit = (4. / 3.) * np.pi * upperMpcLimit ** 3 if lowerRedshiftLimit == 0.: lowerVolumeLimit = 0. else: distanceDictionary = da.convert_redshift_to_distance( lowerRedshiftLimit) lowerMpcLimit = distanceDictionary["dl_mpc"] lowerVolumeLimit = (4. / 3.) * np.pi * lowerMpcLimit ** 3 volumeShell = upperVolumeLimit - lowerVolumeLimit # GENERATE A LIST OF RANDOM DISTANCES redshiftList = [] for i in range(sampleNumber): randomVolume = lowerVolumeLimit + npr.random() * volumeShell randomDistance = (randomVolume * (3. / 4.) / np.pi) ** (1. / 3.) randomRedshift = da.convert_mpc_to_redshift(randomDistance) randomRedshift = closestNumber(randomRedshift, redshiftDistribution) # log.debug('randomDistance %s' % (randomDistance,)) redshiftList.append(randomRedshift) redshiftArray = np.array(redshiftList) # log.info('redshiftArray %s' % (redshiftArray,)) if plot: # FORCE SQUARE FIGURE AND SQUARE AXES LOOKS BETTER FOR POLAR fig = plt.figure( num=None, figsize=(8, 8), dpi=None, facecolor=None, edgecolor=None, frameon=True) ax = fig.add_axes( [0.1, 0.1, 0.8, 0.8], polar=True) thetaList = [] twoPi = 2. * np.pi for i in range(sampleNumber): thetaList.append(twoPi * npr.random()) thetaArray = np.array(thetaList) plt.scatter( thetaArray, redshiftArray, s=10, c='b', marker='o', cmap=None, norm=None, vmin=None, vmax=None, alpha=None, linewidths=None, edgecolor='w', verts=None, hold=None) title = "SN Redshift Distribution" plt.title(title) fileName = pathToOutputPlotDirectory + title.replace(" ", "_") + ".png" plt.savefig(fileName) plt.clf() # clear figure return redshiftArray
['def', 'random_redshift_array', '(', 'log', ',', 'sampleNumber', ',', 'lowerRedshiftLimit', ',', 'upperRedshiftLimit', ',', 'redshiftResolution', ',', 'pathToOutputPlotDirectory', ',', 'plot', '=', 'False', ')', ':', '################ > IMPORTS ################', '## STANDARD LIB ##', '## THIRD PARTY ##', 'import', 'matplotlib', '.', 'pyplot', 'as', 'plt', 'import', 'numpy', 'as', 'np', 'import', 'numpy', '.', 'random', 'as', 'npr', '## LOCAL APPLICATION ##', 'import', 'dryxPython', '.', 'astrotools', 'as', 'da', 'redshiftDistribution', '=', 'np', '.', 'arange', '(', '0.', ',', 'upperRedshiftLimit', ',', 'redshiftResolution', ')', 'closestNumber', '=', 'lambda', 'n', ',', 'l', ':', 'min', '(', 'l', ',', 'key', '=', 'lambda', 'x', ':', 'abs', '(', 'x', '-', 'n', ')', ')', '# GIVEN THE REDSHIFT LIMIT - DETERMINE THE VOLUME LIMIT', 'distanceDictionary', '=', 'da', '.', 'convert_redshift_to_distance', '(', 'upperRedshiftLimit', ')', 'upperMpcLimit', '=', 'distanceDictionary', '[', '"dl_mpc"', ']', 'upperVolumeLimit', '=', '(', '4.', '/', '3.', ')', '*', 'np', '.', 'pi', '*', 'upperMpcLimit', '**', '3', 'if', 'lowerRedshiftLimit', '==', '0.', ':', 'lowerVolumeLimit', '=', '0.', 'else', ':', 'distanceDictionary', '=', 'da', '.', 'convert_redshift_to_distance', '(', 'lowerRedshiftLimit', ')', 'lowerMpcLimit', '=', 'distanceDictionary', '[', '"dl_mpc"', ']', 'lowerVolumeLimit', '=', '(', '4.', '/', '3.', ')', '*', 'np', '.', 'pi', '*', 'lowerMpcLimit', '**', '3', 'volumeShell', '=', 'upperVolumeLimit', '-', 'lowerVolumeLimit', '# GENERATE A LIST OF RANDOM DISTANCES', 'redshiftList', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'sampleNumber', ')', ':', 'randomVolume', '=', 'lowerVolumeLimit', '+', 'npr', '.', 'random', '(', ')', '*', 'volumeShell', 'randomDistance', '=', '(', 'randomVolume', '*', '(', '3.', '/', '4.', ')', '/', 'np', '.', 'pi', ')', '**', '(', '1.', '/', '3.', ')', 'randomRedshift', '=', 'da', '.', 'convert_mpc_to_redshift', '(', 'randomDistance', ')', 'randomRedshift', '=', 'closestNumber', '(', 'randomRedshift', ',', 'redshiftDistribution', ')', "# log.debug('randomDistance %s' % (randomDistance,))", 'redshiftList', '.', 'append', '(', 'randomRedshift', ')', 'redshiftArray', '=', 'np', '.', 'array', '(', 'redshiftList', ')', "# log.info('redshiftArray %s' % (redshiftArray,))", 'if', 'plot', ':', '# FORCE SQUARE FIGURE AND SQUARE AXES LOOKS BETTER FOR POLAR', 'fig', '=', 'plt', '.', 'figure', '(', 'num', '=', 'None', ',', 'figsize', '=', '(', '8', ',', '8', ')', ',', 'dpi', '=', 'None', ',', 'facecolor', '=', 'None', ',', 'edgecolor', '=', 'None', ',', 'frameon', '=', 'True', ')', 'ax', '=', 'fig', '.', 'add_axes', '(', '[', '0.1', ',', '0.1', ',', '0.8', ',', '0.8', ']', ',', 'polar', '=', 'True', ')', 'thetaList', '=', '[', ']', 'twoPi', '=', '2.', '*', 'np', '.', 'pi', 'for', 'i', 'in', 'range', '(', 'sampleNumber', ')', ':', 'thetaList', '.', 'append', '(', 'twoPi', '*', 'npr', '.', 'random', '(', ')', ')', 'thetaArray', '=', 'np', '.', 'array', '(', 'thetaList', ')', 'plt', '.', 'scatter', '(', 'thetaArray', ',', 'redshiftArray', ',', 's', '=', '10', ',', 'c', '=', "'b'", ',', 'marker', '=', "'o'", ',', 'cmap', '=', 'None', ',', 'norm', '=', 'None', ',', 'vmin', '=', 'None', ',', 'vmax', '=', 'None', ',', 'alpha', '=', 'None', ',', 'linewidths', '=', 'None', ',', 'edgecolor', '=', "'w'", ',', 'verts', '=', 'None', ',', 'hold', '=', 'None', ')', 'title', '=', '"SN Redshift Distribution"', 'plt', '.', 'title', '(', 'title', ')', 'fileName', '=', 'pathToOutputPlotDirectory', 
'+', 'title', '.', 'replace', '(', '" "', ',', '"_"', ')', '+', '".png"', 'plt', '.', 'savefig', '(', 'fileName', ')', 'plt', '.', 'clf', '(', ')', '# clear figure', 'return', 'redshiftArray']
*Generate a NumPy array of random distances given a sample number and distance limit* **Key Arguments:** - ``log`` -- logger - ``sampleNumber`` -- the sample number, i.e. array size - ``lowerRedshiftLimit`` -- the lower redshift limit of the volume to be included - ``upperRedshiftLimit`` -- the upper redshift limit of the volume to be included - ``redshiftResolution`` -- the resolution of the redshift distribution - ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user) - ``plot`` -- generate plot? **Return:** - ``redshiftArray`` -- an array of random redshifts within the volume limit
['*', 'Generate', 'a', 'NumPy', 'array', 'of', 'random', 'distances', 'given', 'a', 'sample', 'number', 'and', 'distance', 'limit', '*']
train
https://github.com/thespacedoctor/qubits/blob/3c02ace7226389841c6bb838d045c11bed61a3c2/qubits/universe.py#L36-L142
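The core trick in `random_redshift_array` is sampling uniformly in enclosed volume and converting back to distance, so nearby shells are not over-represented. A short NumPy sketch of just that step (the redshift conversion via dryxPython.astrotools is omitted, so values stay in Mpc):

```python
import numpy as np

rng = np.random.default_rng(42)
r_lo, r_hi = 100.0, 500.0                               # Mpc
v_lo = (4.0 / 3.0) * np.pi * r_lo ** 3
v_hi = (4.0 / 3.0) * np.pi * r_hi ** 3

volumes = v_lo + rng.random(5) * (v_hi - v_lo)          # uniform in volume
radii = (volumes * 3.0 / (4.0 * np.pi)) ** (1.0 / 3.0)  # back to distances
print(np.round(radii, 1))
```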
5,277
datacratic/pymldb
pymldb/data.py
BatFrame.rows
def rows(self): """Returns a numpy array of the rows name""" bf = self.copy() result = bf.query.executeQuery(format="soa") return result["_rowName"]
python
def rows(self): """Returns a numpy array of the rows name""" bf = self.copy() result = bf.query.executeQuery(format="soa") return result["_rowName"]
['def', 'rows', '(', 'self', ')', ':', 'bf', '=', 'self', '.', 'copy', '(', ')', 'result', '=', 'bf', '.', 'query', '.', 'executeQuery', '(', 'format', '=', '"soa"', ')', 'return', 'result', '[', '"_rowName"', ']']
Returns a numpy array of the rows name
['Returns', 'a', 'numpy', 'array', 'of', 'the', 'rows', 'name']
train
https://github.com/datacratic/pymldb/blob/e41f3c37138e9fd4a82ef3db685899cdafa4125e/pymldb/data.py#L65-L69
5,278
vbwagner/ctypescrypto
ctypescrypto/oid.py
create
def create(dotted, shortname, longname): """ Creates new OID in the database @param dotted - dotted-decimal representation of new OID @param shortname - short name for new OID @param longname - long name for new OID @returns Oid object corresponding to new OID This function should be used with exreme care. Whenever possible, it is better to add new OIDs via OpenSSL configuration file Results of calling this function twice for same OIDor for Oid alredy in database are undefined """ if pyver > 2: dotted = dotted.encode('ascii') shortname = shortname.encode('utf-8') longname = longname.encode('utf-8') nid = libcrypto.OBJ_create(dotted, shortname, longname) if nid == 0: raise LibCryptoError("Problem adding new OID to the database") return Oid(nid)
python
def create(dotted, shortname, longname): """ Creates new OID in the database @param dotted - dotted-decimal representation of new OID @param shortname - short name for new OID @param longname - long name for new OID @returns Oid object corresponding to new OID This function should be used with exreme care. Whenever possible, it is better to add new OIDs via OpenSSL configuration file Results of calling this function twice for same OIDor for Oid alredy in database are undefined """ if pyver > 2: dotted = dotted.encode('ascii') shortname = shortname.encode('utf-8') longname = longname.encode('utf-8') nid = libcrypto.OBJ_create(dotted, shortname, longname) if nid == 0: raise LibCryptoError("Problem adding new OID to the database") return Oid(nid)
['def', 'create', '(', 'dotted', ',', 'shortname', ',', 'longname', ')', ':', 'if', 'pyver', '>', '2', ':', 'dotted', '=', 'dotted', '.', 'encode', '(', "'ascii'", ')', 'shortname', '=', 'shortname', '.', 'encode', '(', "'utf-8'", ')', 'longname', '=', 'longname', '.', 'encode', '(', "'utf-8'", ')', 'nid', '=', 'libcrypto', '.', 'OBJ_create', '(', 'dotted', ',', 'shortname', ',', 'longname', ')', 'if', 'nid', '==', '0', ':', 'raise', 'LibCryptoError', '(', '"Problem adding new OID to the database"', ')', 'return', 'Oid', '(', 'nid', ')']
Creates new OID in the database @param dotted - dotted-decimal representation of new OID @param shortname - short name for new OID @param longname - long name for new OID @returns Oid object corresponding to new OID This function should be used with exreme care. Whenever possible, it is better to add new OIDs via OpenSSL configuration file Results of calling this function twice for same OIDor for Oid alredy in database are undefined
['Creates', 'new', 'OID', 'in', 'the', 'database']
train
https://github.com/vbwagner/ctypescrypto/blob/33c32904cf5e04901f87f90e2499634b8feecd3e/ctypescrypto/oid.py#L125-L150
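A hedged usage sketch for `create`; the dotted OID, short name and long name below are made-up test values under a private arc, and since the call mutates OpenSSL's in-process OID table it should not be repeated for the same OID (as the docstring warns):

```python
from ctypescrypto.oid import create

# Register a made-up OID (placeholder values); requires OpenSSL via ctypescrypto.
new_oid = create('1.3.6.1.4.1.99999.1', 'myTestOid', 'My Test OID')
print(new_oid)
```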
5,279
filestack/filestack-python
filestack/models/filestack_transform.py
Transform.url
def url(self): """ Returns the URL for the current transformation, which can be used to retrieve the file. If security is enabled, signature and policy parameters will be included *returns* [String] ```python transform = client.upload(filepath='/path/to/file') transform.url() # https://cdn.filestackcontent.com/TRANSFORMS/FILE_HANDLE ``` """ return utils.get_transform_url( self._transformation_tasks, external_url=self.external_url, handle=self.handle, security=self.security, apikey=self.apikey )
python
def url(self): """ Returns the URL for the current transformation, which can be used to retrieve the file. If security is enabled, signature and policy parameters will be included *returns* [String] ```python transform = client.upload(filepath='/path/to/file') transform.url() # https://cdn.filestackcontent.com/TRANSFORMS/FILE_HANDLE ``` """ return utils.get_transform_url( self._transformation_tasks, external_url=self.external_url, handle=self.handle, security=self.security, apikey=self.apikey )
['def', 'url', '(', 'self', ')', ':', 'return', 'utils', '.', 'get_transform_url', '(', 'self', '.', '_transformation_tasks', ',', 'external_url', '=', 'self', '.', 'external_url', ',', 'handle', '=', 'self', '.', 'handle', ',', 'security', '=', 'self', '.', 'security', ',', 'apikey', '=', 'self', '.', 'apikey', ')']
Returns the URL for the current transformation, which can be used to retrieve the file. If security is enabled, signature and policy parameters will be included *returns* [String] ```python transform = client.upload(filepath='/path/to/file') transform.url() # https://cdn.filestackcontent.com/TRANSFORMS/FILE_HANDLE ```
['Returns', 'the', 'URL', 'for', 'the', 'current', 'transformation', 'which', 'can', 'be', 'used', 'to', 'retrieve', 'the', 'file', '.', 'If', 'security', 'is', 'enabled', 'signature', 'and', 'policy', 'parameters', 'will', 'be', 'included']
train
https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_transform.py#L96-L113
5,280
bitesofcode/projexui
projexui/widgets/xiconbutton.py
XIconButton.setFilepath
def setFilepath( self, filepath ): """ Sets the filepath for this button to the inputed path. :param filepath | <str> """ self._filepath = nativestring(filepath) self.setIcon(QIcon(filepath)) if ( not self.signalsBlocked() ): self.filepathChanged.emit(filepath)
python
def setFilepath( self, filepath ): """ Sets the filepath for this button to the inputed path. :param filepath | <str> """ self._filepath = nativestring(filepath) self.setIcon(QIcon(filepath)) if ( not self.signalsBlocked() ): self.filepathChanged.emit(filepath)
['def', 'setFilepath', '(', 'self', ',', 'filepath', ')', ':', 'self', '.', '_filepath', '=', 'nativestring', '(', 'filepath', ')', 'self', '.', 'setIcon', '(', 'QIcon', '(', 'filepath', ')', ')', 'if', '(', 'not', 'self', '.', 'signalsBlocked', '(', ')', ')', ':', 'self', '.', 'filepathChanged', '.', 'emit', '(', 'filepath', ')']
Sets the filepath for this button to the inputed path. :param filepath | <str>
['Sets', 'the', 'filepath', 'for', 'this', 'button', 'to', 'the', 'inputed', 'path', '.', ':', 'param', 'filepath', '|', '<str', '>']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xiconbutton.py#L126-L135
5,281
phoebe-project/phoebe2
phoebe/dependencies/autofig/axes.py
Axes.consistent_with_call
def consistent_with_call(self, call): """ check to see if a new call would be consistent to add to this Axes instance checks include: * compatible units in all directions * compatible independent-variable (if applicable) """ if len(self.calls) == 0: return True, '' msg = [] if not _consistent_allow_none(call._axorder, self._axorder): msg.append('inconsistent axorder, {} != {}'.format(call.axorder, self.axorder)) if not _consistent_allow_none(call._axpos, self._axpos): msg.append('inconsistent axpos, {} != {}'.format(call.axpos, self.axpos)) if call._axorder == self._axorder and call._axorder is not None: # then despite other conflicts, attempt to put on same axes return True, '' if call._axpos == self._axpos and call._axpos is not None: # then despite other conflicts, attempt to put on same axes return True, '' # TODO: include s, c, fc, ec, etc and make these checks into loops if call.x.unit.physical_type != self.x.unit.physical_type: msg.append('inconsitent xunit, {} != {}'.format(call.x.unit, self.x.unit)) if call.y.unit.physical_type != self.y.unit.physical_type: msg.append('inconsitent yunit, {} != {}'.format(call.y.unit, self.y.unit)) if call.z.unit.physical_type != self.z.unit.physical_type: msg.append('inconsitent zunit, {} != {}'.format(call.z.unit, self.z.unit)) if call.i.unit.physical_type != self.i.unit.physical_type: msg.append('inconsistent iunit, {} != {}'.format(call.i.unit, self.i.unit)) if call.i.is_reference or self.i.is_reference: if call.i.reference != self.i.reference: msg.append('inconsistent i reference, {} != {}'.format(call.i.reference, self.i.reference)) if not _consistent_allow_none(call.title, self.title): msg.append('inconsistent axes title, {} != {}'.format(call.title, self.title)) # here we send the protected _label so that we get None instead of empty string if not _consistent_allow_none(call.x._label, self.x._label): msg.append('inconsitent xlabel, {} != {}'.format(call.x.label, self.x.label)) if not _consistent_allow_none(call.y._label, self.y._label): msg.append('inconsitent ylabel, {} != {}'.format(call.y.label, self.y.label)) if not _consistent_allow_none(call.z._label, self.z._label): msg.append('inconsitent zlabel, {} != {}'.format(call.z.label, self.z.label)) if len(msg): return False, ', '.join(msg) else: return True, ''
python
def consistent_with_call(self, call): """ check to see if a new call would be consistent to add to this Axes instance checks include: * compatible units in all directions * compatible independent-variable (if applicable) """ if len(self.calls) == 0: return True, '' msg = [] if not _consistent_allow_none(call._axorder, self._axorder): msg.append('inconsistent axorder, {} != {}'.format(call.axorder, self.axorder)) if not _consistent_allow_none(call._axpos, self._axpos): msg.append('inconsistent axpos, {} != {}'.format(call.axpos, self.axpos)) if call._axorder == self._axorder and call._axorder is not None: # then despite other conflicts, attempt to put on same axes return True, '' if call._axpos == self._axpos and call._axpos is not None: # then despite other conflicts, attempt to put on same axes return True, '' # TODO: include s, c, fc, ec, etc and make these checks into loops if call.x.unit.physical_type != self.x.unit.physical_type: msg.append('inconsitent xunit, {} != {}'.format(call.x.unit, self.x.unit)) if call.y.unit.physical_type != self.y.unit.physical_type: msg.append('inconsitent yunit, {} != {}'.format(call.y.unit, self.y.unit)) if call.z.unit.physical_type != self.z.unit.physical_type: msg.append('inconsitent zunit, {} != {}'.format(call.z.unit, self.z.unit)) if call.i.unit.physical_type != self.i.unit.physical_type: msg.append('inconsistent iunit, {} != {}'.format(call.i.unit, self.i.unit)) if call.i.is_reference or self.i.is_reference: if call.i.reference != self.i.reference: msg.append('inconsistent i reference, {} != {}'.format(call.i.reference, self.i.reference)) if not _consistent_allow_none(call.title, self.title): msg.append('inconsistent axes title, {} != {}'.format(call.title, self.title)) # here we send the protected _label so that we get None instead of empty string if not _consistent_allow_none(call.x._label, self.x._label): msg.append('inconsitent xlabel, {} != {}'.format(call.x.label, self.x.label)) if not _consistent_allow_none(call.y._label, self.y._label): msg.append('inconsitent ylabel, {} != {}'.format(call.y.label, self.y.label)) if not _consistent_allow_none(call.z._label, self.z._label): msg.append('inconsitent zlabel, {} != {}'.format(call.z.label, self.z.label)) if len(msg): return False, ', '.join(msg) else: return True, ''
['def', 'consistent_with_call', '(', 'self', ',', 'call', ')', ':', 'if', 'len', '(', 'self', '.', 'calls', ')', '==', '0', ':', 'return', 'True', ',', "''", 'msg', '=', '[', ']', 'if', 'not', '_consistent_allow_none', '(', 'call', '.', '_axorder', ',', 'self', '.', '_axorder', ')', ':', 'msg', '.', 'append', '(', "'inconsistent axorder, {} != {}'", '.', 'format', '(', 'call', '.', 'axorder', ',', 'self', '.', 'axorder', ')', ')', 'if', 'not', '_consistent_allow_none', '(', 'call', '.', '_axpos', ',', 'self', '.', '_axpos', ')', ':', 'msg', '.', 'append', '(', "'inconsistent axpos, {} != {}'", '.', 'format', '(', 'call', '.', 'axpos', ',', 'self', '.', 'axpos', ')', ')', 'if', 'call', '.', '_axorder', '==', 'self', '.', '_axorder', 'and', 'call', '.', '_axorder', 'is', 'not', 'None', ':', '# then despite other conflicts, attempt to put on same axes', 'return', 'True', ',', "''", 'if', 'call', '.', '_axpos', '==', 'self', '.', '_axpos', 'and', 'call', '.', '_axpos', 'is', 'not', 'None', ':', '# then despite other conflicts, attempt to put on same axes', 'return', 'True', ',', "''", '# TODO: include s, c, fc, ec, etc and make these checks into loops', 'if', 'call', '.', 'x', '.', 'unit', '.', 'physical_type', '!=', 'self', '.', 'x', '.', 'unit', '.', 'physical_type', ':', 'msg', '.', 'append', '(', "'inconsitent xunit, {} != {}'", '.', 'format', '(', 'call', '.', 'x', '.', 'unit', ',', 'self', '.', 'x', '.', 'unit', ')', ')', 'if', 'call', '.', 'y', '.', 'unit', '.', 'physical_type', '!=', 'self', '.', 'y', '.', 'unit', '.', 'physical_type', ':', 'msg', '.', 'append', '(', "'inconsitent yunit, {} != {}'", '.', 'format', '(', 'call', '.', 'y', '.', 'unit', ',', 'self', '.', 'y', '.', 'unit', ')', ')', 'if', 'call', '.', 'z', '.', 'unit', '.', 'physical_type', '!=', 'self', '.', 'z', '.', 'unit', '.', 'physical_type', ':', 'msg', '.', 'append', '(', "'inconsitent zunit, {} != {}'", '.', 'format', '(', 'call', '.', 'z', '.', 'unit', ',', 'self', '.', 'z', '.', 'unit', ')', ')', 'if', 'call', '.', 'i', '.', 'unit', '.', 'physical_type', '!=', 'self', '.', 'i', '.', 'unit', '.', 'physical_type', ':', 'msg', '.', 'append', '(', "'inconsistent iunit, {} != {}'", '.', 'format', '(', 'call', '.', 'i', '.', 'unit', ',', 'self', '.', 'i', '.', 'unit', ')', ')', 'if', 'call', '.', 'i', '.', 'is_reference', 'or', 'self', '.', 'i', '.', 'is_reference', ':', 'if', 'call', '.', 'i', '.', 'reference', '!=', 'self', '.', 'i', '.', 'reference', ':', 'msg', '.', 'append', '(', "'inconsistent i reference, {} != {}'", '.', 'format', '(', 'call', '.', 'i', '.', 'reference', ',', 'self', '.', 'i', '.', 'reference', ')', ')', 'if', 'not', '_consistent_allow_none', '(', 'call', '.', 'title', ',', 'self', '.', 'title', ')', ':', 'msg', '.', 'append', '(', "'inconsistent axes title, {} != {}'", '.', 'format', '(', 'call', '.', 'title', ',', 'self', '.', 'title', ')', ')', '# here we send the protected _label so that we get None instead of empty string', 'if', 'not', '_consistent_allow_none', '(', 'call', '.', 'x', '.', '_label', ',', 'self', '.', 'x', '.', '_label', ')', ':', 'msg', '.', 'append', '(', "'inconsitent xlabel, {} != {}'", '.', 'format', '(', 'call', '.', 'x', '.', 'label', ',', 'self', '.', 'x', '.', 'label', ')', ')', 'if', 'not', '_consistent_allow_none', '(', 'call', '.', 'y', '.', '_label', ',', 'self', '.', 'y', '.', '_label', ')', ':', 'msg', '.', 'append', '(', "'inconsitent ylabel, {} != {}'", '.', 'format', '(', 'call', '.', 'y', '.', 'label', ',', 'self', '.', 'y', '.', 'label', ')', ')', 'if', 
'not', '_consistent_allow_none', '(', 'call', '.', 'z', '.', '_label', ',', 'self', '.', 'z', '.', '_label', ')', ':', 'msg', '.', 'append', '(', "'inconsitent zlabel, {} != {}'", '.', 'format', '(', 'call', '.', 'z', '.', 'label', ',', 'self', '.', 'z', '.', 'label', ')', ')', 'if', 'len', '(', 'msg', ')', ':', 'return', 'False', ',', "', '", '.', 'join', '(', 'msg', ')', 'else', ':', 'return', 'True', ',', "''"]
check to see if a new call would be consistent to add to this Axes instance checks include: * compatible units in all directions * compatible independent-variable (if applicable)
['check', 'to', 'see', 'if', 'a', 'new', 'call', 'would', 'be', 'consistent', 'to', 'add', 'to', 'this', 'Axes', 'instance']
train
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/dependencies/autofig/axes.py#L388-L443
5,282
chaoss/grimoirelab-sortinghat
sortinghat/cmd/unify.py
Unify.__marshal_matches
def __marshal_matches(matched): """Convert matches to JSON format. :param matched: a list of matched identities :returns json_matches: a list of matches in JSON format """ json_matches = [] for m in matched: identities = [i.uuid for i in m] if len(identities) == 1: continue json_match = { 'identities': identities, 'processed': False } json_matches.append(json_match) return json_matches
python
def __marshal_matches(matched): """Convert matches to JSON format. :param matched: a list of matched identities :returns json_matches: a list of matches in JSON format """ json_matches = [] for m in matched: identities = [i.uuid for i in m] if len(identities) == 1: continue json_match = { 'identities': identities, 'processed': False } json_matches.append(json_match) return json_matches
['def', '__marshal_matches', '(', 'matched', ')', ':', 'json_matches', '=', '[', ']', 'for', 'm', 'in', 'matched', ':', 'identities', '=', '[', 'i', '.', 'uuid', 'for', 'i', 'in', 'm', ']', 'if', 'len', '(', 'identities', ')', '==', '1', ':', 'continue', 'json_match', '=', '{', "'identities'", ':', 'identities', ',', "'processed'", ':', 'False', '}', 'json_matches', '.', 'append', '(', 'json_match', ')', 'return', 'json_matches']
Convert matches to JSON format. :param matched: a list of matched identities :returns json_matches: a list of matches in JSON format
['Convert', 'matches', 'to', 'JSON', 'format', '.']
train
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/unify.py#L248-L268
5,283
MartinThoma/mpu
mpu/_cli.py
get_parser
def get_parser(): """Get parser for mpu.""" from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument('--version', action='version', version='mpu {}'.format(mpu.__version__)) subparsers = parser.add_subparsers(help='Python package commands') package_parser = subparsers.add_parser('package') mpu.package.cli.get_parser(package_parser) return parser
python
def get_parser(): """Get parser for mpu.""" from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument('--version', action='version', version='mpu {}'.format(mpu.__version__)) subparsers = parser.add_subparsers(help='Python package commands') package_parser = subparsers.add_parser('package') mpu.package.cli.get_parser(package_parser) return parser
['def', 'get_parser', '(', ')', ':', 'from', 'argparse', 'import', 'ArgumentParser', ',', 'ArgumentDefaultsHelpFormatter', 'parser', '=', 'ArgumentParser', '(', 'description', '=', '__doc__', ',', 'formatter_class', '=', 'ArgumentDefaultsHelpFormatter', ')', 'parser', '.', 'add_argument', '(', "'--version'", ',', 'action', '=', "'version'", ',', 'version', '=', "'mpu {}'", '.', 'format', '(', 'mpu', '.', '__version__', ')', ')', 'subparsers', '=', 'parser', '.', 'add_subparsers', '(', 'help', '=', "'Python package commands'", ')', 'package_parser', '=', 'subparsers', '.', 'add_parser', '(', "'package'", ')', 'mpu', '.', 'package', '.', 'cli', '.', 'get_parser', '(', 'package_parser', ')', 'return', 'parser']
Get parser for mpu.
['Get', 'parser', 'for', 'mpu', '.']
train
https://github.com/MartinThoma/mpu/blob/61bc36d0192ca90c0bcf9b8a5d7d0d8520e20ff6/mpu/_cli.py#L23-L34
5,284
talentpair/featurevectormatrix
featurevectormatrix/__init__.py
FeatureVectorMatrix.get_row_list
def get_row_list(self, row_idx): """ get a feature vector for the nth row :param row_idx: which row :return: a list of feature values, ordered by column_names """ try: row = self._rows[row_idx] except TypeError: row = self._rows[self._row_name_idx[row_idx]] if isinstance(row, list): extra = [ self._default_value ] * (len(self._column_name_list) - len(row)) return row + extra else: if row_idx not in self._row_memo: self._row_memo[row_idx] = [ row[k] if k in row else self._default_value for k in self._column_name_list ] return self._row_memo[row_idx]
python
def get_row_list(self, row_idx): """ get a feature vector for the nth row :param row_idx: which row :return: a list of feature values, ordered by column_names """ try: row = self._rows[row_idx] except TypeError: row = self._rows[self._row_name_idx[row_idx]] if isinstance(row, list): extra = [ self._default_value ] * (len(self._column_name_list) - len(row)) return row + extra else: if row_idx not in self._row_memo: self._row_memo[row_idx] = [ row[k] if k in row else self._default_value for k in self._column_name_list ] return self._row_memo[row_idx]
['def', 'get_row_list', '(', 'self', ',', 'row_idx', ')', ':', 'try', ':', 'row', '=', 'self', '.', '_rows', '[', 'row_idx', ']', 'except', 'TypeError', ':', 'row', '=', 'self', '.', '_rows', '[', 'self', '.', '_row_name_idx', '[', 'row_idx', ']', ']', 'if', 'isinstance', '(', 'row', ',', 'list', ')', ':', 'extra', '=', '[', 'self', '.', '_default_value', ']', '*', '(', 'len', '(', 'self', '.', '_column_name_list', ')', '-', 'len', '(', 'row', ')', ')', 'return', 'row', '+', 'extra', 'else', ':', 'if', 'row_idx', 'not', 'in', 'self', '.', '_row_memo', ':', 'self', '.', '_row_memo', '[', 'row_idx', ']', '=', '[', 'row', '[', 'k', ']', 'if', 'k', 'in', 'row', 'else', 'self', '.', '_default_value', 'for', 'k', 'in', 'self', '.', '_column_name_list', ']', 'return', 'self', '.', '_row_memo', '[', 'row_idx', ']']
get a feature vector for the nth row :param row_idx: which row :return: a list of feature values, ordered by column_names
['get', 'a', 'feature', 'vector', 'for', 'the', 'nth', 'row']
train
https://github.com/talentpair/featurevectormatrix/blob/1327026f7e46138947ba55433c11a85bca1adc5d/featurevectormatrix/__init__.py#L184-L204
5,285
sethmlarson/virtualbox-python
virtualbox/library_ext/machine.py
IMachine.clone
def clone(self, snapshot_name_or_id=None, mode=library.CloneMode.machine_state, options=None, name=None, uuid=None, groups=None, basefolder='', register=True): """Clone this Machine Options: snapshot_name_or_id - value can be either ISnapshot, name, or id mode - set the CloneMode value options - define the CloneOptions options name - define a name of the new VM uuid - set the uuid of the new VM groups - specify which groups the new VM will exist under basefolder - specify which folder to set the VM up under register - register this VM with the server Note: Default values create a linked clone from the current machine state Return a IMachine object for the newly cloned vm """ if options is None: options = [library.CloneOptions.link] if groups is None: groups = [] vbox = virtualbox.VirtualBox() if snapshot_name_or_id is not None: if isinstance(snapshot_name_or_id, basestring): snapshot = self.find_snapshot(snapshot_name_or_id) else: snapshot = snapshot_name_or_id vm = snapshot.machine else: # linked clone can only be created from a snapshot... # try grabbing the current_snapshot if library.CloneOptions.link in options: vm = self.current_snapshot.machine else: vm = self if name is None: name = "%s Clone" % vm.name # Build the settings file create_flags = '' if uuid is not None: create_flags = "UUID=%s" % uuid primary_group = '' if groups: primary_group = groups[0] # Make sure this settings file does not already exist test_name = name settings_file = '' for i in range(1, 1000): settings_file = vbox.compose_machine_filename(test_name, primary_group, create_flags, basefolder) if not os.path.exists(os.path.dirname(settings_file)): break test_name = "%s (%s)" % (name, i) name = test_name # Create the new machine and clone it! vm_clone = vbox.create_machine(settings_file, name, groups, '', create_flags) progress = vm.clone_to(vm_clone, mode, options) progress.wait_for_completion(-1) if register: vbox.register_machine(vm_clone) return vm_clone
python
def clone(self, snapshot_name_or_id=None, mode=library.CloneMode.machine_state, options=None, name=None, uuid=None, groups=None, basefolder='', register=True): """Clone this Machine Options: snapshot_name_or_id - value can be either ISnapshot, name, or id mode - set the CloneMode value options - define the CloneOptions options name - define a name of the new VM uuid - set the uuid of the new VM groups - specify which groups the new VM will exist under basefolder - specify which folder to set the VM up under register - register this VM with the server Note: Default values create a linked clone from the current machine state Return a IMachine object for the newly cloned vm """ if options is None: options = [library.CloneOptions.link] if groups is None: groups = [] vbox = virtualbox.VirtualBox() if snapshot_name_or_id is not None: if isinstance(snapshot_name_or_id, basestring): snapshot = self.find_snapshot(snapshot_name_or_id) else: snapshot = snapshot_name_or_id vm = snapshot.machine else: # linked clone can only be created from a snapshot... # try grabbing the current_snapshot if library.CloneOptions.link in options: vm = self.current_snapshot.machine else: vm = self if name is None: name = "%s Clone" % vm.name # Build the settings file create_flags = '' if uuid is not None: create_flags = "UUID=%s" % uuid primary_group = '' if groups: primary_group = groups[0] # Make sure this settings file does not already exist test_name = name settings_file = '' for i in range(1, 1000): settings_file = vbox.compose_machine_filename(test_name, primary_group, create_flags, basefolder) if not os.path.exists(os.path.dirname(settings_file)): break test_name = "%s (%s)" % (name, i) name = test_name # Create the new machine and clone it! vm_clone = vbox.create_machine(settings_file, name, groups, '', create_flags) progress = vm.clone_to(vm_clone, mode, options) progress.wait_for_completion(-1) if register: vbox.register_machine(vm_clone) return vm_clone
['def', 'clone', '(', 'self', ',', 'snapshot_name_or_id', '=', 'None', ',', 'mode', '=', 'library', '.', 'CloneMode', '.', 'machine_state', ',', 'options', '=', 'None', ',', 'name', '=', 'None', ',', 'uuid', '=', 'None', ',', 'groups', '=', 'None', ',', 'basefolder', '=', "''", ',', 'register', '=', 'True', ')', ':', 'if', 'options', 'is', 'None', ':', 'options', '=', '[', 'library', '.', 'CloneOptions', '.', 'link', ']', 'if', 'groups', 'is', 'None', ':', 'groups', '=', '[', ']', 'vbox', '=', 'virtualbox', '.', 'VirtualBox', '(', ')', 'if', 'snapshot_name_or_id', 'is', 'not', 'None', ':', 'if', 'isinstance', '(', 'snapshot_name_or_id', ',', 'basestring', ')', ':', 'snapshot', '=', 'self', '.', 'find_snapshot', '(', 'snapshot_name_or_id', ')', 'else', ':', 'snapshot', '=', 'snapshot_name_or_id', 'vm', '=', 'snapshot', '.', 'machine', 'else', ':', '# linked clone can only be created from a snapshot...', '# try grabbing the current_snapshot', 'if', 'library', '.', 'CloneOptions', '.', 'link', 'in', 'options', ':', 'vm', '=', 'self', '.', 'current_snapshot', '.', 'machine', 'else', ':', 'vm', '=', 'self', 'if', 'name', 'is', 'None', ':', 'name', '=', '"%s Clone"', '%', 'vm', '.', 'name', '# Build the settings file', 'create_flags', '=', "''", 'if', 'uuid', 'is', 'not', 'None', ':', 'create_flags', '=', '"UUID=%s"', '%', 'uuid', 'primary_group', '=', "''", 'if', 'groups', ':', 'primary_group', '=', 'groups', '[', '0', ']', '# Make sure this settings file does not already exist', 'test_name', '=', 'name', 'settings_file', '=', "''", 'for', 'i', 'in', 'range', '(', '1', ',', '1000', ')', ':', 'settings_file', '=', 'vbox', '.', 'compose_machine_filename', '(', 'test_name', ',', 'primary_group', ',', 'create_flags', ',', 'basefolder', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'os', '.', 'path', '.', 'dirname', '(', 'settings_file', ')', ')', ':', 'break', 'test_name', '=', '"%s (%s)"', '%', '(', 'name', ',', 'i', ')', 'name', '=', 'test_name', '# Create the new machine and clone it!', 'vm_clone', '=', 'vbox', '.', 'create_machine', '(', 'settings_file', ',', 'name', ',', 'groups', ',', "''", ',', 'create_flags', ')', 'progress', '=', 'vm', '.', 'clone_to', '(', 'vm_clone', ',', 'mode', ',', 'options', ')', 'progress', '.', 'wait_for_completion', '(', '-', '1', ')', 'if', 'register', ':', 'vbox', '.', 'register_machine', '(', 'vm_clone', ')', 'return', 'vm_clone']
Clone this Machine Options: snapshot_name_or_id - value can be either ISnapshot, name, or id mode - set the CloneMode value options - define the CloneOptions options name - define a name of the new VM uuid - set the uuid of the new VM groups - specify which groups the new VM will exist under basefolder - specify which folder to set the VM up under register - register this VM with the server Note: Default values create a linked clone from the current machine state Return a IMachine object for the newly cloned vm
['Clone', 'this', 'Machine']
train
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library_ext/machine.py#L64-L137
5,286
SylvanasSun/python-common-cache
common_cache/utils.py
RWLock.acquire_reader
def acquire_reader(self): """ Acquire a read lock, several threads can hold this type of lock. """ with self.mutex: while self.rwlock < 0 or self.rwlock == self.max_reader_concurrency or self.writers_waiting: self.readers_ok.wait() self.rwlock += 1
python
def acquire_reader(self): """ Acquire a read lock, several threads can hold this type of lock. """ with self.mutex: while self.rwlock < 0 or self.rwlock == self.max_reader_concurrency or self.writers_waiting: self.readers_ok.wait() self.rwlock += 1
['def', 'acquire_reader', '(', 'self', ')', ':', 'with', 'self', '.', 'mutex', ':', 'while', 'self', '.', 'rwlock', '<', '0', 'or', 'self', '.', 'rwlock', '==', 'self', '.', 'max_reader_concurrency', 'or', 'self', '.', 'writers_waiting', ':', 'self', '.', 'readers_ok', '.', 'wait', '(', ')', 'self', '.', 'rwlock', '+=', '1']
Acquire a read lock, several threads can hold this type of lock.
['Acquire', 'a', 'read', 'lock', 'several', 'threads', 'can', 'hold', 'this', 'type', 'of', 'lock', '.']
train
https://github.com/SylvanasSun/python-common-cache/blob/f113eb3cd751eed5ab5373e8610a31a444220cf8/common_cache/utils.py#L73-L80
5,287
outini/python-pylls
pylls/cachet.py
Components.update
def update(self, component_id, name=None, status=None, description=None, link=None, order=None, group_id=None, enabled=True): """Update a component :param int component_id: Component ID :param str name: Name of the component (optional) :param int status: Status of the component; 1-4 :param str description: Description of the component (optional) :param str link: A hyperlink to the component (optional) :param int order: Order of the component (optional) :param int group_id: The group ID of the component (optional) :param bool enabled: Whether the component is enabled (optional) :return: Updated component data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#components .. seealso:: https://docs.cachethq.io/docs/component-statuses """ data = ApiParams() data['component'] = component_id data['name'] = name data['status'] = status data['description'] = description data['link'] = link data['order'] = order data['group_id'] = group_id data['enabled'] = enabled return self._put('components/%s' % component_id, data=data)['data']
python
def update(self, component_id, name=None, status=None, description=None, link=None, order=None, group_id=None, enabled=True): """Update a component :param int component_id: Component ID :param str name: Name of the component (optional) :param int status: Status of the component; 1-4 :param str description: Description of the component (optional) :param str link: A hyperlink to the component (optional) :param int order: Order of the component (optional) :param int group_id: The group ID of the component (optional) :param bool enabled: Whether the component is enabled (optional) :return: Updated component data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#components .. seealso:: https://docs.cachethq.io/docs/component-statuses """ data = ApiParams() data['component'] = component_id data['name'] = name data['status'] = status data['description'] = description data['link'] = link data['order'] = order data['group_id'] = group_id data['enabled'] = enabled return self._put('components/%s' % component_id, data=data)['data']
['def', 'update', '(', 'self', ',', 'component_id', ',', 'name', '=', 'None', ',', 'status', '=', 'None', ',', 'description', '=', 'None', ',', 'link', '=', 'None', ',', 'order', '=', 'None', ',', 'group_id', '=', 'None', ',', 'enabled', '=', 'True', ')', ':', 'data', '=', 'ApiParams', '(', ')', 'data', '[', "'component'", ']', '=', 'component_id', 'data', '[', "'name'", ']', '=', 'name', 'data', '[', "'status'", ']', '=', 'status', 'data', '[', "'description'", ']', '=', 'description', 'data', '[', "'link'", ']', '=', 'link', 'data', '[', "'order'", ']', '=', 'order', 'data', '[', "'group_id'", ']', '=', 'group_id', 'data', '[', "'enabled'", ']', '=', 'enabled', 'return', 'self', '.', '_put', '(', "'components/%s'", '%', 'component_id', ',', 'data', '=', 'data', ')', '[', "'data'", ']']
Update a component :param int component_id: Component ID :param str name: Name of the component (optional) :param int status: Status of the component; 1-4 :param str description: Description of the component (optional) :param str link: A hyperlink to the component (optional) :param int order: Order of the component (optional) :param int group_id: The group ID of the component (optional) :param bool enabled: Whether the component is enabled (optional) :return: Updated component data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#components .. seealso:: https://docs.cachethq.io/docs/component-statuses
['Update', 'a', 'component']
train
https://github.com/outini/python-pylls/blob/f9fa220594bc1974469097d9bad690a42d0d0f0f/pylls/cachet.py#L139-L165
5,288
datadotworld/data.world-py
datadotworld/client/api.py
RestApiClient.update_insight
def update_insight(self, project_key, insight_id, **kwargs): """Update an insight. **Note that only elements included in the request will be updated. All omitted elements will remain untouched. :param project_key: Projrct identifier, in the form of projectOwner/projectid :type project_key: str :param insight_id: Insight unique identifier. :type insight_id: str :param title: Insight title :type title: str :param description: Insight description. :type description: str, optional :param image_url: If image-based, the URL of the image :type image_url: str :param embed_url: If embed-based, the embeddable URL :type embed_url: str :param source_link: Permalink to source code or platform this insight was generated with. Allows others to replicate the steps originally used to produce the insight. :type source_link: str, optional :param data_source_links: One or more permalinks to the data sources used to generate this insight. Allows others to access the data originally used to produce the insight. :type data_source_links: array :returns: message object :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.update_insight( ... 'username/test-project', 'insightid' ... title='demo atadotworld'}) # doctest: +SKIP """ request = self.__build_insight_obj( lambda: _swagger.InsightPatchRequest(), kwargs) project_owner, project_id = parse_dataset_key(project_key) try: self._insights_api.update_insight(project_owner, project_id, insight_id, body=request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
python
def update_insight(self, project_key, insight_id, **kwargs): """Update an insight. **Note that only elements included in the request will be updated. All omitted elements will remain untouched. :param project_key: Projrct identifier, in the form of projectOwner/projectid :type project_key: str :param insight_id: Insight unique identifier. :type insight_id: str :param title: Insight title :type title: str :param description: Insight description. :type description: str, optional :param image_url: If image-based, the URL of the image :type image_url: str :param embed_url: If embed-based, the embeddable URL :type embed_url: str :param source_link: Permalink to source code or platform this insight was generated with. Allows others to replicate the steps originally used to produce the insight. :type source_link: str, optional :param data_source_links: One or more permalinks to the data sources used to generate this insight. Allows others to access the data originally used to produce the insight. :type data_source_links: array :returns: message object :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.update_insight( ... 'username/test-project', 'insightid' ... title='demo atadotworld'}) # doctest: +SKIP """ request = self.__build_insight_obj( lambda: _swagger.InsightPatchRequest(), kwargs) project_owner, project_id = parse_dataset_key(project_key) try: self._insights_api.update_insight(project_owner, project_id, insight_id, body=request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
['def', 'update_insight', '(', 'self', ',', 'project_key', ',', 'insight_id', ',', '*', '*', 'kwargs', ')', ':', 'request', '=', 'self', '.', '__build_insight_obj', '(', 'lambda', ':', '_swagger', '.', 'InsightPatchRequest', '(', ')', ',', 'kwargs', ')', 'project_owner', ',', 'project_id', '=', 'parse_dataset_key', '(', 'project_key', ')', 'try', ':', 'self', '.', '_insights_api', '.', 'update_insight', '(', 'project_owner', ',', 'project_id', ',', 'insight_id', ',', 'body', '=', 'request', ')', 'except', '_swagger', '.', 'rest', '.', 'ApiException', 'as', 'e', ':', 'raise', 'RestApiError', '(', 'cause', '=', 'e', ')']
Update an insight. **Note that only elements included in the request will be updated. All omitted elements will remain untouched. :param project_key: Projrct identifier, in the form of projectOwner/projectid :type project_key: str :param insight_id: Insight unique identifier. :type insight_id: str :param title: Insight title :type title: str :param description: Insight description. :type description: str, optional :param image_url: If image-based, the URL of the image :type image_url: str :param embed_url: If embed-based, the embeddable URL :type embed_url: str :param source_link: Permalink to source code or platform this insight was generated with. Allows others to replicate the steps originally used to produce the insight. :type source_link: str, optional :param data_source_links: One or more permalinks to the data sources used to generate this insight. Allows others to access the data originally used to produce the insight. :type data_source_links: array :returns: message object :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.update_insight( ... 'username/test-project', 'insightid' ... title='demo atadotworld'}) # doctest: +SKIP
['Update', 'an', 'insight', '.']
train
https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/client/api.py#L1260-L1306
5,289
Erotemic/utool
utool/util_list.py
safe_listget
def safe_listget(list_, index, default='?'): """ depricate """ if index >= len(list_): return default ret = list_[index] if ret is None: return default return ret
python
def safe_listget(list_, index, default='?'): """ depricate """ if index >= len(list_): return default ret = list_[index] if ret is None: return default return ret
['def', 'safe_listget', '(', 'list_', ',', 'index', ',', 'default', '=', "'?'", ')', ':', 'if', 'index', '>=', 'len', '(', 'list_', ')', ':', 'return', 'default', 'ret', '=', 'list_', '[', 'index', ']', 'if', 'ret', 'is', 'None', ':', 'return', 'default', 'return', 'ret']
depricate
['depricate']
train
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L246-L253
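A minimal usage sketch for safe_listget as defined in the record above, assuming the function is in scope; the list values are made up for illustration.

```python
# The default placeholder is returned both for out-of-range indices and for None entries.
row = ['alice', None, 'carol']

print(safe_listget(row, 0))               # 'alice'
print(safe_listget(row, 1))               # '?'  (None is replaced by the default)
print(safe_listget(row, 10))              # '?'  (index past the end of the list)
print(safe_listget(row, 2, default='-'))  # 'carol'
```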
5,290
swift-nav/libsbp
python/sbp/utils.py
walk_json_dict
def walk_json_dict(coll): """ Flatten a parsed SBP object into a dicts and lists, which are compatible for JSON output. Parameters ---------- coll : dict """ if isinstance(coll, dict): return dict((k, walk_json_dict(v)) for (k, v) in iter(coll.items())) elif isinstance(coll, bytes): return coll.decode('ascii') elif hasattr(coll, '__iter__') and not isinstance(coll, str): return [walk_json_dict(seq) for seq in coll] else: return coll
python
def walk_json_dict(coll): """ Flatten a parsed SBP object into a dicts and lists, which are compatible for JSON output. Parameters ---------- coll : dict """ if isinstance(coll, dict): return dict((k, walk_json_dict(v)) for (k, v) in iter(coll.items())) elif isinstance(coll, bytes): return coll.decode('ascii') elif hasattr(coll, '__iter__') and not isinstance(coll, str): return [walk_json_dict(seq) for seq in coll] else: return coll
['def', 'walk_json_dict', '(', 'coll', ')', ':', 'if', 'isinstance', '(', 'coll', ',', 'dict', ')', ':', 'return', 'dict', '(', '(', 'k', ',', 'walk_json_dict', '(', 'v', ')', ')', 'for', '(', 'k', ',', 'v', ')', 'in', 'iter', '(', 'coll', '.', 'items', '(', ')', ')', ')', 'elif', 'isinstance', '(', 'coll', ',', 'bytes', ')', ':', 'return', 'coll', '.', 'decode', '(', "'ascii'", ')', 'elif', 'hasattr', '(', 'coll', ',', "'__iter__'", ')', 'and', 'not', 'isinstance', '(', 'coll', ',', 'str', ')', ':', 'return', '[', 'walk_json_dict', '(', 'seq', ')', 'for', 'seq', 'in', 'coll', ']', 'else', ':', 'return', 'coll']
Flatten a parsed SBP object into a dicts and lists, which are compatible for JSON output. Parameters ---------- coll : dict
['Flatten', 'a', 'parsed', 'SBP', 'object', 'into', 'a', 'dicts', 'and', 'lists', 'which', 'are', 'compatible', 'for', 'JSON', 'output', '.']
train
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/utils.py#L28-L45
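A small, self-contained sketch of how the flattening in walk_json_dict behaves, assuming the function defined above is in scope; the input values are invented for illustration.

```python
import json

# Tuples become lists and bytes become ASCII strings, so the result is JSON-serializable.
msg = {'sender': 1234, 'payload': b'hello', 'coords': (1.0, 2.0, 3.0)}

flat = walk_json_dict(msg)
print(flat)        # {'sender': 1234, 'payload': 'hello', 'coords': [1.0, 2.0, 3.0]}
print(json.dumps(flat))  # succeeds, since no bytes or tuples remain
```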
5,291
apple/turicreate
src/unity/python/turicreate/_sys_util.py
setup_environment_from_config_file
def setup_environment_from_config_file(): """ Imports the environmental configuration settings from the config file, if present, and sets the environment variables to test it. """ from os.path import exists config_file = get_config_file() if not exists(config_file): return try: config = _ConfigParser.SafeConfigParser() config.read(config_file) __section = "Environment" if config.has_section(__section): items = config.items(__section) for k, v in items: try: os.environ[k.upper()] = v except Exception as e: print(("WARNING: Error setting environment variable " "'%s = %s' from config file '%s': %s.") % (k, str(v), config_file, str(e)) ) except Exception as e: print("WARNING: Error reading config file '%s': %s." % (config_file, str(e)))
python
def setup_environment_from_config_file(): """ Imports the environmental configuration settings from the config file, if present, and sets the environment variables to test it. """ from os.path import exists config_file = get_config_file() if not exists(config_file): return try: config = _ConfigParser.SafeConfigParser() config.read(config_file) __section = "Environment" if config.has_section(__section): items = config.items(__section) for k, v in items: try: os.environ[k.upper()] = v except Exception as e: print(("WARNING: Error setting environment variable " "'%s = %s' from config file '%s': %s.") % (k, str(v), config_file, str(e)) ) except Exception as e: print("WARNING: Error reading config file '%s': %s." % (config_file, str(e)))
['def', 'setup_environment_from_config_file', '(', ')', ':', 'from', 'os', '.', 'path', 'import', 'exists', 'config_file', '=', 'get_config_file', '(', ')', 'if', 'not', 'exists', '(', 'config_file', ')', ':', 'return', 'try', ':', 'config', '=', '_ConfigParser', '.', 'SafeConfigParser', '(', ')', 'config', '.', 'read', '(', 'config_file', ')', '__section', '=', '"Environment"', 'if', 'config', '.', 'has_section', '(', '__section', ')', ':', 'items', '=', 'config', '.', 'items', '(', '__section', ')', 'for', 'k', ',', 'v', 'in', 'items', ':', 'try', ':', 'os', '.', 'environ', '[', 'k', '.', 'upper', '(', ')', ']', '=', 'v', 'except', 'Exception', 'as', 'e', ':', 'print', '(', '(', '"WARNING: Error setting environment variable "', '"\'%s = %s\' from config file \'%s\': %s."', ')', '%', '(', 'k', ',', 'str', '(', 'v', ')', ',', 'config_file', ',', 'str', '(', 'e', ')', ')', ')', 'except', 'Exception', 'as', 'e', ':', 'print', '(', '"WARNING: Error reading config file \'%s\': %s."', '%', '(', 'config_file', ',', 'str', '(', 'e', ')', ')', ')']
Imports the environmental configuration settings from the config file, if present, and sets the environment variables to test it.
['Imports', 'the', 'environmental', 'configuration', 'settings', 'from', 'the', 'config', 'file', 'if', 'present', 'and', 'sets', 'the', 'environment', 'variables', 'to', 'test', 'it', '.']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L491-L522
5,292
bitprophet/ssh
ssh/hostkeys.py
HostKeyEntry.to_line
def to_line(self): """ Returns a string in OpenSSH known_hosts file format, or None if the object is not in a valid state. A trailing newline is included. """ if self.valid: return '%s %s %s\n' % (','.join(self.hostnames), self.key.get_name(), self.key.get_base64()) return None
python
def to_line(self): """ Returns a string in OpenSSH known_hosts file format, or None if the object is not in a valid state. A trailing newline is included. """ if self.valid: return '%s %s %s\n' % (','.join(self.hostnames), self.key.get_name(), self.key.get_base64()) return None
['def', 'to_line', '(', 'self', ')', ':', 'if', 'self', '.', 'valid', ':', 'return', "'%s %s %s\\n'", '%', '(', "','", '.', 'join', '(', 'self', '.', 'hostnames', ')', ',', 'self', '.', 'key', '.', 'get_name', '(', ')', ',', 'self', '.', 'key', '.', 'get_base64', '(', ')', ')', 'return', 'None']
Returns a string in OpenSSH known_hosts file format, or None if the object is not in a valid state. A trailing newline is included.
['Returns', 'a', 'string', 'in', 'OpenSSH', 'known_hosts', 'file', 'format', 'or', 'None', 'if', 'the', 'object', 'is', 'not', 'in', 'a', 'valid', 'state', '.', 'A', 'trailing', 'newline', 'is', 'included', '.']
train
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/hostkeys.py#L88-L97
5,293
NuGrid/NuGridPy
nugridpy/astronomy.py
imf
def imf(m): ''' Returns ------- N(M)dM for given mass according to Kroupa IMF, vectorization available via vimf() ''' m1 = 0.08; m2 = 0.50 a1 = 0.30; a2 = 1.30; a3 = 2.3 const2 = m1**-a1 -m1**-a2 const3 = m2**-a2 -m2**-a3 if m < 0.08: alpha = 0.3 const = -const2 -const3 elif m < 0.50: alpha = 1.3 const = -const3 else: alpha = 2.3 const = 0.0 # print m,alpha, const, m**-alpha + const return m**-alpha + const
python
def imf(m): ''' Returns ------- N(M)dM for given mass according to Kroupa IMF, vectorization available via vimf() ''' m1 = 0.08; m2 = 0.50 a1 = 0.30; a2 = 1.30; a3 = 2.3 const2 = m1**-a1 -m1**-a2 const3 = m2**-a2 -m2**-a3 if m < 0.08: alpha = 0.3 const = -const2 -const3 elif m < 0.50: alpha = 1.3 const = -const3 else: alpha = 2.3 const = 0.0 # print m,alpha, const, m**-alpha + const return m**-alpha + const
['def', 'imf', '(', 'm', ')', ':', 'm1', '=', '0.08', 'm2', '=', '0.50', 'a1', '=', '0.30', 'a2', '=', '1.30', 'a3', '=', '2.3', 'const2', '=', 'm1', '**', '-', 'a1', '-', 'm1', '**', '-', 'a2', 'const3', '=', 'm2', '**', '-', 'a2', '-', 'm2', '**', '-', 'a3', 'if', 'm', '<', '0.08', ':', 'alpha', '=', '0.3', 'const', '=', '-', 'const2', '-', 'const3', 'elif', 'm', '<', '0.50', ':', 'alpha', '=', '1.3', 'const', '=', '-', 'const3', 'else', ':', 'alpha', '=', '2.3', 'const', '=', '0.0', '# print m,alpha, const, m**-alpha + const ', 'return', 'm', '**', '-', 'alpha', '+', 'const']
Returns ------- N(M)dM for given mass according to Kroupa IMF, vectorization available via vimf()
['Returns', '-------', 'N', '(', 'M', ')', 'dM', 'for', 'given', 'mass', 'according', 'to', 'Kroupa', 'IMF', 'vectorization', 'available', 'via', 'vimf', '()']
train
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/astronomy.py#L193-L218
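A brief usage sketch for the piecewise Kroupa IMF above, assuming imf is in scope; masses are in solar units and the sample values are illustrative only.

```python
# The slope changes at 0.08 and 0.50 Msun, matching the piecewise branches in imf().
for m in (0.05, 0.3, 1.0, 10.0):
    print(m, imf(m))

# The docstring advertises vimf() for vectorized use; one generic way to get the same
# effect (an assumption, not necessarily how vimf is implemented) is numpy.vectorize(imf).
```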
5,294
angr/claripy
claripy/balancer.py
Balancer._get_assumptions
def _get_assumptions(t): """ Given a constraint, _get_assumptions() returns a set of constraints that are implicitly assumed to be true. For example, `x <= 10` would return `x >= 0`. """ if t.op in ('__le__', '__lt__', 'ULE', 'ULT'): return [ t.args[0] >= 0 ] elif t.op in ('__ge__', '__gt__', 'UGE', 'UGT'): return [ t.args[0] <= 2**len(t.args[0])-1 ] elif t.op in ('SLE', 'SLT'): return [ _all_operations.SGE(t.args[0], -(1 << (len(t.args[0])-1))) ] elif t.op in ('SGE', 'SGT'): return [ _all_operations.SLE(t.args[0], (1 << (len(t.args[0])-1)) - 1) ] else: return [ ]
python
def _get_assumptions(t): """ Given a constraint, _get_assumptions() returns a set of constraints that are implicitly assumed to be true. For example, `x <= 10` would return `x >= 0`. """ if t.op in ('__le__', '__lt__', 'ULE', 'ULT'): return [ t.args[0] >= 0 ] elif t.op in ('__ge__', '__gt__', 'UGE', 'UGT'): return [ t.args[0] <= 2**len(t.args[0])-1 ] elif t.op in ('SLE', 'SLT'): return [ _all_operations.SGE(t.args[0], -(1 << (len(t.args[0])-1))) ] elif t.op in ('SGE', 'SGT'): return [ _all_operations.SLE(t.args[0], (1 << (len(t.args[0])-1)) - 1) ] else: return [ ]
['def', '_get_assumptions', '(', 't', ')', ':', 'if', 't', '.', 'op', 'in', '(', "'__le__'", ',', "'__lt__'", ',', "'ULE'", ',', "'ULT'", ')', ':', 'return', '[', 't', '.', 'args', '[', '0', ']', '>=', '0', ']', 'elif', 't', '.', 'op', 'in', '(', "'__ge__'", ',', "'__gt__'", ',', "'UGE'", ',', "'UGT'", ')', ':', 'return', '[', 't', '.', 'args', '[', '0', ']', '<=', '2', '**', 'len', '(', 't', '.', 'args', '[', '0', ']', ')', '-', '1', ']', 'elif', 't', '.', 'op', 'in', '(', "'SLE'", ',', "'SLT'", ')', ':', 'return', '[', '_all_operations', '.', 'SGE', '(', 't', '.', 'args', '[', '0', ']', ',', '-', '(', '1', '<<', '(', 'len', '(', 't', '.', 'args', '[', '0', ']', ')', '-', '1', ')', ')', ')', ']', 'elif', 't', '.', 'op', 'in', '(', "'SGE'", ',', "'SGT'", ')', ':', 'return', '[', '_all_operations', '.', 'SLE', '(', 't', '.', 'args', '[', '0', ']', ',', '(', '1', '<<', '(', 'len', '(', 't', '.', 'args', '[', '0', ']', ')', '-', '1', ')', ')', '-', '1', ')', ']', 'else', ':', 'return', '[', ']']
Given a constraint, _get_assumptions() returns a set of constraints that are implicitly assumed to be true. For example, `x <= 10` would return `x >= 0`.
['Given', 'a', 'constraint', '_get_assumptions', '()', 'returns', 'a', 'set', 'of', 'constraints', 'that', 'are', 'implicitly', 'assumed', 'to', 'be', 'true', '.', 'For', 'example', 'x', '<', '=', '10', 'would', 'return', 'x', '>', '=', '0', '.']
train
https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/balancer.py#L266-L281
5,295
jupyter-widgets/ipywidgets
ipywidgets/widgets/widget.py
Widget.on_msg
def on_msg(self, callback, remove=False): """(Un)Register a custom msg receive callback. Parameters ---------- callback: callable callback will be passed three arguments when a message arrives:: callback(widget, content, buffers) remove: bool True if the callback should be unregistered.""" self._msg_callbacks.register_callback(callback, remove=remove)
python
def on_msg(self, callback, remove=False): """(Un)Register a custom msg receive callback. Parameters ---------- callback: callable callback will be passed three arguments when a message arrives:: callback(widget, content, buffers) remove: bool True if the callback should be unregistered.""" self._msg_callbacks.register_callback(callback, remove=remove)
['def', 'on_msg', '(', 'self', ',', 'callback', ',', 'remove', '=', 'False', ')', ':', 'self', '.', '_msg_callbacks', '.', 'register_callback', '(', 'callback', ',', 'remove', '=', 'remove', ')']
(Un)Register a custom msg receive callback. Parameters ---------- callback: callable callback will be passed three arguments when a message arrives:: callback(widget, content, buffers) remove: bool True if the callback should be unregistered.
['(', 'Un', ')', 'Register', 'a', 'custom', 'msg', 'receive', 'callback', '.']
train
https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/widget.py#L558-L570
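A short usage sketch of the custom-message callback registration documented above; it assumes `w` is an existing widget instance in a live ipywidgets session.

```python
def handle_custom_msg(widget, content, buffers):
    # Receives the three arguments described in the docstring: the widget,
    # the message content, and any binary buffers.
    print('custom msg from', widget, ':', content)

w.on_msg(handle_custom_msg)                # register the callback
# ... later ...
w.on_msg(handle_custom_msg, remove=True)   # unregister the same callback
```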
5,296
Cadair/jupyter_environment_kernels
environment_kernels/core.py
EnvironmentKernelSpecManager.find_kernel_specs_for_envs
def find_kernel_specs_for_envs(self): """Returns a dict mapping kernel names to resource directories.""" data = self._get_env_data() return {name: data[name][0] for name in data}
python
def find_kernel_specs_for_envs(self): """Returns a dict mapping kernel names to resource directories.""" data = self._get_env_data() return {name: data[name][0] for name in data}
['def', 'find_kernel_specs_for_envs', '(', 'self', ')', ':', 'data', '=', 'self', '.', '_get_env_data', '(', ')', 'return', '{', 'name', ':', 'data', '[', 'name', ']', '[', '0', ']', 'for', 'name', 'in', 'data', '}']
Returns a dict mapping kernel names to resource directories.
['Returns', 'a', 'dict', 'mapping', 'kernel', 'names', 'to', 'resource', 'directories', '.']
train
https://github.com/Cadair/jupyter_environment_kernels/blob/3da304550b511bda7d5d39280379b5ca39bb31bc/environment_kernels/core.py#L176-L179
5,297
inveniosoftware/invenio-records-rest
invenio_records_rest/utils.py
check_elasticsearch
def check_elasticsearch(record, *args, **kwargs): """Return permission that check if the record exists in ES index. :params record: A record object. :returns: A object instance with a ``can()`` method. """ def can(self): """Try to search for given record.""" search = request._methodview.search_class() search = search.get_record(str(record.id)) return search.count() == 1 return type('CheckES', (), {'can': can})()
python
def check_elasticsearch(record, *args, **kwargs): """Return permission that check if the record exists in ES index. :params record: A record object. :returns: A object instance with a ``can()`` method. """ def can(self): """Try to search for given record.""" search = request._methodview.search_class() search = search.get_record(str(record.id)) return search.count() == 1 return type('CheckES', (), {'can': can})()
['def', 'check_elasticsearch', '(', 'record', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'def', 'can', '(', 'self', ')', ':', '"""Try to search for given record."""', 'search', '=', 'request', '.', '_methodview', '.', 'search_class', '(', ')', 'search', '=', 'search', '.', 'get_record', '(', 'str', '(', 'record', '.', 'id', ')', ')', 'return', 'search', '.', 'count', '(', ')', '==', '1', 'return', 'type', '(', "'CheckES'", ',', '(', ')', ',', '{', "'can'", ':', 'can', '}', ')', '(', ')']
Return permission that check if the record exists in ES index. :params record: A record object. :returns: A object instance with a ``can()`` method.
['Return', 'permission', 'that', 'check', 'if', 'the', 'record', 'exists', 'in', 'ES', 'index', '.']
train
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/utils.py#L100-L112
5,298
wummel/linkchecker
linkcheck/logger/__init__.py
_Logger.encode
def encode (self, s): """Encode string with output encoding.""" assert isinstance(s, unicode) return s.encode(self.output_encoding, self.codec_errors)
python
def encode (self, s): """Encode string with output encoding.""" assert isinstance(s, unicode) return s.encode(self.output_encoding, self.codec_errors)
['def', 'encode', '(', 'self', ',', 's', ')', ':', 'assert', 'isinstance', '(', 's', ',', 'unicode', ')', 'return', 's', '.', 'encode', '(', 'self', '.', 'output_encoding', ',', 'self', '.', 'codec_errors', ')']
Encode string with output encoding.
['Encode', 'string', 'with', 'output', 'encoding', '.']
train
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/__init__.py#L208-L211
5,299
secynic/ipwhois
ipwhois/experimental.py
bulk_lookup_rdap
def bulk_lookup_rdap(addresses=None, inc_raw=False, retry_count=3, depth=0, excluded_entities=None, rate_limit_timeout=60, socket_timeout=10, asn_timeout=240, proxy_openers=None): """ The function for bulk retrieving and parsing whois information for a list of IP addresses via HTTP (RDAP). This bulk lookup method uses bulk ASN Whois lookups first to retrieve the ASN for each IP. It then optimizes RDAP queries to achieve the fastest overall time, accounting for rate-limiting RIRs. Args: addresses (:obj:`list` of :obj:`str`): IP addresses to lookup. inc_raw (:obj:`bool`, optional): Whether to include the raw whois results in the returned dictionary. Defaults to False. retry_count (:obj:`int`): The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Defaults to 3. depth (:obj:`int`): How many levels deep to run queries when additional referenced objects are found. Defaults to 0. excluded_entities (:obj:`list` of :obj:`str`): Entity handles to not perform lookups. Defaults to None. rate_limit_timeout (:obj:`int`): The number of seconds to wait before retrying when a rate limit notice is returned via rdap+json. Defaults to 60. socket_timeout (:obj:`int`): The default timeout for socket connections in seconds. Defaults to 10. asn_timeout (:obj:`int`): The default timeout for bulk ASN lookups in seconds. Defaults to 240. proxy_openers (:obj:`list` of :obj:`OpenerDirector`): Proxy openers for single/rotating proxy support. Defaults to None. Returns: namedtuple: :results (dict): IP address keys with the values as dictionaries returned by IPWhois.lookup_rdap(). :stats (dict): Stats for the lookups: :: { 'ip_input_total' (int) - The total number of addresses originally provided for lookup via the addresses argument. 'ip_unique_total' (int) - The total number of unique addresses found in the addresses argument. 'ip_lookup_total' (int) - The total number of addresses that lookups were attempted for, excluding any that failed ASN registry checks. 'lacnic' (dict) - { 'failed' (list) - The addresses that failed to lookup. Excludes any that failed initially, but succeeded after futher retries. 'rate_limited' (list) - The addresses that encountered rate-limiting. Unless an address is also in 'failed', it eventually succeeded. 'total' (int) - The total number of addresses belonging to this RIR that lookups were attempted for. } 'ripencc' (dict) - Same as 'lacnic' above. 'apnic' (dict) - Same as 'lacnic' above. 'afrinic' (dict) - Same as 'lacnic' above. 'arin' (dict) - Same as 'lacnic' above. 'unallocated_addresses' (list) - The addresses that are unallocated/failed ASN lookups. These can be addresses that are not listed for one of the 5 RIRs (other). No attempt was made to perform an RDAP lookup for these. } Raises: ASNLookupError: The ASN bulk lookup failed, cannot proceed with bulk RDAP lookup. 
""" if not isinstance(addresses, list): raise ValueError('addresses must be a list of IP address strings') # Initialize the dicts/lists results = {} failed_lookups_dict = {} rated_lookups = [] stats = { 'ip_input_total': len(addresses), 'ip_unique_total': 0, 'ip_lookup_total': 0, 'lacnic': {'failed': [], 'rate_limited': [], 'total': 0}, 'ripencc': {'failed': [], 'rate_limited': [], 'total': 0}, 'apnic': {'failed': [], 'rate_limited': [], 'total': 0}, 'afrinic': {'failed': [], 'rate_limited': [], 'total': 0}, 'arin': {'failed': [], 'rate_limited': [], 'total': 0}, 'unallocated_addresses': [] } asn_parsed_results = {} if proxy_openers is None: proxy_openers = [None] proxy_openers_copy = iter(proxy_openers) # Make sure addresses is unique unique_ip_list = list(unique_everseen(addresses)) # Get the unique count to return stats['ip_unique_total'] = len(unique_ip_list) # This is needed for iteration order rir_keys_ordered = ['lacnic', 'ripencc', 'apnic', 'afrinic', 'arin'] # First query the ASN data for all IPs, can raise ASNLookupError, no catch bulk_asn = get_bulk_asn_whois(unique_ip_list, timeout=asn_timeout) # ASN results are returned as string, parse lines to list and remove first asn_result_list = bulk_asn.split('\n') del asn_result_list[0] # We need to instantiate IPASN, which currently needs a Net object, # IP doesn't matter here net = Net('1.2.3.4') ipasn = IPASN(net) # Iterate each IP ASN result, and add valid RIR results to # asn_parsed_results for RDAP lookups for asn_result in asn_result_list: temp = asn_result.split('|') # Not a valid entry, move on to next if len(temp) == 1: continue ip = temp[1].strip() # We need this since ASN bulk lookup is returning duplicates # This is an issue on the Cymru end if ip in asn_parsed_results.keys(): # pragma: no cover continue try: results = ipasn.parse_fields_whois(asn_result) except ASNRegistryError: # pragma: no cover continue # Add valid IP ASN result to asn_parsed_results for RDAP lookup asn_parsed_results[ip] = results stats[results['asn_registry']]['total'] += 1 # Set the list of IPs that are not allocated/failed ASN lookup stats['unallocated_addresses'] = list(k for k in addresses if k not in asn_parsed_results) # Set the total lookup count after unique IP and ASN result filtering stats['ip_lookup_total'] = len(asn_parsed_results) # Track the total number of LACNIC queries left. This is tracked in order # to ensure the 9 priority LACNIC queries/min don't go into infinite loop lacnic_total_left = stats['lacnic']['total'] # Set the start time, this value is updated when the rate limit is reset old_time = time.time() # Rate limit tracking dict for all RIRs rate_tracker = { 'lacnic': {'time': old_time, 'count': 0}, 'ripencc': {'time': old_time, 'count': 0}, 'apnic': {'time': old_time, 'count': 0}, 'afrinic': {'time': old_time, 'count': 0}, 'arin': {'time': old_time, 'count': 0} } # Iterate all of the IPs to perform RDAP lookups until none are left while len(asn_parsed_results) > 0: # Sequentially run through each RIR to minimize lookups in a row to # the same RIR. 
for rir in rir_keys_ordered: # If there are still LACNIC IPs left to lookup and the rate limit # hasn't been reached, skip to find a LACNIC IP to lookup if ( rir != 'lacnic' and lacnic_total_left > 0 and (rate_tracker['lacnic']['count'] != 9 or (time.time() - rate_tracker['lacnic']['time'] ) >= rate_limit_timeout ) ): # pragma: no cover continue # If the RIR rate limit has been reached and hasn't expired, # move on to the next RIR if ( rate_tracker[rir]['count'] == 9 and ( (time.time() - rate_tracker[rir]['time'] ) < rate_limit_timeout) ): # pragma: no cover continue # If the RIR rate limit has expired, reset the count/timer # and perform the lookup elif ((time.time() - rate_tracker[rir]['time'] ) >= rate_limit_timeout): # pragma: no cover rate_tracker[rir]['count'] = 0 rate_tracker[rir]['time'] = time.time() # Create a copy of the lookup IP dict so we can modify on # successful/failed queries. Loop each IP until it matches the # correct RIR in the parent loop, and attempt lookup tmp_dict = asn_parsed_results.copy() for ip, asn_data in tmp_dict.items(): # Check to see if IP matches parent loop RIR for lookup if asn_data['asn_registry'] == rir: log.debug('Starting lookup for IP: {0} ' 'RIR: {1}'.format(ip, rir)) # Add to count for rate-limit tracking only for LACNIC, # since we have not seen aggressive rate-limiting from the # other RIRs yet if rir == 'lacnic': rate_tracker[rir]['count'] += 1 # Get the next proxy opener to use, or None try: opener = next(proxy_openers_copy) # Start at the beginning if all have been used except StopIteration: proxy_openers_copy = iter(proxy_openers) opener = next(proxy_openers_copy) # Instantiate the objects needed for the RDAP lookup net = Net(ip, timeout=socket_timeout, proxy_opener=opener) rdap = RDAP(net) try: # Perform the RDAP lookup. retry_count is set to 0 # here since we handle that in this function results = rdap.lookup( inc_raw=inc_raw, retry_count=0, asn_data=asn_data, depth=depth, excluded_entities=excluded_entities ) log.debug('Successful lookup for IP: {0} ' 'RIR: {1}'.format(ip, rir)) # Lookup was successful, add to result. 
Set the nir # key to None as this is not supported # (yet - requires more queries) results[ip] = results results[ip]['nir'] = None # Remove the IP from the lookup queue del asn_parsed_results[ip] # If this was LACNIC IP, reduce the total left count if rir == 'lacnic': lacnic_total_left -= 1 log.debug( '{0} total lookups left, {1} LACNIC lookups left' ''.format(str(len(asn_parsed_results)), str(lacnic_total_left)) ) # If this IP failed previously, remove it from the # failed return dict if ( ip in failed_lookups_dict.keys() ): # pragma: no cover del failed_lookups_dict[ip] # Break out of the IP list loop, we need to change to # the next RIR break except HTTPLookupError: # pragma: no cover log.debug('Failed lookup for IP: {0} ' 'RIR: {1}'.format(ip, rir)) # Add the IP to the failed lookups dict if not there if ip not in failed_lookups_dict.keys(): failed_lookups_dict[ip] = 1 # This IP has already failed at least once, increment # the failure count until retry_count reached, then # stop trying else: failed_lookups_dict[ip] += 1 if failed_lookups_dict[ip] == retry_count: del asn_parsed_results[ip] stats[rir]['failed'].append(ip) if rir == 'lacnic': lacnic_total_left -= 1 # Since this IP failed, we don't break to move to next # RIR, we check the next IP for this RIR continue except HTTPRateLimitError: # pragma: no cover # Add the IP to the rate-limited lookups dict if not # there if ip not in rated_lookups: rated_lookups.append(ip) stats[rir]['rate_limited'].append(ip) log.debug('Rate limiting triggered for IP: {0} ' 'RIR: {1}'.format(ip, rir)) # Since rate-limit was reached, reset the timer and # max out the count rate_tracker[rir]['time'] = time.time() rate_tracker[rir]['count'] = 9 # Break out of the IP list loop, we need to change to # the next RIR break return_tuple = namedtuple('return_tuple', ['results', 'stats']) return return_tuple(results, stats)
python
def bulk_lookup_rdap(addresses=None, inc_raw=False, retry_count=3, depth=0,
                     excluded_entities=None, rate_limit_timeout=60,
                     socket_timeout=10, asn_timeout=240, proxy_openers=None):
    """
    The function for bulk retrieving and parsing whois information for a list
    of IP addresses via HTTP (RDAP). This bulk lookup method uses bulk ASN
    Whois lookups first to retrieve the ASN for each IP. It then optimizes
    RDAP queries to achieve the fastest overall time, accounting for
    rate-limiting RIRs.

    Args:
        addresses (:obj:`list` of :obj:`str`): IP addresses to lookup.
        inc_raw (:obj:`bool`, optional): Whether to include the raw whois
            results in the returned dictionary. Defaults to False.
        retry_count (:obj:`int`): The number of times to retry in case socket
            errors, timeouts, connection resets, etc. are encountered.
            Defaults to 3.
        depth (:obj:`int`): How many levels deep to run queries when
            additional referenced objects are found. Defaults to 0.
        excluded_entities (:obj:`list` of :obj:`str`): Entity handles to not
            perform lookups. Defaults to None.
        rate_limit_timeout (:obj:`int`): The number of seconds to wait before
            retrying when a rate limit notice is returned via rdap+json.
            Defaults to 60.
        socket_timeout (:obj:`int`): The default timeout for socket
            connections in seconds. Defaults to 10.
        asn_timeout (:obj:`int`): The default timeout for bulk ASN lookups in
            seconds. Defaults to 240.
        proxy_openers (:obj:`list` of :obj:`OpenerDirector`): Proxy openers
            for single/rotating proxy support. Defaults to None.

    Returns:
        namedtuple:

        :results (dict): IP address keys with the values as dictionaries
            returned by IPWhois.lookup_rdap().
        :stats (dict): Stats for the lookups:

        ::

            {
                'ip_input_total' (int) - The total number of addresses
                    originally provided for lookup via the addresses argument.
                'ip_unique_total' (int) - The total number of unique addresses
                    found in the addresses argument.
                'ip_lookup_total' (int) - The total number of addresses that
                    lookups were attempted for, excluding any that failed ASN
                    registry checks.
                'lacnic' (dict) -
                {
                    'failed' (list) - The addresses that failed to lookup.
                        Excludes any that failed initially, but succeeded
                        after futher retries.
                    'rate_limited' (list) - The addresses that encountered
                        rate-limiting. Unless an address is also in 'failed',
                        it eventually succeeded.
                    'total' (int) - The total number of addresses belonging
                        to this RIR that lookups were attempted for.
                }
                'ripencc' (dict) - Same as 'lacnic' above.
                'apnic' (dict) - Same as 'lacnic' above.
                'afrinic' (dict) - Same as 'lacnic' above.
                'arin' (dict) - Same as 'lacnic' above.
                'unallocated_addresses' (list) - The addresses that are
                    unallocated/failed ASN lookups. These can be addresses
                    that are not listed for one of the 5 RIRs (other). No
                    attempt was made to perform an RDAP lookup for these.
            }

    Raises:
        ASNLookupError: The ASN bulk lookup failed, cannot proceed with bulk
            RDAP lookup.
    """

    if not isinstance(addresses, list):
        raise ValueError('addresses must be a list of IP address strings')

    # Initialize the dicts/lists
    results = {}
    failed_lookups_dict = {}
    rated_lookups = []
    stats = {
        'ip_input_total': len(addresses),
        'ip_unique_total': 0,
        'ip_lookup_total': 0,
        'lacnic': {'failed': [], 'rate_limited': [], 'total': 0},
        'ripencc': {'failed': [], 'rate_limited': [], 'total': 0},
        'apnic': {'failed': [], 'rate_limited': [], 'total': 0},
        'afrinic': {'failed': [], 'rate_limited': [], 'total': 0},
        'arin': {'failed': [], 'rate_limited': [], 'total': 0},
        'unallocated_addresses': []
    }
    asn_parsed_results = {}

    if proxy_openers is None:
        proxy_openers = [None]

    proxy_openers_copy = iter(proxy_openers)

    # Make sure addresses is unique
    unique_ip_list = list(unique_everseen(addresses))

    # Get the unique count to return
    stats['ip_unique_total'] = len(unique_ip_list)

    # This is needed for iteration order
    rir_keys_ordered = ['lacnic', 'ripencc', 'apnic', 'afrinic', 'arin']

    # First query the ASN data for all IPs, can raise ASNLookupError, no catch
    bulk_asn = get_bulk_asn_whois(unique_ip_list, timeout=asn_timeout)

    # ASN results are returned as string, parse lines to list and remove first
    asn_result_list = bulk_asn.split('\n')
    del asn_result_list[0]

    # We need to instantiate IPASN, which currently needs a Net object,
    # IP doesn't matter here
    net = Net('1.2.3.4')
    ipasn = IPASN(net)

    # Iterate each IP ASN result, and add valid RIR results to
    # asn_parsed_results for RDAP lookups
    for asn_result in asn_result_list:

        temp = asn_result.split('|')

        # Not a valid entry, move on to next
        if len(temp) == 1:
            continue

        ip = temp[1].strip()

        # We need this since ASN bulk lookup is returning duplicates
        # This is an issue on the Cymru end
        if ip in asn_parsed_results.keys():  # pragma: no cover
            continue

        try:
            results = ipasn.parse_fields_whois(asn_result)
        except ASNRegistryError:  # pragma: no cover
            continue

        # Add valid IP ASN result to asn_parsed_results for RDAP lookup
        asn_parsed_results[ip] = results
        stats[results['asn_registry']]['total'] += 1

    # Set the list of IPs that are not allocated/failed ASN lookup
    stats['unallocated_addresses'] = list(
        k for k in addresses if k not in asn_parsed_results)

    # Set the total lookup count after unique IP and ASN result filtering
    stats['ip_lookup_total'] = len(asn_parsed_results)

    # Track the total number of LACNIC queries left. This is tracked in order
    # to ensure the 9 priority LACNIC queries/min don't go into infinite loop
    lacnic_total_left = stats['lacnic']['total']

    # Set the start time, this value is updated when the rate limit is reset
    old_time = time.time()

    # Rate limit tracking dict for all RIRs
    rate_tracker = {
        'lacnic': {'time': old_time, 'count': 0},
        'ripencc': {'time': old_time, 'count': 0},
        'apnic': {'time': old_time, 'count': 0},
        'afrinic': {'time': old_time, 'count': 0},
        'arin': {'time': old_time, 'count': 0}
    }

    # Iterate all of the IPs to perform RDAP lookups until none are left
    while len(asn_parsed_results) > 0:

        # Sequentially run through each RIR to minimize lookups in a row to
        # the same RIR.
        for rir in rir_keys_ordered:

            # If there are still LACNIC IPs left to lookup and the rate limit
            # hasn't been reached, skip to find a LACNIC IP to lookup
            if (
                rir != 'lacnic' and lacnic_total_left > 0 and
                (rate_tracker['lacnic']['count'] != 9 or
                 (time.time() - rate_tracker['lacnic']['time']
                  ) >= rate_limit_timeout
                 )
            ):  # pragma: no cover
                continue

            # If the RIR rate limit has been reached and hasn't expired,
            # move on to the next RIR
            if (
                rate_tracker[rir]['count'] == 9 and (
                    (time.time() - rate_tracker[rir]['time']
                     ) < rate_limit_timeout)
            ):  # pragma: no cover
                continue

            # If the RIR rate limit has expired, reset the count/timer
            # and perform the lookup
            elif ((time.time() - rate_tracker[rir]['time']
                   ) >= rate_limit_timeout):  # pragma: no cover
                rate_tracker[rir]['count'] = 0
                rate_tracker[rir]['time'] = time.time()

            # Create a copy of the lookup IP dict so we can modify on
            # successful/failed queries. Loop each IP until it matches the
            # correct RIR in the parent loop, and attempt lookup
            tmp_dict = asn_parsed_results.copy()

            for ip, asn_data in tmp_dict.items():

                # Check to see if IP matches parent loop RIR for lookup
                if asn_data['asn_registry'] == rir:

                    log.debug('Starting lookup for IP: {0} '
                              'RIR: {1}'.format(ip, rir))

                    # Add to count for rate-limit tracking only for LACNIC,
                    # since we have not seen aggressive rate-limiting from the
                    # other RIRs yet
                    if rir == 'lacnic':
                        rate_tracker[rir]['count'] += 1

                    # Get the next proxy opener to use, or None
                    try:
                        opener = next(proxy_openers_copy)
                    # Start at the beginning if all have been used
                    except StopIteration:
                        proxy_openers_copy = iter(proxy_openers)
                        opener = next(proxy_openers_copy)

                    # Instantiate the objects needed for the RDAP lookup
                    net = Net(ip, timeout=socket_timeout, proxy_opener=opener)
                    rdap = RDAP(net)

                    try:

                        # Perform the RDAP lookup. retry_count is set to 0
                        # here since we handle that in this function
                        results = rdap.lookup(
                            inc_raw=inc_raw, retry_count=0,
                            asn_data=asn_data, depth=depth,
                            excluded_entities=excluded_entities
                        )

                        log.debug('Successful lookup for IP: {0} '
                                  'RIR: {1}'.format(ip, rir))

                        # Lookup was successful, add to result. Set the nir
                        # key to None as this is not supported
                        # (yet - requires more queries)
                        results[ip] = results
                        results[ip]['nir'] = None

                        # Remove the IP from the lookup queue
                        del asn_parsed_results[ip]

                        # If this was LACNIC IP, reduce the total left count
                        if rir == 'lacnic':
                            lacnic_total_left -= 1

                        log.debug(
                            '{0} total lookups left, {1} LACNIC lookups left'
                            ''.format(str(len(asn_parsed_results)),
                                      str(lacnic_total_left))
                        )

                        # If this IP failed previously, remove it from the
                        # failed return dict
                        if (
                            ip in failed_lookups_dict.keys()
                        ):  # pragma: no cover
                            del failed_lookups_dict[ip]

                        # Break out of the IP list loop, we need to change to
                        # the next RIR
                        break

                    except HTTPLookupError:  # pragma: no cover

                        log.debug('Failed lookup for IP: {0} '
                                  'RIR: {1}'.format(ip, rir))

                        # Add the IP to the failed lookups dict if not there
                        if ip not in failed_lookups_dict.keys():
                            failed_lookups_dict[ip] = 1

                        # This IP has already failed at least once, increment
                        # the failure count until retry_count reached, then
                        # stop trying
                        else:
                            failed_lookups_dict[ip] += 1

                            if failed_lookups_dict[ip] == retry_count:
                                del asn_parsed_results[ip]
                                stats[rir]['failed'].append(ip)

                                if rir == 'lacnic':
                                    lacnic_total_left -= 1

                        # Since this IP failed, we don't break to move to next
                        # RIR, we check the next IP for this RIR
                        continue

                    except HTTPRateLimitError:  # pragma: no cover

                        # Add the IP to the rate-limited lookups dict if not
                        # there
                        if ip not in rated_lookups:
                            rated_lookups.append(ip)
                            stats[rir]['rate_limited'].append(ip)

                        log.debug('Rate limiting triggered for IP: {0} '
                                  'RIR: {1}'.format(ip, rir))

                        # Since rate-limit was reached, reset the timer and
                        # max out the count
                        rate_tracker[rir]['time'] = time.time()
                        rate_tracker[rir]['count'] = 9

                        # Break out of the IP list loop, we need to change to
                        # the next RIR
                        break

    return_tuple = namedtuple('return_tuple', ['results', 'stats'])
    return return_tuple(results, stats)
['def', 'bulk_lookup_rdap', '(', 'addresses', '=', 'None', ',', 'inc_raw', '=', 'False', ',', 'retry_count', '=', '3', ',', 'depth', '=', '0', ',', 'excluded_entities', '=', 'None', ',', 'rate_limit_timeout', '=', '60', ',', 'socket_timeout', '=', '10', ',', 'asn_timeout', '=', '240', ',', 'proxy_openers', '=', 'None', ')', ':', 'if', 'not', 'isinstance', '(', 'addresses', ',', 'list', ')', ':', 'raise', 'ValueError', '(', "'addresses must be a list of IP address strings'", ')', '# Initialize the dicts/lists', 'results', '=', '{', '}', 'failed_lookups_dict', '=', '{', '}', 'rated_lookups', '=', '[', ']', 'stats', '=', '{', "'ip_input_total'", ':', 'len', '(', 'addresses', ')', ',', "'ip_unique_total'", ':', '0', ',', "'ip_lookup_total'", ':', '0', ',', "'lacnic'", ':', '{', "'failed'", ':', '[', ']', ',', "'rate_limited'", ':', '[', ']', ',', "'total'", ':', '0', '}', ',', "'ripencc'", ':', '{', "'failed'", ':', '[', ']', ',', "'rate_limited'", ':', '[', ']', ',', "'total'", ':', '0', '}', ',', "'apnic'", ':', '{', "'failed'", ':', '[', ']', ',', "'rate_limited'", ':', '[', ']', ',', "'total'", ':', '0', '}', ',', "'afrinic'", ':', '{', "'failed'", ':', '[', ']', ',', "'rate_limited'", ':', '[', ']', ',', "'total'", ':', '0', '}', ',', "'arin'", ':', '{', "'failed'", ':', '[', ']', ',', "'rate_limited'", ':', '[', ']', ',', "'total'", ':', '0', '}', ',', "'unallocated_addresses'", ':', '[', ']', '}', 'asn_parsed_results', '=', '{', '}', 'if', 'proxy_openers', 'is', 'None', ':', 'proxy_openers', '=', '[', 'None', ']', 'proxy_openers_copy', '=', 'iter', '(', 'proxy_openers', ')', '# Make sure addresses is unique', 'unique_ip_list', '=', 'list', '(', 'unique_everseen', '(', 'addresses', ')', ')', '# Get the unique count to return', 'stats', '[', "'ip_unique_total'", ']', '=', 'len', '(', 'unique_ip_list', ')', '# This is needed for iteration order', 'rir_keys_ordered', '=', '[', "'lacnic'", ',', "'ripencc'", ',', "'apnic'", ',', "'afrinic'", ',', "'arin'", ']', '# First query the ASN data for all IPs, can raise ASNLookupError, no catch', 'bulk_asn', '=', 'get_bulk_asn_whois', '(', 'unique_ip_list', ',', 'timeout', '=', 'asn_timeout', ')', '# ASN results are returned as string, parse lines to list and remove first', 'asn_result_list', '=', 'bulk_asn', '.', 'split', '(', "'\\n'", ')', 'del', 'asn_result_list', '[', '0', ']', '# We need to instantiate IPASN, which currently needs a Net object,', "# IP doesn't matter here", 'net', '=', 'Net', '(', "'1.2.3.4'", ')', 'ipasn', '=', 'IPASN', '(', 'net', ')', '# Iterate each IP ASN result, and add valid RIR results to', '# asn_parsed_results for RDAP lookups', 'for', 'asn_result', 'in', 'asn_result_list', ':', 'temp', '=', 'asn_result', '.', 'split', '(', "'|'", ')', '# Not a valid entry, move on to next', 'if', 'len', '(', 'temp', ')', '==', '1', ':', 'continue', 'ip', '=', 'temp', '[', '1', ']', '.', 'strip', '(', ')', '# We need this since ASN bulk lookup is returning duplicates', '# This is an issue on the Cymru end', 'if', 'ip', 'in', 'asn_parsed_results', '.', 'keys', '(', ')', ':', '# pragma: no cover', 'continue', 'try', ':', 'results', '=', 'ipasn', '.', 'parse_fields_whois', '(', 'asn_result', ')', 'except', 'ASNRegistryError', ':', '# pragma: no cover', 'continue', '# Add valid IP ASN result to asn_parsed_results for RDAP lookup', 'asn_parsed_results', '[', 'ip', ']', '=', 'results', 'stats', '[', 'results', '[', "'asn_registry'", ']', ']', '[', "'total'", ']', '+=', '1', '# Set the list of IPs that are not allocated/failed ASN lookup', 
'stats', '[', "'unallocated_addresses'", ']', '=', 'list', '(', 'k', 'for', 'k', 'in', 'addresses', 'if', 'k', 'not', 'in', 'asn_parsed_results', ')', '# Set the total lookup count after unique IP and ASN result filtering', 'stats', '[', "'ip_lookup_total'", ']', '=', 'len', '(', 'asn_parsed_results', ')', '# Track the total number of LACNIC queries left. This is tracked in order', "# to ensure the 9 priority LACNIC queries/min don't go into infinite loop", 'lacnic_total_left', '=', 'stats', '[', "'lacnic'", ']', '[', "'total'", ']', '# Set the start time, this value is updated when the rate limit is reset', 'old_time', '=', 'time', '.', 'time', '(', ')', '# Rate limit tracking dict for all RIRs', 'rate_tracker', '=', '{', "'lacnic'", ':', '{', "'time'", ':', 'old_time', ',', "'count'", ':', '0', '}', ',', "'ripencc'", ':', '{', "'time'", ':', 'old_time', ',', "'count'", ':', '0', '}', ',', "'apnic'", ':', '{', "'time'", ':', 'old_time', ',', "'count'", ':', '0', '}', ',', "'afrinic'", ':', '{', "'time'", ':', 'old_time', ',', "'count'", ':', '0', '}', ',', "'arin'", ':', '{', "'time'", ':', 'old_time', ',', "'count'", ':', '0', '}', '}', '# Iterate all of the IPs to perform RDAP lookups until none are left', 'while', 'len', '(', 'asn_parsed_results', ')', '>', '0', ':', '# Sequentially run through each RIR to minimize lookups in a row to', '# the same RIR.', 'for', 'rir', 'in', 'rir_keys_ordered', ':', '# If there are still LACNIC IPs left to lookup and the rate limit', "# hasn't been reached, skip to find a LACNIC IP to lookup", 'if', '(', 'rir', '!=', "'lacnic'", 'and', 'lacnic_total_left', '>', '0', 'and', '(', 'rate_tracker', '[', "'lacnic'", ']', '[', "'count'", ']', '!=', '9', 'or', '(', 'time', '.', 'time', '(', ')', '-', 'rate_tracker', '[', "'lacnic'", ']', '[', "'time'", ']', ')', '>=', 'rate_limit_timeout', ')', ')', ':', '# pragma: no cover', 'continue', "# If the RIR rate limit has been reached and hasn't expired,", '# move on to the next RIR', 'if', '(', 'rate_tracker', '[', 'rir', ']', '[', "'count'", ']', '==', '9', 'and', '(', '(', 'time', '.', 'time', '(', ')', '-', 'rate_tracker', '[', 'rir', ']', '[', "'time'", ']', ')', '<', 'rate_limit_timeout', ')', ')', ':', '# pragma: no cover', 'continue', '# If the RIR rate limit has expired, reset the count/timer', '# and perform the lookup', 'elif', '(', '(', 'time', '.', 'time', '(', ')', '-', 'rate_tracker', '[', 'rir', ']', '[', "'time'", ']', ')', '>=', 'rate_limit_timeout', ')', ':', '# pragma: no cover', 'rate_tracker', '[', 'rir', ']', '[', "'count'", ']', '=', '0', 'rate_tracker', '[', 'rir', ']', '[', "'time'", ']', '=', 'time', '.', 'time', '(', ')', '# Create a copy of the lookup IP dict so we can modify on', '# successful/failed queries. 
Loop each IP until it matches the', '# correct RIR in the parent loop, and attempt lookup', 'tmp_dict', '=', 'asn_parsed_results', '.', 'copy', '(', ')', 'for', 'ip', ',', 'asn_data', 'in', 'tmp_dict', '.', 'items', '(', ')', ':', '# Check to see if IP matches parent loop RIR for lookup', 'if', 'asn_data', '[', "'asn_registry'", ']', '==', 'rir', ':', 'log', '.', 'debug', '(', "'Starting lookup for IP: {0} '", "'RIR: {1}'", '.', 'format', '(', 'ip', ',', 'rir', ')', ')', '# Add to count for rate-limit tracking only for LACNIC,', '# since we have not seen aggressive rate-limiting from the', '# other RIRs yet', 'if', 'rir', '==', "'lacnic'", ':', 'rate_tracker', '[', 'rir', ']', '[', "'count'", ']', '+=', '1', '# Get the next proxy opener to use, or None', 'try', ':', 'opener', '=', 'next', '(', 'proxy_openers_copy', ')', '# Start at the beginning if all have been used', 'except', 'StopIteration', ':', 'proxy_openers_copy', '=', 'iter', '(', 'proxy_openers', ')', 'opener', '=', 'next', '(', 'proxy_openers_copy', ')', '# Instantiate the objects needed for the RDAP lookup', 'net', '=', 'Net', '(', 'ip', ',', 'timeout', '=', 'socket_timeout', ',', 'proxy_opener', '=', 'opener', ')', 'rdap', '=', 'RDAP', '(', 'net', ')', 'try', ':', '# Perform the RDAP lookup. retry_count is set to 0', '# here since we handle that in this function', 'results', '=', 'rdap', '.', 'lookup', '(', 'inc_raw', '=', 'inc_raw', ',', 'retry_count', '=', '0', ',', 'asn_data', '=', 'asn_data', ',', 'depth', '=', 'depth', ',', 'excluded_entities', '=', 'excluded_entities', ')', 'log', '.', 'debug', '(', "'Successful lookup for IP: {0} '", "'RIR: {1}'", '.', 'format', '(', 'ip', ',', 'rir', ')', ')', '# Lookup was successful, add to result. Set the nir', '# key to None as this is not supported', '# (yet - requires more queries)', 'results', '[', 'ip', ']', '=', 'results', 'results', '[', 'ip', ']', '[', "'nir'", ']', '=', 'None', '# Remove the IP from the lookup queue', 'del', 'asn_parsed_results', '[', 'ip', ']', '# If this was LACNIC IP, reduce the total left count', 'if', 'rir', '==', "'lacnic'", ':', 'lacnic_total_left', '-=', '1', 'log', '.', 'debug', '(', "'{0} total lookups left, {1} LACNIC lookups left'", "''", '.', 'format', '(', 'str', '(', 'len', '(', 'asn_parsed_results', ')', ')', ',', 'str', '(', 'lacnic_total_left', ')', ')', ')', '# If this IP failed previously, remove it from the', '# failed return dict', 'if', '(', 'ip', 'in', 'failed_lookups_dict', '.', 'keys', '(', ')', ')', ':', '# pragma: no cover', 'del', 'failed_lookups_dict', '[', 'ip', ']', '# Break out of the IP list loop, we need to change to', '# the next RIR', 'break', 'except', 'HTTPLookupError', ':', '# pragma: no cover', 'log', '.', 'debug', '(', "'Failed lookup for IP: {0} '", "'RIR: {1}'", '.', 'format', '(', 'ip', ',', 'rir', ')', ')', '# Add the IP to the failed lookups dict if not there', 'if', 'ip', 'not', 'in', 'failed_lookups_dict', '.', 'keys', '(', ')', ':', 'failed_lookups_dict', '[', 'ip', ']', '=', '1', '# This IP has already failed at least once, increment', '# the failure count until retry_count reached, then', '# stop trying', 'else', ':', 'failed_lookups_dict', '[', 'ip', ']', '+=', '1', 'if', 'failed_lookups_dict', '[', 'ip', ']', '==', 'retry_count', ':', 'del', 'asn_parsed_results', '[', 'ip', ']', 'stats', '[', 'rir', ']', '[', "'failed'", ']', '.', 'append', '(', 'ip', ')', 'if', 'rir', '==', "'lacnic'", ':', 'lacnic_total_left', '-=', '1', "# Since this IP failed, we don't break to move to next", '# RIR, we check the 
next IP for this RIR', 'continue', 'except', 'HTTPRateLimitError', ':', '# pragma: no cover', '# Add the IP to the rate-limited lookups dict if not', '# there', 'if', 'ip', 'not', 'in', 'rated_lookups', ':', 'rated_lookups', '.', 'append', '(', 'ip', ')', 'stats', '[', 'rir', ']', '[', "'rate_limited'", ']', '.', 'append', '(', 'ip', ')', 'log', '.', 'debug', '(', "'Rate limiting triggered for IP: {0} '", "'RIR: {1}'", '.', 'format', '(', 'ip', ',', 'rir', ')', ')', '# Since rate-limit was reached, reset the timer and', '# max out the count', 'rate_tracker', '[', 'rir', ']', '[', "'time'", ']', '=', 'time', '.', 'time', '(', ')', 'rate_tracker', '[', 'rir', ']', '[', "'count'", ']', '=', '9', '# Break out of the IP list loop, we need to change to', '# the next RIR', 'break', 'return_tuple', '=', 'namedtuple', '(', "'return_tuple'", ',', '[', "'results'", ',', "'stats'", ']', ')', 'return', 'return_tuple', '(', 'results', ',', 'stats', ')']
The function for bulk retrieving and parsing whois information for a list
of IP addresses via HTTP (RDAP). This bulk lookup method uses bulk ASN
Whois lookups first to retrieve the ASN for each IP. It then optimizes
RDAP queries to achieve the fastest overall time, accounting for
rate-limiting RIRs.

Args:
    addresses (:obj:`list` of :obj:`str`): IP addresses to lookup.
    inc_raw (:obj:`bool`, optional): Whether to include the raw whois
        results in the returned dictionary. Defaults to False.
    retry_count (:obj:`int`): The number of times to retry in case socket
        errors, timeouts, connection resets, etc. are encountered.
        Defaults to 3.
    depth (:obj:`int`): How many levels deep to run queries when additional
        referenced objects are found. Defaults to 0.
    excluded_entities (:obj:`list` of :obj:`str`): Entity handles to not
        perform lookups. Defaults to None.
    rate_limit_timeout (:obj:`int`): The number of seconds to wait before
        retrying when a rate limit notice is returned via rdap+json.
        Defaults to 60.
    socket_timeout (:obj:`int`): The default timeout for socket connections
        in seconds. Defaults to 10.
    asn_timeout (:obj:`int`): The default timeout for bulk ASN lookups in
        seconds. Defaults to 240.
    proxy_openers (:obj:`list` of :obj:`OpenerDirector`): Proxy openers for
        single/rotating proxy support. Defaults to None.

Returns:
    namedtuple:

    :results (dict): IP address keys with the values as dictionaries
        returned by IPWhois.lookup_rdap().
    :stats (dict): Stats for the lookups:

    ::

        {
            'ip_input_total' (int) - The total number of addresses
                originally provided for lookup via the addresses argument.
            'ip_unique_total' (int) - The total number of unique addresses
                found in the addresses argument.
            'ip_lookup_total' (int) - The total number of addresses that
                lookups were attempted for, excluding any that failed ASN
                registry checks.
            'lacnic' (dict) -
            {
                'failed' (list) - The addresses that failed to lookup.
                    Excludes any that failed initially, but succeeded after
                    futher retries.
                'rate_limited' (list) - The addresses that encountered
                    rate-limiting. Unless an address is also in 'failed', it
                    eventually succeeded.
                'total' (int) - The total number of addresses belonging to
                    this RIR that lookups were attempted for.
            }
            'ripencc' (dict) - Same as 'lacnic' above.
            'apnic' (dict) - Same as 'lacnic' above.
            'afrinic' (dict) - Same as 'lacnic' above.
            'arin' (dict) - Same as 'lacnic' above.
            'unallocated_addresses' (list) - The addresses that are
                unallocated/failed ASN lookups. These can be addresses that
                are not listed for one of the 5 RIRs (other). No attempt was
                made to perform an RDAP lookup for these.
        }

Raises:
    ASNLookupError: The ASN bulk lookup failed, cannot proceed with bulk
        RDAP lookup.
['The', 'function', 'for', 'bulk', 'retrieving', 'and', 'parsing', 'whois', 'information', 'for', 'a', 'list', 'of', 'IP', 'addresses', 'via', 'HTTP', '(', 'RDAP', ')', '.', 'This', 'bulk', 'lookup', 'method', 'uses', 'bulk', 'ASN', 'Whois', 'lookups', 'first', 'to', 'retrieve', 'the', 'ASN', 'for', 'each', 'IP', '.', 'It', 'then', 'optimizes', 'RDAP', 'queries', 'to', 'achieve', 'the', 'fastest', 'overall', 'time', 'accounting', 'for', 'rate', '-', 'limiting', 'RIRs', '.']
train
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/experimental.py#L113-L457
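Usage sketch (editorial addition, not part of the dataset record above): a minimal example of how bulk_lookup_rdap might be called. The import path is inferred from the func_code_url (ipwhois/experimental.py); the sample addresses, the logging setup, and the result keys printed ('asn_registry', 'network'/'name') are illustrative assumptions based on the docstring's description of IPWhois.lookup_rdap() output, not verified against a live run.

import logging

from ipwhois.experimental import bulk_lookup_rdap

# Optional: surface the function's log.debug progress messages
logging.basicConfig(level=logging.DEBUG)

# Hypothetical input list; duplicates are removed internally via unique_everseen
ip_list = ['74.125.225.229', '196.11.240.215', '210.107.73.73']

# The function returns a namedtuple of (results, stats), per its docstring
results, stats = bulk_lookup_rdap(addresses=ip_list, retry_count=3, depth=0)

for ip, rdap_data in results.items():
    # Keys assumed to follow the documented lookup_rdap() output structure
    print(ip, rdap_data.get('asn_registry'),
          rdap_data.get('network', {}).get('name'))

print('{0} lookups attempted, {1} unallocated/skipped'.format(
    stats['ip_lookup_total'], len(stats['unallocated_addresses'])))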