Unnamed: 0
int64
0
10k
repository_name
stringlengths
7
54
func_path_in_repository
stringlengths
5
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
100
30.3k
language
stringclasses
1 value
func_code_string
stringlengths
100
30.3k
func_code_tokens
stringlengths
138
33.2k
func_documentation_string
stringlengths
1
15k
func_documentation_tokens
stringlengths
5
5.14k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
7,100
materialsproject/pymatgen
pymatgen/io/qchem/outputs.py
QCOutput._read_mulliken
def _read_mulliken(self): """ Parses Mulliken charges. Also parses spins given an unrestricted SCF. """ if self.data.get('unrestricted', []): header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+Spin\s\(a\.u\.\)\s+\-+" table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)\s+([\d\-\.]+)" footer_pattern = r"\s\s\-+\s+Sum of atomic charges" else: header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+\-+" table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)" footer_pattern = r"\s\s\-+\s+Sum of atomic charges" temp_mulliken = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern) real_mulliken = [] for one_mulliken in temp_mulliken: if self.data.get('unrestricted', []): temp = np.zeros(shape=(len(one_mulliken), 2)) for ii, entry in enumerate(one_mulliken): temp[ii, 0] = float(entry[0]) temp[ii, 1] = float(entry[1]) else: temp = np.zeros(len(one_mulliken)) for ii, entry in enumerate(one_mulliken): temp[ii] = float(entry[0]) real_mulliken += [temp] self.data["Mulliken"] = real_mulliken
python
def _read_mulliken(self): """ Parses Mulliken charges. Also parses spins given an unrestricted SCF. """ if self.data.get('unrestricted', []): header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+Spin\s\(a\.u\.\)\s+\-+" table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)\s+([\d\-\.]+)" footer_pattern = r"\s\s\-+\s+Sum of atomic charges" else: header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+\-+" table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)" footer_pattern = r"\s\s\-+\s+Sum of atomic charges" temp_mulliken = read_table_pattern(self.text, header_pattern, table_pattern, footer_pattern) real_mulliken = [] for one_mulliken in temp_mulliken: if self.data.get('unrestricted', []): temp = np.zeros(shape=(len(one_mulliken), 2)) for ii, entry in enumerate(one_mulliken): temp[ii, 0] = float(entry[0]) temp[ii, 1] = float(entry[1]) else: temp = np.zeros(len(one_mulliken)) for ii, entry in enumerate(one_mulliken): temp[ii] = float(entry[0]) real_mulliken += [temp] self.data["Mulliken"] = real_mulliken
['def', '_read_mulliken', '(', 'self', ')', ':', 'if', 'self', '.', 'data', '.', 'get', '(', "'unrestricted'", ',', '[', ']', ')', ':', 'header_pattern', '=', 'r"\\-+\\s+Ground-State Mulliken Net Atomic Charges\\s+Atom\\s+Charge \\(a\\.u\\.\\)\\s+Spin\\s\\(a\\.u\\.\\)\\s+\\-+"', 'table_pattern', '=', 'r"\\s+\\d+\\s\\w+\\s+([\\d\\-\\.]+)\\s+([\\d\\-\\.]+)"', 'footer_pattern', '=', 'r"\\s\\s\\-+\\s+Sum of atomic charges"', 'else', ':', 'header_pattern', '=', 'r"\\-+\\s+Ground-State Mulliken Net Atomic Charges\\s+Atom\\s+Charge \\(a\\.u\\.\\)\\s+\\-+"', 'table_pattern', '=', 'r"\\s+\\d+\\s\\w+\\s+([\\d\\-\\.]+)"', 'footer_pattern', '=', 'r"\\s\\s\\-+\\s+Sum of atomic charges"', 'temp_mulliken', '=', 'read_table_pattern', '(', 'self', '.', 'text', ',', 'header_pattern', ',', 'table_pattern', ',', 'footer_pattern', ')', 'real_mulliken', '=', '[', ']', 'for', 'one_mulliken', 'in', 'temp_mulliken', ':', 'if', 'self', '.', 'data', '.', 'get', '(', "'unrestricted'", ',', '[', ']', ')', ':', 'temp', '=', 'np', '.', 'zeros', '(', 'shape', '=', '(', 'len', '(', 'one_mulliken', ')', ',', '2', ')', ')', 'for', 'ii', ',', 'entry', 'in', 'enumerate', '(', 'one_mulliken', ')', ':', 'temp', '[', 'ii', ',', '0', ']', '=', 'float', '(', 'entry', '[', '0', ']', ')', 'temp', '[', 'ii', ',', '1', ']', '=', 'float', '(', 'entry', '[', '1', ']', ')', 'else', ':', 'temp', '=', 'np', '.', 'zeros', '(', 'len', '(', 'one_mulliken', ')', ')', 'for', 'ii', ',', 'entry', 'in', 'enumerate', '(', 'one_mulliken', ')', ':', 'temp', '[', 'ii', ']', '=', 'float', '(', 'entry', '[', '0', ']', ')', 'real_mulliken', '+=', '[', 'temp', ']', 'self', '.', 'data', '[', '"Mulliken"', ']', '=', 'real_mulliken']
Parses Mulliken charges. Also parses spins given an unrestricted SCF.
['Parses', 'Mulliken', 'charges', '.', 'Also', 'parses', 'spins', 'given', 'an', 'unrestricted', 'SCF', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/qchem/outputs.py#L374-L402
7,101
krukas/Trionyx
trionyx/trionyx/views/core.py
DeleteView.get_context_data
def get_context_data(self, **kwargs): """Add context data to view""" context = super().get_context_data(**kwargs) context.update({ 'title': self.title, 'submit_value': self.submit_value, 'cancel_url': self.cancel_url }) return context
python
def get_context_data(self, **kwargs): """Add context data to view""" context = super().get_context_data(**kwargs) context.update({ 'title': self.title, 'submit_value': self.submit_value, 'cancel_url': self.cancel_url }) return context
['def', 'get_context_data', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'context', '=', 'super', '(', ')', '.', 'get_context_data', '(', '*', '*', 'kwargs', ')', 'context', '.', 'update', '(', '{', "'title'", ':', 'self', '.', 'title', ',', "'submit_value'", ':', 'self', '.', 'submit_value', ',', "'cancel_url'", ':', 'self', '.', 'cancel_url', '}', ')', 'return', 'context']
Add context data to view
['Add', 'context', 'data', 'to', 'view']
train
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L612-L620
7,102
rameshg87/pyremotevbox
pyremotevbox/ZSI/address.py
Address._checkFrom
def _checkFrom(self, pyobj): '''WS-Address From, XXX currently not checking the hostname, not forwarding messages. pyobj -- From server returned. ''' if pyobj is None: return value = pyobj._Address if value != self._addressTo: scheme,netloc,path,query,fragment = urlparse.urlsplit(value) hostport = netloc.split(':') schemeF,netlocF,pathF,queryF,fragmentF = urlparse.urlsplit(self._addressTo) if scheme==schemeF and path==pathF and query==queryF and fragment==fragmentF: netloc = netloc.split(':') + ['80'] netlocF = netlocF.split(':') + ['80'] if netloc[1]==netlocF[1] and (socket.gethostbyname(netlocF[0]) in ('127.0.0.1', socket.gethostbyname(netloc[0]))): return raise WSActionException, 'wrong WS-Address From(%s), expecting %s'%(value,self._addressTo)
python
def _checkFrom(self, pyobj): '''WS-Address From, XXX currently not checking the hostname, not forwarding messages. pyobj -- From server returned. ''' if pyobj is None: return value = pyobj._Address if value != self._addressTo: scheme,netloc,path,query,fragment = urlparse.urlsplit(value) hostport = netloc.split(':') schemeF,netlocF,pathF,queryF,fragmentF = urlparse.urlsplit(self._addressTo) if scheme==schemeF and path==pathF and query==queryF and fragment==fragmentF: netloc = netloc.split(':') + ['80'] netlocF = netlocF.split(':') + ['80'] if netloc[1]==netlocF[1] and (socket.gethostbyname(netlocF[0]) in ('127.0.0.1', socket.gethostbyname(netloc[0]))): return raise WSActionException, 'wrong WS-Address From(%s), expecting %s'%(value,self._addressTo)
['def', '_checkFrom', '(', 'self', ',', 'pyobj', ')', ':', 'if', 'pyobj', 'is', 'None', ':', 'return', 'value', '=', 'pyobj', '.', '_Address', 'if', 'value', '!=', 'self', '.', '_addressTo', ':', 'scheme', ',', 'netloc', ',', 'path', ',', 'query', ',', 'fragment', '=', 'urlparse', '.', 'urlsplit', '(', 'value', ')', 'hostport', '=', 'netloc', '.', 'split', '(', "':'", ')', 'schemeF', ',', 'netlocF', ',', 'pathF', ',', 'queryF', ',', 'fragmentF', '=', 'urlparse', '.', 'urlsplit', '(', 'self', '.', '_addressTo', ')', 'if', 'scheme', '==', 'schemeF', 'and', 'path', '==', 'pathF', 'and', 'query', '==', 'queryF', 'and', 'fragment', '==', 'fragmentF', ':', 'netloc', '=', 'netloc', '.', 'split', '(', "':'", ')', '+', '[', "'80'", ']', 'netlocF', '=', 'netlocF', '.', 'split', '(', "':'", ')', '+', '[', "'80'", ']', 'if', 'netloc', '[', '1', ']', '==', 'netlocF', '[', '1', ']', 'and', '(', 'socket', '.', 'gethostbyname', '(', 'netlocF', '[', '0', ']', ')', 'in', '(', "'127.0.0.1'", ',', 'socket', '.', 'gethostbyname', '(', 'netloc', '[', '0', ']', ')', ')', ')', ':', 'return', 'raise', 'WSActionException', ',', "'wrong WS-Address From(%s), expecting %s'", '%', '(', 'value', ',', 'self', '.', '_addressTo', ')']
WS-Address From, XXX currently not checking the hostname, not forwarding messages. pyobj -- From server returned.
['WS', '-', 'Address', 'From', 'XXX', 'currently', 'not', 'checking', 'the', 'hostname', 'not', 'forwarding', 'messages', '.', 'pyobj', '--', 'From', 'server', 'returned', '.']
train
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/address.py#L61-L79
7,103
juju/python-libjuju
juju/unit.py
Unit.run
async def run(self, command, timeout=None): """Run command on this unit. :param str command: The command to run :param int timeout: Time, in seconds, to wait before command is considered failed :returns: A :class:`juju.action.Action` instance. """ action = client.ActionFacade.from_connection(self.connection) log.debug( 'Running `%s` on %s', command, self.name) if timeout: # Convert seconds to nanoseconds timeout = int(timeout * 1000000000) res = await action.Run( [], command, [], timeout, [self.name], ) return await self.model.wait_for_action(res.results[0].action.tag)
python
async def run(self, command, timeout=None): """Run command on this unit. :param str command: The command to run :param int timeout: Time, in seconds, to wait before command is considered failed :returns: A :class:`juju.action.Action` instance. """ action = client.ActionFacade.from_connection(self.connection) log.debug( 'Running `%s` on %s', command, self.name) if timeout: # Convert seconds to nanoseconds timeout = int(timeout * 1000000000) res = await action.Run( [], command, [], timeout, [self.name], ) return await self.model.wait_for_action(res.results[0].action.tag)
['async', 'def', 'run', '(', 'self', ',', 'command', ',', 'timeout', '=', 'None', ')', ':', 'action', '=', 'client', '.', 'ActionFacade', '.', 'from_connection', '(', 'self', '.', 'connection', ')', 'log', '.', 'debug', '(', "'Running `%s` on %s'", ',', 'command', ',', 'self', '.', 'name', ')', 'if', 'timeout', ':', '# Convert seconds to nanoseconds', 'timeout', '=', 'int', '(', 'timeout', '*', '1000000000', ')', 'res', '=', 'await', 'action', '.', 'Run', '(', '[', ']', ',', 'command', ',', '[', ']', ',', 'timeout', ',', '[', 'self', '.', 'name', ']', ',', ')', 'return', 'await', 'self', '.', 'model', '.', 'wait_for_action', '(', 'res', '.', 'results', '[', '0', ']', '.', 'action', '.', 'tag', ')']
Run command on this unit. :param str command: The command to run :param int timeout: Time, in seconds, to wait before command is considered failed :returns: A :class:`juju.action.Action` instance.
['Run', 'command', 'on', 'this', 'unit', '.']
train
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/unit.py#L122-L147
7,104
Gandi/gandi.cli
gandi/cli/modules/network.py
Iface.update
def update(cls, id, bandwidth, vm, background): """ Update this iface. """ if not background and not cls.intty(): background = True iface_params = {} iface_id = cls.usable_id(id) if bandwidth: iface_params['bandwidth'] = bandwidth if iface_params: result = cls.call('hosting.iface.update', iface_id, iface_params) if background: return result # interactive mode, run a progress bar cls.echo('Updating your iface %s.' % id) cls.display_progress(result) if not vm: return vm_id = Iaas.usable_id(vm) opers = cls._detach(iface_id) if opers: cls.echo('Detaching iface.') cls.display_progress(opers) result = cls._attach(iface_id, vm_id) if background: return result cls.echo('Attaching your iface.') cls.display_progress(result)
python
def update(cls, id, bandwidth, vm, background): """ Update this iface. """ if not background and not cls.intty(): background = True iface_params = {} iface_id = cls.usable_id(id) if bandwidth: iface_params['bandwidth'] = bandwidth if iface_params: result = cls.call('hosting.iface.update', iface_id, iface_params) if background: return result # interactive mode, run a progress bar cls.echo('Updating your iface %s.' % id) cls.display_progress(result) if not vm: return vm_id = Iaas.usable_id(vm) opers = cls._detach(iface_id) if opers: cls.echo('Detaching iface.') cls.display_progress(opers) result = cls._attach(iface_id, vm_id) if background: return result cls.echo('Attaching your iface.') cls.display_progress(result)
['def', 'update', '(', 'cls', ',', 'id', ',', 'bandwidth', ',', 'vm', ',', 'background', ')', ':', 'if', 'not', 'background', 'and', 'not', 'cls', '.', 'intty', '(', ')', ':', 'background', '=', 'True', 'iface_params', '=', '{', '}', 'iface_id', '=', 'cls', '.', 'usable_id', '(', 'id', ')', 'if', 'bandwidth', ':', 'iface_params', '[', "'bandwidth'", ']', '=', 'bandwidth', 'if', 'iface_params', ':', 'result', '=', 'cls', '.', 'call', '(', "'hosting.iface.update'", ',', 'iface_id', ',', 'iface_params', ')', 'if', 'background', ':', 'return', 'result', '# interactive mode, run a progress bar', 'cls', '.', 'echo', '(', "'Updating your iface %s.'", '%', 'id', ')', 'cls', '.', 'display_progress', '(', 'result', ')', 'if', 'not', 'vm', ':', 'return', 'vm_id', '=', 'Iaas', '.', 'usable_id', '(', 'vm', ')', 'opers', '=', 'cls', '.', '_detach', '(', 'iface_id', ')', 'if', 'opers', ':', 'cls', '.', 'echo', '(', "'Detaching iface.'", ')', 'cls', '.', 'display_progress', '(', 'opers', ')', 'result', '=', 'cls', '.', '_attach', '(', 'iface_id', ',', 'vm_id', ')', 'if', 'background', ':', 'return', 'result', 'cls', '.', 'echo', '(', "'Attaching your iface.'", ')', 'cls', '.', 'display_progress', '(', 'result', ')']
Update this iface.
['Update', 'this', 'iface', '.']
train
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/network.py#L430-L466
7,105
Kozea/wdb
client/wdb/__init__.py
Wdb.handle_exception
def handle_exception(self, frame, exc_info): """This function is called if an exception occurs, but only if we are to stop at or just below this level.""" type_, value, tb = exc_info # Python 3 is broken see http://bugs.python.org/issue17413 _value = value if not isinstance(_value, BaseException): _value = type_(value) fake_exc_info = type_, _value, tb log.error('Exception during trace', exc_info=fake_exc_info) self.obj_cache[id(exc_info)] = exc_info self.extra_vars['__exception__'] = exc_info exception = type_.__name__ exception_description = str(value) init = 'Echo|%s' % dump({ 'for': '__exception__', 'val': escape('%s: %s') % (exception, exception_description) }) # User exception is 4 frames away from exception frame = frame or sys._getframe().f_back.f_back.f_back.f_back self.interaction( frame, tb, exception, exception_description, init=init )
python
def handle_exception(self, frame, exc_info): """This function is called if an exception occurs, but only if we are to stop at or just below this level.""" type_, value, tb = exc_info # Python 3 is broken see http://bugs.python.org/issue17413 _value = value if not isinstance(_value, BaseException): _value = type_(value) fake_exc_info = type_, _value, tb log.error('Exception during trace', exc_info=fake_exc_info) self.obj_cache[id(exc_info)] = exc_info self.extra_vars['__exception__'] = exc_info exception = type_.__name__ exception_description = str(value) init = 'Echo|%s' % dump({ 'for': '__exception__', 'val': escape('%s: %s') % (exception, exception_description) }) # User exception is 4 frames away from exception frame = frame or sys._getframe().f_back.f_back.f_back.f_back self.interaction( frame, tb, exception, exception_description, init=init )
['def', 'handle_exception', '(', 'self', ',', 'frame', ',', 'exc_info', ')', ':', 'type_', ',', 'value', ',', 'tb', '=', 'exc_info', '# Python 3 is broken see http://bugs.python.org/issue17413', '_value', '=', 'value', 'if', 'not', 'isinstance', '(', '_value', ',', 'BaseException', ')', ':', '_value', '=', 'type_', '(', 'value', ')', 'fake_exc_info', '=', 'type_', ',', '_value', ',', 'tb', 'log', '.', 'error', '(', "'Exception during trace'", ',', 'exc_info', '=', 'fake_exc_info', ')', 'self', '.', 'obj_cache', '[', 'id', '(', 'exc_info', ')', ']', '=', 'exc_info', 'self', '.', 'extra_vars', '[', "'__exception__'", ']', '=', 'exc_info', 'exception', '=', 'type_', '.', '__name__', 'exception_description', '=', 'str', '(', 'value', ')', 'init', '=', "'Echo|%s'", '%', 'dump', '(', '{', "'for'", ':', "'__exception__'", ',', "'val'", ':', 'escape', '(', "'%s: %s'", ')', '%', '(', 'exception', ',', 'exception_description', ')', '}', ')', '# User exception is 4 frames away from exception', 'frame', '=', 'frame', 'or', 'sys', '.', '_getframe', '(', ')', '.', 'f_back', '.', 'f_back', '.', 'f_back', '.', 'f_back', 'self', '.', 'interaction', '(', 'frame', ',', 'tb', ',', 'exception', ',', 'exception_description', ',', 'init', '=', 'init', ')']
This function is called if an exception occurs, but only if we are to stop at or just below this level.
['This', 'function', 'is', 'called', 'if', 'an', 'exception', 'occurs', 'but', 'only', 'if', 'we', 'are', 'to', 'stop', 'at', 'or', 'just', 'below', 'this', 'level', '.']
train
https://github.com/Kozea/wdb/blob/6af7901b02e866d76f8b0a697a8c078e5b70d1aa/client/wdb/__init__.py#L922-L944
7,106
pypa/pipenv
pipenv/vendor/cerberus/validator.py
BareValidator._validate_schema
def _validate_schema(self, schema, field, value): """ {'type': ['dict', 'string'], 'anyof': [{'validator': 'schema'}, {'validator': 'bulk_schema'}]} """ if schema is None: return if isinstance(value, Sequence) and not isinstance(value, _str_type): self.__validate_schema_sequence(field, schema, value) elif isinstance(value, Mapping): self.__validate_schema_mapping(field, schema, value)
python
def _validate_schema(self, schema, field, value): """ {'type': ['dict', 'string'], 'anyof': [{'validator': 'schema'}, {'validator': 'bulk_schema'}]} """ if schema is None: return if isinstance(value, Sequence) and not isinstance(value, _str_type): self.__validate_schema_sequence(field, schema, value) elif isinstance(value, Mapping): self.__validate_schema_mapping(field, schema, value)
['def', '_validate_schema', '(', 'self', ',', 'schema', ',', 'field', ',', 'value', ')', ':', 'if', 'schema', 'is', 'None', ':', 'return', 'if', 'isinstance', '(', 'value', ',', 'Sequence', ')', 'and', 'not', 'isinstance', '(', 'value', ',', '_str_type', ')', ':', 'self', '.', '__validate_schema_sequence', '(', 'field', ',', 'schema', ',', 'value', ')', 'elif', 'isinstance', '(', 'value', ',', 'Mapping', ')', ':', 'self', '.', '__validate_schema_mapping', '(', 'field', ',', 'schema', ',', 'value', ')']
{'type': ['dict', 'string'], 'anyof': [{'validator': 'schema'}, {'validator': 'bulk_schema'}]}
['{', 'type', ':', '[', 'dict', 'string', ']', 'anyof', ':', '[', '{', 'validator', ':', 'schema', '}', '{', 'validator', ':', 'bulk_schema', '}', ']', '}']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/cerberus/validator.py#L1226-L1236
7,107
santoshphilip/eppy
eppy/modeleditor.py
addthisbunch
def addthisbunch(bunchdt, data, commdct, thisbunch, theidf): """add a bunch to model. abunch usually comes from another idf file or it can be used to copy within the idf file""" key = thisbunch.key.upper() obj = copy.copy(thisbunch.obj) abunch = obj2bunch(data, commdct, obj) bunchdt[key].append(abunch) return abunch
python
def addthisbunch(bunchdt, data, commdct, thisbunch, theidf): """add a bunch to model. abunch usually comes from another idf file or it can be used to copy within the idf file""" key = thisbunch.key.upper() obj = copy.copy(thisbunch.obj) abunch = obj2bunch(data, commdct, obj) bunchdt[key].append(abunch) return abunch
['def', 'addthisbunch', '(', 'bunchdt', ',', 'data', ',', 'commdct', ',', 'thisbunch', ',', 'theidf', ')', ':', 'key', '=', 'thisbunch', '.', 'key', '.', 'upper', '(', ')', 'obj', '=', 'copy', '.', 'copy', '(', 'thisbunch', '.', 'obj', ')', 'abunch', '=', 'obj2bunch', '(', 'data', ',', 'commdct', ',', 'obj', ')', 'bunchdt', '[', 'key', ']', '.', 'append', '(', 'abunch', ')', 'return', 'abunch']
add a bunch to model. abunch usually comes from another idf file or it can be used to copy within the idf file
['add', 'a', 'bunch', 'to', 'model', '.', 'abunch', 'usually', 'comes', 'from', 'another', 'idf', 'file', 'or', 'it', 'can', 'be', 'used', 'to', 'copy', 'within', 'the', 'idf', 'file']
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L136-L144
7,108
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/string_headers.py
generate_make_string
def generate_make_string(out_f, max_step): """Generate the make_string template""" steps = [2 ** n for n in xrange(int(math.log(max_step, 2)), -1, -1)] with Namespace( out_f, ['boost', 'metaparse', 'v{0}'.format(VERSION), 'impl'] ) as nsp: generate_take(out_f, steps, nsp.prefix()) out_f.write( '{0}template <int LenNow, int LenRemaining, char... Cs>\n' '{0}struct make_string;\n' '\n' '{0}template <char... Cs>' ' struct make_string<0, 0, Cs...> : string<> {{}};\n' .format(nsp.prefix()) ) disable_sun = False for i in reversed(steps): if i > 64 and not disable_sun: out_f.write('#ifndef __SUNPRO_CC\n') disable_sun = True out_f.write( '{0}template <int LenRemaining,{1}char... Cs>' ' struct make_string<{2},LenRemaining,{3}Cs...> :' ' concat<string<{4}>,' ' typename make_string<take(LenRemaining),' 'LenRemaining-take(LenRemaining),Cs...>::type> {{}};\n' .format( nsp.prefix(), ''.join('char {0},'.format(n) for n in unique_names(i)), i, ''.join('{0},'.format(n) for n in unique_names(i)), ','.join(unique_names(i)) ) ) if disable_sun: out_f.write('#endif\n')
python
def generate_make_string(out_f, max_step): """Generate the make_string template""" steps = [2 ** n for n in xrange(int(math.log(max_step, 2)), -1, -1)] with Namespace( out_f, ['boost', 'metaparse', 'v{0}'.format(VERSION), 'impl'] ) as nsp: generate_take(out_f, steps, nsp.prefix()) out_f.write( '{0}template <int LenNow, int LenRemaining, char... Cs>\n' '{0}struct make_string;\n' '\n' '{0}template <char... Cs>' ' struct make_string<0, 0, Cs...> : string<> {{}};\n' .format(nsp.prefix()) ) disable_sun = False for i in reversed(steps): if i > 64 and not disable_sun: out_f.write('#ifndef __SUNPRO_CC\n') disable_sun = True out_f.write( '{0}template <int LenRemaining,{1}char... Cs>' ' struct make_string<{2},LenRemaining,{3}Cs...> :' ' concat<string<{4}>,' ' typename make_string<take(LenRemaining),' 'LenRemaining-take(LenRemaining),Cs...>::type> {{}};\n' .format( nsp.prefix(), ''.join('char {0},'.format(n) for n in unique_names(i)), i, ''.join('{0},'.format(n) for n in unique_names(i)), ','.join(unique_names(i)) ) ) if disable_sun: out_f.write('#endif\n')
['def', 'generate_make_string', '(', 'out_f', ',', 'max_step', ')', ':', 'steps', '=', '[', '2', '**', 'n', 'for', 'n', 'in', 'xrange', '(', 'int', '(', 'math', '.', 'log', '(', 'max_step', ',', '2', ')', ')', ',', '-', '1', ',', '-', '1', ')', ']', 'with', 'Namespace', '(', 'out_f', ',', '[', "'boost'", ',', "'metaparse'", ',', "'v{0}'", '.', 'format', '(', 'VERSION', ')', ',', "'impl'", ']', ')', 'as', 'nsp', ':', 'generate_take', '(', 'out_f', ',', 'steps', ',', 'nsp', '.', 'prefix', '(', ')', ')', 'out_f', '.', 'write', '(', "'{0}template <int LenNow, int LenRemaining, char... Cs>\\n'", "'{0}struct make_string;\\n'", "'\\n'", "'{0}template <char... Cs>'", "' struct make_string<0, 0, Cs...> : string<> {{}};\\n'", '.', 'format', '(', 'nsp', '.', 'prefix', '(', ')', ')', ')', 'disable_sun', '=', 'False', 'for', 'i', 'in', 'reversed', '(', 'steps', ')', ':', 'if', 'i', '>', '64', 'and', 'not', 'disable_sun', ':', 'out_f', '.', 'write', '(', "'#ifndef __SUNPRO_CC\\n'", ')', 'disable_sun', '=', 'True', 'out_f', '.', 'write', '(', "'{0}template <int LenRemaining,{1}char... Cs>'", "' struct make_string<{2},LenRemaining,{3}Cs...> :'", "' concat<string<{4}>,'", "' typename make_string<take(LenRemaining),'", "'LenRemaining-take(LenRemaining),Cs...>::type> {{}};\\n'", '.', 'format', '(', 'nsp', '.', 'prefix', '(', ')', ',', "''", '.', 'join', '(', "'char {0},'", '.', 'format', '(', 'n', ')', 'for', 'n', 'in', 'unique_names', '(', 'i', ')', ')', ',', 'i', ',', "''", '.', 'join', '(', "'{0},'", '.', 'format', '(', 'n', ')', 'for', 'n', 'in', 'unique_names', '(', 'i', ')', ')', ',', "','", '.', 'join', '(', 'unique_names', '(', 'i', ')', ')', ')', ')', 'if', 'disable_sun', ':', 'out_f', '.', 'write', '(', "'#endif\\n'", ')']
Generate the make_string template
['Generate', 'the', 'make_string', 'template']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/string_headers.py#L160-L199
7,109
pybel/pybel
src/pybel/struct/mutation/induction/random_subgraph.py
get_random_subgraph
def get_random_subgraph(graph, number_edges=None, number_seed_edges=None, seed=None, invert_degrees=None): """Generate a random subgraph based on weighted random walks from random seed edges. :type graph: pybel.BELGraph graph :param Optional[int] number_edges: Maximum number of edges. Defaults to :data:`pybel_tools.constants.SAMPLE_RANDOM_EDGE_COUNT` (250). :param Optional[int] number_seed_edges: Number of nodes to start with (which likely results in different components in large graphs). Defaults to :data:`SAMPLE_RANDOM_EDGE_SEED_COUNT` (5). :param Optional[int] seed: A seed for the random state :param Optional[bool] invert_degrees: Should the degrees be inverted? Defaults to true. :rtype: pybel.BELGraph """ if number_edges is None: number_edges = SAMPLE_RANDOM_EDGE_COUNT if number_seed_edges is None: number_seed_edges = SAMPLE_RANDOM_EDGE_SEED_COUNT if seed is not None: random.seed(seed) # Check if graph will sample full graph, and just return it if it would if graph.number_of_edges() <= number_edges: log.info('sampled full graph') return graph.copy() log.debug('getting random sub-graph with %d seed edges, %d final edges, and seed=%s', number_seed_edges, number_edges, seed) # Get initial graph with `number_seed_edges` edges result = get_graph_with_random_edges(graph, number_seed_edges) number_edges_remaining = number_edges - result.number_of_edges() _helper( result, graph, number_edges_remaining, node_blacklist=set(), # This is the set of nodes that should no longer be chosen to grow from invert_degrees=invert_degrees, ) log.debug('removing isolated nodes') remove_isolated_nodes(result) # update metadata update_node_helper(graph, result) update_metadata(graph, result) return result
python
def get_random_subgraph(graph, number_edges=None, number_seed_edges=None, seed=None, invert_degrees=None): """Generate a random subgraph based on weighted random walks from random seed edges. :type graph: pybel.BELGraph graph :param Optional[int] number_edges: Maximum number of edges. Defaults to :data:`pybel_tools.constants.SAMPLE_RANDOM_EDGE_COUNT` (250). :param Optional[int] number_seed_edges: Number of nodes to start with (which likely results in different components in large graphs). Defaults to :data:`SAMPLE_RANDOM_EDGE_SEED_COUNT` (5). :param Optional[int] seed: A seed for the random state :param Optional[bool] invert_degrees: Should the degrees be inverted? Defaults to true. :rtype: pybel.BELGraph """ if number_edges is None: number_edges = SAMPLE_RANDOM_EDGE_COUNT if number_seed_edges is None: number_seed_edges = SAMPLE_RANDOM_EDGE_SEED_COUNT if seed is not None: random.seed(seed) # Check if graph will sample full graph, and just return it if it would if graph.number_of_edges() <= number_edges: log.info('sampled full graph') return graph.copy() log.debug('getting random sub-graph with %d seed edges, %d final edges, and seed=%s', number_seed_edges, number_edges, seed) # Get initial graph with `number_seed_edges` edges result = get_graph_with_random_edges(graph, number_seed_edges) number_edges_remaining = number_edges - result.number_of_edges() _helper( result, graph, number_edges_remaining, node_blacklist=set(), # This is the set of nodes that should no longer be chosen to grow from invert_degrees=invert_degrees, ) log.debug('removing isolated nodes') remove_isolated_nodes(result) # update metadata update_node_helper(graph, result) update_metadata(graph, result) return result
['def', 'get_random_subgraph', '(', 'graph', ',', 'number_edges', '=', 'None', ',', 'number_seed_edges', '=', 'None', ',', 'seed', '=', 'None', ',', 'invert_degrees', '=', 'None', ')', ':', 'if', 'number_edges', 'is', 'None', ':', 'number_edges', '=', 'SAMPLE_RANDOM_EDGE_COUNT', 'if', 'number_seed_edges', 'is', 'None', ':', 'number_seed_edges', '=', 'SAMPLE_RANDOM_EDGE_SEED_COUNT', 'if', 'seed', 'is', 'not', 'None', ':', 'random', '.', 'seed', '(', 'seed', ')', '# Check if graph will sample full graph, and just return it if it would', 'if', 'graph', '.', 'number_of_edges', '(', ')', '<=', 'number_edges', ':', 'log', '.', 'info', '(', "'sampled full graph'", ')', 'return', 'graph', '.', 'copy', '(', ')', 'log', '.', 'debug', '(', "'getting random sub-graph with %d seed edges, %d final edges, and seed=%s'", ',', 'number_seed_edges', ',', 'number_edges', ',', 'seed', ')', '# Get initial graph with `number_seed_edges` edges', 'result', '=', 'get_graph_with_random_edges', '(', 'graph', ',', 'number_seed_edges', ')', 'number_edges_remaining', '=', 'number_edges', '-', 'result', '.', 'number_of_edges', '(', ')', '_helper', '(', 'result', ',', 'graph', ',', 'number_edges_remaining', ',', 'node_blacklist', '=', 'set', '(', ')', ',', '# This is the set of nodes that should no longer be chosen to grow from', 'invert_degrees', '=', 'invert_degrees', ',', ')', 'log', '.', 'debug', '(', "'removing isolated nodes'", ')', 'remove_isolated_nodes', '(', 'result', ')', '# update metadata', 'update_node_helper', '(', 'graph', ',', 'result', ')', 'update_metadata', '(', 'graph', ',', 'result', ')', 'return', 'result']
Generate a random subgraph based on weighted random walks from random seed edges. :type graph: pybel.BELGraph graph :param Optional[int] number_edges: Maximum number of edges. Defaults to :data:`pybel_tools.constants.SAMPLE_RANDOM_EDGE_COUNT` (250). :param Optional[int] number_seed_edges: Number of nodes to start with (which likely results in different components in large graphs). Defaults to :data:`SAMPLE_RANDOM_EDGE_SEED_COUNT` (5). :param Optional[int] seed: A seed for the random state :param Optional[bool] invert_degrees: Should the degrees be inverted? Defaults to true. :rtype: pybel.BELGraph
['Generate', 'a', 'random', 'subgraph', 'based', 'on', 'weighted', 'random', 'walks', 'from', 'random', 'seed', 'edges', '.']
train
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/mutation/induction/random_subgraph.py#L168-L216
7,110
pymc-devs/pymc
pymc/threadpool.py
map_noreturn
def map_noreturn(targ, argslist): """ parallel_call_noreturn(targ, argslist) :Parameters: - targ : function - argslist : list of tuples Does [targ(*args) for args in argslist] using the threadpool. """ # Thanks to Anne Archibald's handythread.py for the exception handling # mechanism. exceptions = [] n_threads = len(argslist) exc_lock = threading.Lock() done_lock = CountDownLatch(n_threads) def eb(wr, el=exc_lock, ex=exceptions, dl=done_lock): el.acquire() ex.append(sys.exc_info()) el.release() dl.countdown() def cb(wr, value, dl=done_lock): dl.countdown() for args in argslist: __PyMCThreadPool__.putRequest( WorkRequest(targ, callback=cb, exc_callback=eb, args=args, requestID=id(args))) done_lock.await_lock() if exceptions: six.reraise(*exceptions[0])
python
def map_noreturn(targ, argslist): """ parallel_call_noreturn(targ, argslist) :Parameters: - targ : function - argslist : list of tuples Does [targ(*args) for args in argslist] using the threadpool. """ # Thanks to Anne Archibald's handythread.py for the exception handling # mechanism. exceptions = [] n_threads = len(argslist) exc_lock = threading.Lock() done_lock = CountDownLatch(n_threads) def eb(wr, el=exc_lock, ex=exceptions, dl=done_lock): el.acquire() ex.append(sys.exc_info()) el.release() dl.countdown() def cb(wr, value, dl=done_lock): dl.countdown() for args in argslist: __PyMCThreadPool__.putRequest( WorkRequest(targ, callback=cb, exc_callback=eb, args=args, requestID=id(args))) done_lock.await_lock() if exceptions: six.reraise(*exceptions[0])
['def', 'map_noreturn', '(', 'targ', ',', 'argslist', ')', ':', "# Thanks to Anne Archibald's handythread.py for the exception handling", '# mechanism.', 'exceptions', '=', '[', ']', 'n_threads', '=', 'len', '(', 'argslist', ')', 'exc_lock', '=', 'threading', '.', 'Lock', '(', ')', 'done_lock', '=', 'CountDownLatch', '(', 'n_threads', ')', 'def', 'eb', '(', 'wr', ',', 'el', '=', 'exc_lock', ',', 'ex', '=', 'exceptions', ',', 'dl', '=', 'done_lock', ')', ':', 'el', '.', 'acquire', '(', ')', 'ex', '.', 'append', '(', 'sys', '.', 'exc_info', '(', ')', ')', 'el', '.', 'release', '(', ')', 'dl', '.', 'countdown', '(', ')', 'def', 'cb', '(', 'wr', ',', 'value', ',', 'dl', '=', 'done_lock', ')', ':', 'dl', '.', 'countdown', '(', ')', 'for', 'args', 'in', 'argslist', ':', '__PyMCThreadPool__', '.', 'putRequest', '(', 'WorkRequest', '(', 'targ', ',', 'callback', '=', 'cb', ',', 'exc_callback', '=', 'eb', ',', 'args', '=', 'args', ',', 'requestID', '=', 'id', '(', 'args', ')', ')', ')', 'done_lock', '.', 'await_lock', '(', ')', 'if', 'exceptions', ':', 'six', '.', 'reraise', '(', '*', 'exceptions', '[', '0', ']', ')']
parallel_call_noreturn(targ, argslist) :Parameters: - targ : function - argslist : list of tuples Does [targ(*args) for args in argslist] using the threadpool.
['parallel_call_noreturn', '(', 'targ', 'argslist', ')']
train
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/threadpool.py#L351-L390
7,111
pandas-dev/pandas
pandas/io/formats/console.py
get_console_size
def get_console_size(): """Return console size as tuple = (width, height). Returns (None,None) in non-interactive session. """ from pandas import get_option display_width = get_option('display.width') # deprecated. display_height = get_option('display.max_rows') # Consider # interactive shell terminal, can detect term size # interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term # size non-interactive script, should disregard term size # in addition # width,height have default values, but setting to 'None' signals # should use Auto-Detection, But only in interactive shell-terminal. # Simple. yeah. if in_interactive_session(): if in_ipython_frontend(): # sane defaults for interactive non-shell terminal # match default for width,height in config_init from pandas._config.config import get_default_val terminal_width = get_default_val('display.width') terminal_height = get_default_val('display.max_rows') else: # pure terminal terminal_width, terminal_height = get_terminal_size() else: terminal_width, terminal_height = None, None # Note if the User sets width/Height to None (auto-detection) # and we're in a script (non-inter), this will return (None,None) # caller needs to deal. return (display_width or terminal_width, display_height or terminal_height)
python
def get_console_size(): """Return console size as tuple = (width, height). Returns (None,None) in non-interactive session. """ from pandas import get_option display_width = get_option('display.width') # deprecated. display_height = get_option('display.max_rows') # Consider # interactive shell terminal, can detect term size # interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term # size non-interactive script, should disregard term size # in addition # width,height have default values, but setting to 'None' signals # should use Auto-Detection, But only in interactive shell-terminal. # Simple. yeah. if in_interactive_session(): if in_ipython_frontend(): # sane defaults for interactive non-shell terminal # match default for width,height in config_init from pandas._config.config import get_default_val terminal_width = get_default_val('display.width') terminal_height = get_default_val('display.max_rows') else: # pure terminal terminal_width, terminal_height = get_terminal_size() else: terminal_width, terminal_height = None, None # Note if the User sets width/Height to None (auto-detection) # and we're in a script (non-inter), this will return (None,None) # caller needs to deal. return (display_width or terminal_width, display_height or terminal_height)
['def', 'get_console_size', '(', ')', ':', 'from', 'pandas', 'import', 'get_option', 'display_width', '=', 'get_option', '(', "'display.width'", ')', '# deprecated.', 'display_height', '=', 'get_option', '(', "'display.max_rows'", ')', '# Consider', '# interactive shell terminal, can detect term size', '# interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term', '# size non-interactive script, should disregard term size', '# in addition', "# width,height have default values, but setting to 'None' signals", '# should use Auto-Detection, But only in interactive shell-terminal.', '# Simple. yeah.', 'if', 'in_interactive_session', '(', ')', ':', 'if', 'in_ipython_frontend', '(', ')', ':', '# sane defaults for interactive non-shell terminal', '# match default for width,height in config_init', 'from', 'pandas', '.', '_config', '.', 'config', 'import', 'get_default_val', 'terminal_width', '=', 'get_default_val', '(', "'display.width'", ')', 'terminal_height', '=', 'get_default_val', '(', "'display.max_rows'", ')', 'else', ':', '# pure terminal', 'terminal_width', ',', 'terminal_height', '=', 'get_terminal_size', '(', ')', 'else', ':', 'terminal_width', ',', 'terminal_height', '=', 'None', ',', 'None', '# Note if the User sets width/Height to None (auto-detection)', "# and we're in a script (non-inter), this will return (None,None)", '# caller needs to deal.', 'return', '(', 'display_width', 'or', 'terminal_width', ',', 'display_height', 'or', 'terminal_height', ')']
Return console size as tuple = (width, height). Returns (None,None) in non-interactive session.
['Return', 'console', 'size', 'as', 'tuple', '=', '(', 'width', 'height', ')', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/console.py#L8-L45
7,112
SecurityInnovation/PGPy
pgpy/pgp.py
PGPKey.pubkey
def pubkey(self): """If the :py:obj:`PGPKey` object is a private key, this method returns a corresponding public key object with all the trimmings. Otherwise, returns ``None`` """ if not self.is_public: if self._sibling is None or isinstance(self._sibling, weakref.ref): # create a new key shell pub = PGPKey() pub.ascii_headers = self.ascii_headers.copy() # get the public half of the primary key pub._key = self._key.pubkey() # get the public half of each subkey for skid, subkey in self.subkeys.items(): pub |= subkey.pubkey # copy user ids and user attributes for uid in self._uids: pub |= copy.copy(uid) # copy signatures that weren't copied with uids for sig in self._signatures: if sig.parent is None: pub |= copy.copy(sig) # keep connect the two halves using a weak reference self._sibling = weakref.ref(pub) pub._sibling = weakref.ref(self) return self._sibling() return None
python
def pubkey(self): """If the :py:obj:`PGPKey` object is a private key, this method returns a corresponding public key object with all the trimmings. Otherwise, returns ``None`` """ if not self.is_public: if self._sibling is None or isinstance(self._sibling, weakref.ref): # create a new key shell pub = PGPKey() pub.ascii_headers = self.ascii_headers.copy() # get the public half of the primary key pub._key = self._key.pubkey() # get the public half of each subkey for skid, subkey in self.subkeys.items(): pub |= subkey.pubkey # copy user ids and user attributes for uid in self._uids: pub |= copy.copy(uid) # copy signatures that weren't copied with uids for sig in self._signatures: if sig.parent is None: pub |= copy.copy(sig) # keep connect the two halves using a weak reference self._sibling = weakref.ref(pub) pub._sibling = weakref.ref(self) return self._sibling() return None
['def', 'pubkey', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'is_public', ':', 'if', 'self', '.', '_sibling', 'is', 'None', 'or', 'isinstance', '(', 'self', '.', '_sibling', ',', 'weakref', '.', 'ref', ')', ':', '# create a new key shell', 'pub', '=', 'PGPKey', '(', ')', 'pub', '.', 'ascii_headers', '=', 'self', '.', 'ascii_headers', '.', 'copy', '(', ')', '# get the public half of the primary key', 'pub', '.', '_key', '=', 'self', '.', '_key', '.', 'pubkey', '(', ')', '# get the public half of each subkey', 'for', 'skid', ',', 'subkey', 'in', 'self', '.', 'subkeys', '.', 'items', '(', ')', ':', 'pub', '|=', 'subkey', '.', 'pubkey', '# copy user ids and user attributes', 'for', 'uid', 'in', 'self', '.', '_uids', ':', 'pub', '|=', 'copy', '.', 'copy', '(', 'uid', ')', "# copy signatures that weren't copied with uids", 'for', 'sig', 'in', 'self', '.', '_signatures', ':', 'if', 'sig', '.', 'parent', 'is', 'None', ':', 'pub', '|=', 'copy', '.', 'copy', '(', 'sig', ')', '# keep connect the two halves using a weak reference', 'self', '.', '_sibling', '=', 'weakref', '.', 'ref', '(', 'pub', ')', 'pub', '.', '_sibling', '=', 'weakref', '.', 'ref', '(', 'self', ')', 'return', 'self', '.', '_sibling', '(', ')', 'return', 'None']
If the :py:obj:`PGPKey` object is a private key, this method returns a corresponding public key object with all the trimmings. Otherwise, returns ``None``
['If', 'the', ':', 'py', ':', 'obj', ':', 'PGPKey', 'object', 'is', 'a', 'private', 'key', 'this', 'method', 'returns', 'a', 'corresponding', 'public', 'key', 'object', 'with', 'all', 'the', 'trimmings', '.', 'Otherwise', 'returns', 'None']
train
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L1317-L1348
7,113
gmr/rejected
rejected/data.py
Measurement.track_duration
def track_duration(self, key): """Context manager that sets a value with the duration of time that it takes to execute whatever it is wrapping. :param str key: The timing name """ if key not in self.durations: self.durations[key] = [] start_time = time.time() try: yield finally: self.durations[key].append( max(start_time, time.time()) - start_time)
python
def track_duration(self, key): """Context manager that sets a value with the duration of time that it takes to execute whatever it is wrapping. :param str key: The timing name """ if key not in self.durations: self.durations[key] = [] start_time = time.time() try: yield finally: self.durations[key].append( max(start_time, time.time()) - start_time)
['def', 'track_duration', '(', 'self', ',', 'key', ')', ':', 'if', 'key', 'not', 'in', 'self', '.', 'durations', ':', 'self', '.', 'durations', '[', 'key', ']', '=', '[', ']', 'start_time', '=', 'time', '.', 'time', '(', ')', 'try', ':', 'yield', 'finally', ':', 'self', '.', 'durations', '[', 'key', ']', '.', 'append', '(', 'max', '(', 'start_time', ',', 'time', '.', 'time', '(', ')', ')', '-', 'start_time', ')']
Context manager that sets a value with the duration of time that it takes to execute whatever it is wrapping. :param str key: The timing name
['Context', 'manager', 'that', 'sets', 'a', 'value', 'with', 'the', 'duration', 'of', 'time', 'that', 'it', 'takes', 'to', 'execute', 'whatever', 'it', 'is', 'wrapping', '.']
train
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/data.py#L257-L271
7,114
lobocv/anonymoususage
anonymoususage/tools.py
fetch
def fetch(dbconn, tablename, n=1, uuid=None, end=True): """ Returns `n` rows from the table's start or end :param dbconn: database connection :param tablename: name of the table :param n: number of rows to return from the end of the table :param uuid: Optional UUID to select from :return: If n > 1, a list of rows. If n=1, a single row """ cur = dbconn.cursor() order = 'DESC' if end else 'ASC' try: if uuid: cur.execute("SELECT * FROM '{}' WHERE UUID='{}' ORDER BY ROWID {} LIMIT {};".format(tablename, uuid, order, n)) else: cur.execute("SELECT * FROM '{}' ORDER BY ROWID {} LIMIT {};".format(tablename, order, n)) except sqlite3.OperationalError as e: if 'no such table' not in getattr(e, 'message', ''): # Suppress logging of errors generated when no table exists logger.error(e) return [] rows = cur.fetchall() return rows
python
def fetch(dbconn, tablename, n=1, uuid=None, end=True): """ Returns `n` rows from the table's start or end :param dbconn: database connection :param tablename: name of the table :param n: number of rows to return from the end of the table :param uuid: Optional UUID to select from :return: If n > 1, a list of rows. If n=1, a single row """ cur = dbconn.cursor() order = 'DESC' if end else 'ASC' try: if uuid: cur.execute("SELECT * FROM '{}' WHERE UUID='{}' ORDER BY ROWID {} LIMIT {};".format(tablename, uuid, order, n)) else: cur.execute("SELECT * FROM '{}' ORDER BY ROWID {} LIMIT {};".format(tablename, order, n)) except sqlite3.OperationalError as e: if 'no such table' not in getattr(e, 'message', ''): # Suppress logging of errors generated when no table exists logger.error(e) return [] rows = cur.fetchall() return rows
['def', 'fetch', '(', 'dbconn', ',', 'tablename', ',', 'n', '=', '1', ',', 'uuid', '=', 'None', ',', 'end', '=', 'True', ')', ':', 'cur', '=', 'dbconn', '.', 'cursor', '(', ')', 'order', '=', "'DESC'", 'if', 'end', 'else', "'ASC'", 'try', ':', 'if', 'uuid', ':', 'cur', '.', 'execute', '(', '"SELECT * FROM \'{}\' WHERE UUID=\'{}\' ORDER BY ROWID {} LIMIT {};"', '.', 'format', '(', 'tablename', ',', 'uuid', ',', 'order', ',', 'n', ')', ')', 'else', ':', 'cur', '.', 'execute', '(', '"SELECT * FROM \'{}\' ORDER BY ROWID {} LIMIT {};"', '.', 'format', '(', 'tablename', ',', 'order', ',', 'n', ')', ')', 'except', 'sqlite3', '.', 'OperationalError', 'as', 'e', ':', 'if', "'no such table'", 'not', 'in', 'getattr', '(', 'e', ',', "'message'", ',', "''", ')', ':', '# Suppress logging of errors generated when no table exists', 'logger', '.', 'error', '(', 'e', ')', 'return', '[', ']', 'rows', '=', 'cur', '.', 'fetchall', '(', ')', 'return', 'rows']
Returns `n` rows from the table's start or end :param dbconn: database connection :param tablename: name of the table :param n: number of rows to return from the end of the table :param uuid: Optional UUID to select from :return: If n > 1, a list of rows. If n=1, a single row
['Returns', 'n', 'rows', 'from', 'the', 'table', 's', 'start', 'or', 'end', ':', 'param', 'dbconn', ':', 'database', 'connection', ':', 'param', 'tablename', ':', 'name', 'of', 'the', 'table', ':', 'param', 'n', ':', 'number', 'of', 'rows', 'to', 'return', 'from', 'the', 'end', 'of', 'the', 'table', ':', 'param', 'uuid', ':', 'Optional', 'UUID', 'to', 'select', 'from', ':', 'return', ':', 'If', 'n', '>', '1', 'a', 'list', 'of', 'rows', '.', 'If', 'n', '=', '1', 'a', 'single', 'row']
train
https://github.com/lobocv/anonymoususage/blob/847bdad0746ad1cc6c57fb9def201beb59fb8300/anonymoususage/tools.py#L168-L190
7,115
EpistasisLab/tpot
tpot/base.py
TPOTBase._operator_count
def _operator_count(self, individual): """Count the number of pipeline operators as a measure of pipeline complexity. Parameters ---------- individual: list A grown tree with leaves at possibly different depths dependending on the condition function. Returns ------- operator_count: int How many operators in a pipeline """ operator_count = 0 for i in range(len(individual)): node = individual[i] if type(node) is deap.gp.Primitive and node.name != 'CombineDFs': operator_count += 1 return operator_count
python
def _operator_count(self, individual): """Count the number of pipeline operators as a measure of pipeline complexity. Parameters ---------- individual: list A grown tree with leaves at possibly different depths dependending on the condition function. Returns ------- operator_count: int How many operators in a pipeline """ operator_count = 0 for i in range(len(individual)): node = individual[i] if type(node) is deap.gp.Primitive and node.name != 'CombineDFs': operator_count += 1 return operator_count
['def', '_operator_count', '(', 'self', ',', 'individual', ')', ':', 'operator_count', '=', '0', 'for', 'i', 'in', 'range', '(', 'len', '(', 'individual', ')', ')', ':', 'node', '=', 'individual', '[', 'i', ']', 'if', 'type', '(', 'node', ')', 'is', 'deap', '.', 'gp', '.', 'Primitive', 'and', 'node', '.', 'name', '!=', "'CombineDFs'", ':', 'operator_count', '+=', '1', 'return', 'operator_count']
Count the number of pipeline operators as a measure of pipeline complexity. Parameters ---------- individual: list A grown tree with leaves at possibly different depths dependending on the condition function. Returns ------- operator_count: int How many operators in a pipeline
['Count', 'the', 'number', 'of', 'pipeline', 'operators', 'as', 'a', 'measure', 'of', 'pipeline', 'complexity', '.']
train
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/base.py#L1653-L1672
7,116
GPflow/GPflow
gpflow/params/parameter.py
Parameter.tf_compilation_index
def tf_compilation_index(self): """ Takes out index from the parameter's tensor name. E.g. parameter tensor name is GPR-0000/kern/lengthscales, the method for that parameter will return '0000' index. """ if self.parameter_tensor is None: return None name = self.parameter_tensor.name return name.split('-', 1)[-1].split('/')[0]
python
def tf_compilation_index(self): """ Takes out index from the parameter's tensor name. E.g. parameter tensor name is GPR-0000/kern/lengthscales, the method for that parameter will return '0000' index. """ if self.parameter_tensor is None: return None name = self.parameter_tensor.name return name.split('-', 1)[-1].split('/')[0]
['def', 'tf_compilation_index', '(', 'self', ')', ':', 'if', 'self', '.', 'parameter_tensor', 'is', 'None', ':', 'return', 'None', 'name', '=', 'self', '.', 'parameter_tensor', '.', 'name', 'return', 'name', '.', 'split', '(', "'-'", ',', '1', ')', '[', '-', '1', ']', '.', 'split', '(', "'/'", ')', '[', '0', ']']
Takes out index from the parameter's tensor name. E.g. parameter tensor name is GPR-0000/kern/lengthscales, the method for that parameter will return '0000' index.
['Takes', 'out', 'index', 'from', 'the', 'parameter', 's', 'tensor', 'name', '.', 'E', '.', 'g', '.', 'parameter', 'tensor', 'name', 'is', 'GPR', '-', '0000', '/', 'kern', '/', 'lengthscales', 'the', 'method', 'for', 'that', 'parameter', 'will', 'return', '0000', 'index', '.']
train
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/params/parameter.py#L309-L317
7,117
pbrisk/unicum
unicum/visibleobject.py
VisibleObject.from_range
def from_range(cls, range_list, register_flag=True): """ core class method to create visible objects from a range (nested list) """ s = dict_from_range(range_list) obj = cls.from_serializable(s, register_flag) return obj
python
def from_range(cls, range_list, register_flag=True): """ core class method to create visible objects from a range (nested list) """ s = dict_from_range(range_list) obj = cls.from_serializable(s, register_flag) return obj
['def', 'from_range', '(', 'cls', ',', 'range_list', ',', 'register_flag', '=', 'True', ')', ':', 's', '=', 'dict_from_range', '(', 'range_list', ')', 'obj', '=', 'cls', '.', 'from_serializable', '(', 's', ',', 'register_flag', ')', 'return', 'obj']
core class method to create visible objects from a range (nested list)
['core', 'class', 'method', 'to', 'create', 'visible', 'objects', 'from', 'a', 'range', '(', 'nested', 'list', ')']
train
https://github.com/pbrisk/unicum/blob/24bfa7355f36847a06646c58e9fd75bd3b689bfe/unicum/visibleobject.py#L68-L72
7,118
ravenac95/lxc4u
lxc4u/service.py
LXCService.list_names
def list_names(cls): """Lists all known LXC names""" response = subwrap.run(['lxc-ls']) output = response.std_out return map(str.strip, output.splitlines())
python
def list_names(cls): """Lists all known LXC names""" response = subwrap.run(['lxc-ls']) output = response.std_out return map(str.strip, output.splitlines())
['def', 'list_names', '(', 'cls', ')', ':', 'response', '=', 'subwrap', '.', 'run', '(', '[', "'lxc-ls'", ']', ')', 'output', '=', 'response', '.', 'std_out', 'return', 'map', '(', 'str', '.', 'strip', ',', 'output', '.', 'splitlines', '(', ')', ')']
Lists all known LXC names
['Lists', 'all', 'known', 'LXC', 'names']
train
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/service.py#L18-L22
7,119
materialsproject/pymatgen
pymatgen/electronic_structure/bandstructure.py
BandStructure.get_kpoint_degeneracy
def get_kpoint_degeneracy(self, kpoint, cartesian=False, tol=1e-2): """ Returns degeneracy of a given k-point based on structure symmetry Args: kpoint (1x3 array): coordinate of the k-point cartesian (bool): kpoint is in cartesian or fractional coordinates tol (float): tolerance below which coordinates are considered equal Returns: (int or None): degeneracy or None if structure is not available """ all_kpts = self.get_sym_eq_kpoints(kpoint, cartesian, tol=tol) if all_kpts is not None: return len(all_kpts)
python
def get_kpoint_degeneracy(self, kpoint, cartesian=False, tol=1e-2): """ Returns degeneracy of a given k-point based on structure symmetry Args: kpoint (1x3 array): coordinate of the k-point cartesian (bool): kpoint is in cartesian or fractional coordinates tol (float): tolerance below which coordinates are considered equal Returns: (int or None): degeneracy or None if structure is not available """ all_kpts = self.get_sym_eq_kpoints(kpoint, cartesian, tol=tol) if all_kpts is not None: return len(all_kpts)
['def', 'get_kpoint_degeneracy', '(', 'self', ',', 'kpoint', ',', 'cartesian', '=', 'False', ',', 'tol', '=', '1e-2', ')', ':', 'all_kpts', '=', 'self', '.', 'get_sym_eq_kpoints', '(', 'kpoint', ',', 'cartesian', ',', 'tol', '=', 'tol', ')', 'if', 'all_kpts', 'is', 'not', 'None', ':', 'return', 'len', '(', 'all_kpts', ')']
Returns degeneracy of a given k-point based on structure symmetry Args: kpoint (1x3 array): coordinate of the k-point cartesian (bool): kpoint is in cartesian or fractional coordinates tol (float): tolerance below which coordinates are considered equal Returns: (int or None): degeneracy or None if structure is not available
['Returns', 'degeneracy', 'of', 'a', 'given', 'k', '-', 'point', 'based', 'on', 'structure', 'symmetry', 'Args', ':', 'kpoint', '(', '1x3', 'array', ')', ':', 'coordinate', 'of', 'the', 'k', '-', 'point', 'cartesian', '(', 'bool', ')', ':', 'kpoint', 'is', 'in', 'cartesian', 'or', 'fractional', 'coordinates', 'tol', '(', 'float', ')', ':', 'tolerance', 'below', 'which', 'coordinates', 'are', 'considered', 'equal']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/bandstructure.py#L525-L538
7,120
thiagopbueno/rddl2tf
rddl2tf/fluent.py
TensorFluent.cos
def cos(cls, x: 'TensorFluent') -> 'TensorFluent': '''Returns a TensorFluent for the cos function. Args: x: The input fluent. Returns: A TensorFluent wrapping the cos function. ''' return cls._unary_op(x, tf.cos, tf.float32)
python
def cos(cls, x: 'TensorFluent') -> 'TensorFluent': '''Returns a TensorFluent for the cos function. Args: x: The input fluent. Returns: A TensorFluent wrapping the cos function. ''' return cls._unary_op(x, tf.cos, tf.float32)
['def', 'cos', '(', 'cls', ',', 'x', ':', "'TensorFluent'", ')', '->', "'TensorFluent'", ':', 'return', 'cls', '.', '_unary_op', '(', 'x', ',', 'tf', '.', 'cos', ',', 'tf', '.', 'float32', ')']
Returns a TensorFluent for the cos function. Args: x: The input fluent. Returns: A TensorFluent wrapping the cos function.
['Returns', 'a', 'TensorFluent', 'for', 'the', 'cos', 'function', '.']
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L334-L343
7,121
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
brocade_common_def.ipv6_ipv6route_route_dest
def ipv6_ipv6route_route_dest(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ipv6 = ET.SubElement(config, "ipv6", xmlns="urn:brocade.com:mgmt:brocade-common-def") ipv6route = ET.SubElement(ipv6, "ipv6route", xmlns="urn:brocade.com:mgmt:brocade-ip-forward") route = ET.SubElement(ipv6route, "route") dest = ET.SubElement(route, "dest") dest.text = kwargs.pop('dest') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def ipv6_ipv6route_route_dest(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ipv6 = ET.SubElement(config, "ipv6", xmlns="urn:brocade.com:mgmt:brocade-common-def") ipv6route = ET.SubElement(ipv6, "ipv6route", xmlns="urn:brocade.com:mgmt:brocade-ip-forward") route = ET.SubElement(ipv6route, "route") dest = ET.SubElement(route, "dest") dest.text = kwargs.pop('dest') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'ipv6_ipv6route_route_dest', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'ipv6', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"ipv6"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-common-def"', ')', 'ipv6route', '=', 'ET', '.', 'SubElement', '(', 'ipv6', ',', '"ipv6route"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-ip-forward"', ')', 'route', '=', 'ET', '.', 'SubElement', '(', 'ipv6route', ',', '"route"', ')', 'dest', '=', 'ET', '.', 'SubElement', '(', 'route', ',', '"dest"', ')', 'dest', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'dest'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L952-L963
7,122
FujiMakoto/AgentML
agentml/common.py
element
def element(element, name, default=None): """ Returns the value of an element, or a default if it's not defined :param element: The XML Element object :type element: etree._Element :param name: The name of the element to evaluate :type name: str :param default: The default value to return if the element is not defined """ element_value = element.find(name) return element_value.text if element_value is not None else default
python
def element(element, name, default=None): """ Returns the value of an element, or a default if it's not defined :param element: The XML Element object :type element: etree._Element :param name: The name of the element to evaluate :type name: str :param default: The default value to return if the element is not defined """ element_value = element.find(name) return element_value.text if element_value is not None else default
['def', 'element', '(', 'element', ',', 'name', ',', 'default', '=', 'None', ')', ':', 'element_value', '=', 'element', '.', 'find', '(', 'name', ')', 'return', 'element_value', '.', 'text', 'if', 'element_value', 'is', 'not', 'None', 'else', 'default']
Returns the value of an element, or a default if it's not defined :param element: The XML Element object :type element: etree._Element :param name: The name of the element to evaluate :type name: str :param default: The default value to return if the element is not defined
['Returns', 'the', 'value', 'of', 'an', 'element', 'or', 'a', 'default', 'if', 'it', 's', 'not', 'defined', ':', 'param', 'element', ':', 'The', 'XML', 'Element', 'object', ':', 'type', 'element', ':', 'etree', '.', '_Element']
train
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/common.py#L127-L140
7,123
UCL-INGI/INGInious
inginious/frontend/user_manager.py
UserManager.get_user_api_key
def get_user_api_key(self, username, create=True): """ Get the API key of a given user. API keys are generated on demand. :param username: :param create: Create the API key if none exists yet :return: the API key assigned to the user, or None if none exists and create is False. """ retval = self._database.users.find_one({"username": username}, {"apikey": 1}) if "apikey" not in retval and create: apikey = self.generate_api_key() self._database.users.update_one({"username": username}, {"$set": {"apikey": apikey}}) elif "apikey" not in retval: apikey = None else: apikey = retval["apikey"] return apikey
python
def get_user_api_key(self, username, create=True): """ Get the API key of a given user. API keys are generated on demand. :param username: :param create: Create the API key if none exists yet :return: the API key assigned to the user, or None if none exists and create is False. """ retval = self._database.users.find_one({"username": username}, {"apikey": 1}) if "apikey" not in retval and create: apikey = self.generate_api_key() self._database.users.update_one({"username": username}, {"$set": {"apikey": apikey}}) elif "apikey" not in retval: apikey = None else: apikey = retval["apikey"] return apikey
['def', 'get_user_api_key', '(', 'self', ',', 'username', ',', 'create', '=', 'True', ')', ':', 'retval', '=', 'self', '.', '_database', '.', 'users', '.', 'find_one', '(', '{', '"username"', ':', 'username', '}', ',', '{', '"apikey"', ':', '1', '}', ')', 'if', '"apikey"', 'not', 'in', 'retval', 'and', 'create', ':', 'apikey', '=', 'self', '.', 'generate_api_key', '(', ')', 'self', '.', '_database', '.', 'users', '.', 'update_one', '(', '{', '"username"', ':', 'username', '}', ',', '{', '"$set"', ':', '{', '"apikey"', ':', 'apikey', '}', '}', ')', 'elif', '"apikey"', 'not', 'in', 'retval', ':', 'apikey', '=', 'None', 'else', ':', 'apikey', '=', 'retval', '[', '"apikey"', ']', 'return', 'apikey']
Get the API key of a given user. API keys are generated on demand. :param username: :param create: Create the API key if none exists yet :return: the API key assigned to the user, or None if none exists and create is False.
['Get', 'the', 'API', 'key', 'of', 'a', 'given', 'user', '.', 'API', 'keys', 'are', 'generated', 'on', 'demand', '.', ':', 'param', 'username', ':', ':', 'param', 'create', ':', 'Create', 'the', 'API', 'key', 'if', 'none', 'exists', 'yet', ':', 'return', ':', 'the', 'API', 'key', 'assigned', 'to', 'the', 'user', 'or', 'None', 'if', 'none', 'exists', 'and', 'create', 'is', 'False', '.']
train
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/user_manager.py#L345-L361
7,124
earlye/nephele
nephele/AwsStack.py
AwsStack.printStack
def printStack(self,wrappedStack,include=None,filters=["*"]): """Prints the stack""" rawStack = wrappedStack['rawStack'] print "==== Stack {} ====".format(rawStack.name) print "Status: {} {}".format(rawStack.stack_status,defaultify(rawStack.stack_status_reason,'')) for resourceType, resources in wrappedStack['resourcesByTypeIndex'].items(): if resourceType in AwsProcessor.resourceTypeAliases: resourceType = AwsProcessor.resourceTypeAliases[resourceType]; if (None == include or resourceType in include) and len(resources): print "== {}:".format(resourceType) logicalIdWidth = 1 resourceStatusWidth = 1 resourceStatusReasonWidth = 1 for index, resource in resources.items(): logicalIdWidth = max(logicalIdWidth,len(resource.logical_id)) resourceStatusWidth = min(50,max(resourceStatusWidth,len(resource.resource_status))) resourceStatusReasonWidth = min(50,max(resourceStatusReasonWidth,len(defaultify(resource.resource_status_reason,'')))) frm = " {{0:3d}}: {{1:{0}}} {{2:{1}}} {{3}}".format(logicalIdWidth,resourceStatusWidth) for index, resource in resources.items(): if fnmatches(resource.logical_id.lower(),filters): print frm.format(index,resource.logical_id, elipsifyMiddle(repr(resource.resource_status),50), elipsifyMiddle(repr(defaultify(resource.resource_status_reason,'')),150))
python
def printStack(self,wrappedStack,include=None,filters=["*"]): """Prints the stack""" rawStack = wrappedStack['rawStack'] print "==== Stack {} ====".format(rawStack.name) print "Status: {} {}".format(rawStack.stack_status,defaultify(rawStack.stack_status_reason,'')) for resourceType, resources in wrappedStack['resourcesByTypeIndex'].items(): if resourceType in AwsProcessor.resourceTypeAliases: resourceType = AwsProcessor.resourceTypeAliases[resourceType]; if (None == include or resourceType in include) and len(resources): print "== {}:".format(resourceType) logicalIdWidth = 1 resourceStatusWidth = 1 resourceStatusReasonWidth = 1 for index, resource in resources.items(): logicalIdWidth = max(logicalIdWidth,len(resource.logical_id)) resourceStatusWidth = min(50,max(resourceStatusWidth,len(resource.resource_status))) resourceStatusReasonWidth = min(50,max(resourceStatusReasonWidth,len(defaultify(resource.resource_status_reason,'')))) frm = " {{0:3d}}: {{1:{0}}} {{2:{1}}} {{3}}".format(logicalIdWidth,resourceStatusWidth) for index, resource in resources.items(): if fnmatches(resource.logical_id.lower(),filters): print frm.format(index,resource.logical_id, elipsifyMiddle(repr(resource.resource_status),50), elipsifyMiddle(repr(defaultify(resource.resource_status_reason,'')),150))
['def', 'printStack', '(', 'self', ',', 'wrappedStack', ',', 'include', '=', 'None', ',', 'filters', '=', '[', '"*"', ']', ')', ':', 'rawStack', '=', 'wrappedStack', '[', "'rawStack'", ']', 'print', '"==== Stack {} ===="', '.', 'format', '(', 'rawStack', '.', 'name', ')', 'print', '"Status: {} {}"', '.', 'format', '(', 'rawStack', '.', 'stack_status', ',', 'defaultify', '(', 'rawStack', '.', 'stack_status_reason', ',', "''", ')', ')', 'for', 'resourceType', ',', 'resources', 'in', 'wrappedStack', '[', "'resourcesByTypeIndex'", ']', '.', 'items', '(', ')', ':', 'if', 'resourceType', 'in', 'AwsProcessor', '.', 'resourceTypeAliases', ':', 'resourceType', '=', 'AwsProcessor', '.', 'resourceTypeAliases', '[', 'resourceType', ']', 'if', '(', 'None', '==', 'include', 'or', 'resourceType', 'in', 'include', ')', 'and', 'len', '(', 'resources', ')', ':', 'print', '"== {}:"', '.', 'format', '(', 'resourceType', ')', 'logicalIdWidth', '=', '1', 'resourceStatusWidth', '=', '1', 'resourceStatusReasonWidth', '=', '1', 'for', 'index', ',', 'resource', 'in', 'resources', '.', 'items', '(', ')', ':', 'logicalIdWidth', '=', 'max', '(', 'logicalIdWidth', ',', 'len', '(', 'resource', '.', 'logical_id', ')', ')', 'resourceStatusWidth', '=', 'min', '(', '50', ',', 'max', '(', 'resourceStatusWidth', ',', 'len', '(', 'resource', '.', 'resource_status', ')', ')', ')', 'resourceStatusReasonWidth', '=', 'min', '(', '50', ',', 'max', '(', 'resourceStatusReasonWidth', ',', 'len', '(', 'defaultify', '(', 'resource', '.', 'resource_status_reason', ',', "''", ')', ')', ')', ')', 'frm', '=', '" {{0:3d}}: {{1:{0}}} {{2:{1}}} {{3}}"', '.', 'format', '(', 'logicalIdWidth', ',', 'resourceStatusWidth', ')', 'for', 'index', ',', 'resource', 'in', 'resources', '.', 'items', '(', ')', ':', 'if', 'fnmatches', '(', 'resource', '.', 'logical_id', '.', 'lower', '(', ')', ',', 'filters', ')', ':', 'print', 'frm', '.', 'format', '(', 'index', ',', 'resource', '.', 'logical_id', ',', 'elipsifyMiddle', '(', 'repr', '(', 'resource', '.', 'resource_status', ')', ',', '50', ')', ',', 'elipsifyMiddle', '(', 'repr', '(', 'defaultify', '(', 'resource', '.', 'resource_status_reason', ',', "''", ')', ')', ',', '150', ')', ')']
Prints the stack
['Prints', 'the', 'stack']
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsStack.py#L95-L118
7,125
jaseg/python-mpv
mpv.py
MPV.wait_for_property
def wait_for_property(self, name, cond=lambda val: val, level_sensitive=True): """Waits until ``cond`` evaluates to a truthy value on the named property. This can be used to wait for properties such as ``idle_active`` indicating the player is done with regular playback and just idling around """ sema = threading.Semaphore(value=0) def observer(name, val): if cond(val): sema.release() self.observe_property(name, observer) if not level_sensitive or not cond(getattr(self, name.replace('-', '_'))): sema.acquire() self.unobserve_property(name, observer)
python
def wait_for_property(self, name, cond=lambda val: val, level_sensitive=True): """Waits until ``cond`` evaluates to a truthy value on the named property. This can be used to wait for properties such as ``idle_active`` indicating the player is done with regular playback and just idling around """ sema = threading.Semaphore(value=0) def observer(name, val): if cond(val): sema.release() self.observe_property(name, observer) if not level_sensitive or not cond(getattr(self, name.replace('-', '_'))): sema.acquire() self.unobserve_property(name, observer)
['def', 'wait_for_property', '(', 'self', ',', 'name', ',', 'cond', '=', 'lambda', 'val', ':', 'val', ',', 'level_sensitive', '=', 'True', ')', ':', 'sema', '=', 'threading', '.', 'Semaphore', '(', 'value', '=', '0', ')', 'def', 'observer', '(', 'name', ',', 'val', ')', ':', 'if', 'cond', '(', 'val', ')', ':', 'sema', '.', 'release', '(', ')', 'self', '.', 'observe_property', '(', 'name', ',', 'observer', ')', 'if', 'not', 'level_sensitive', 'or', 'not', 'cond', '(', 'getattr', '(', 'self', ',', 'name', '.', 'replace', '(', "'-'", ',', "'_'", ')', ')', ')', ':', 'sema', '.', 'acquire', '(', ')', 'self', '.', 'unobserve_property', '(', 'name', ',', 'observer', ')']
Waits until ``cond`` evaluates to a truthy value on the named property. This can be used to wait for properties such as ``idle_active`` indicating the player is done with regular playback and just idling around
['Waits', 'until', 'cond', 'evaluates', 'to', 'a', 'truthy', 'value', 'on', 'the', 'named', 'property', '.', 'This', 'can', 'be', 'used', 'to', 'wait', 'for', 'properties', 'such', 'as', 'idle_active', 'indicating', 'the', 'player', 'is', 'done', 'with', 'regular', 'playback', 'and', 'just', 'idling', 'around']
train
https://github.com/jaseg/python-mpv/blob/7117de4005cc470a45efd9cf2e9657bdf63a9079/mpv.py#L582-L593
7,126
titusjan/argos
argos/config/intcti.py
IntCtiEditor.finalize
def finalize(self): """ Called at clean up. Is used to disconnect signals. """ self.spinBox.valueChanged.disconnect(self.commitChangedValue) super(IntCtiEditor, self).finalize()
python
def finalize(self): """ Called at clean up. Is used to disconnect signals. """ self.spinBox.valueChanged.disconnect(self.commitChangedValue) super(IntCtiEditor, self).finalize()
['def', 'finalize', '(', 'self', ')', ':', 'self', '.', 'spinBox', '.', 'valueChanged', '.', 'disconnect', '(', 'self', '.', 'commitChangedValue', ')', 'super', '(', 'IntCtiEditor', ',', 'self', ')', '.', 'finalize', '(', ')']
Called at clean up. Is used to disconnect signals.
['Called', 'at', 'clean', 'up', '.', 'Is', 'used', 'to', 'disconnect', 'signals', '.']
train
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/intcti.py#L120-L124
7,127
ethereum/py-evm
eth/tools/builder/chain/builders.py
copy
def copy(chain: MiningChain) -> MiningChain: """ Make a copy of the chain at the given state. Actions performed on the resulting chain will not affect the original chain. """ if not isinstance(chain, MiningChain): raise ValidationError("`at_block_number` may only be used with 'MiningChain") base_db = chain.chaindb.db if not isinstance(base_db, AtomicDB): raise ValidationError("Unsupported database type: {0}".format(type(base_db))) if isinstance(base_db.wrapped_db, MemoryDB): db = AtomicDB(MemoryDB(base_db.wrapped_db.kv_store.copy())) else: raise ValidationError("Unsupported wrapped database: {0}".format(type(base_db.wrapped_db))) chain_copy = type(chain)(db, chain.header) return chain_copy
python
def copy(chain: MiningChain) -> MiningChain: """ Make a copy of the chain at the given state. Actions performed on the resulting chain will not affect the original chain. """ if not isinstance(chain, MiningChain): raise ValidationError("`at_block_number` may only be used with 'MiningChain") base_db = chain.chaindb.db if not isinstance(base_db, AtomicDB): raise ValidationError("Unsupported database type: {0}".format(type(base_db))) if isinstance(base_db.wrapped_db, MemoryDB): db = AtomicDB(MemoryDB(base_db.wrapped_db.kv_store.copy())) else: raise ValidationError("Unsupported wrapped database: {0}".format(type(base_db.wrapped_db))) chain_copy = type(chain)(db, chain.header) return chain_copy
['def', 'copy', '(', 'chain', ':', 'MiningChain', ')', '->', 'MiningChain', ':', 'if', 'not', 'isinstance', '(', 'chain', ',', 'MiningChain', ')', ':', 'raise', 'ValidationError', '(', '"`at_block_number` may only be used with \'MiningChain"', ')', 'base_db', '=', 'chain', '.', 'chaindb', '.', 'db', 'if', 'not', 'isinstance', '(', 'base_db', ',', 'AtomicDB', ')', ':', 'raise', 'ValidationError', '(', '"Unsupported database type: {0}"', '.', 'format', '(', 'type', '(', 'base_db', ')', ')', ')', 'if', 'isinstance', '(', 'base_db', '.', 'wrapped_db', ',', 'MemoryDB', ')', ':', 'db', '=', 'AtomicDB', '(', 'MemoryDB', '(', 'base_db', '.', 'wrapped_db', '.', 'kv_store', '.', 'copy', '(', ')', ')', ')', 'else', ':', 'raise', 'ValidationError', '(', '"Unsupported wrapped database: {0}"', '.', 'format', '(', 'type', '(', 'base_db', '.', 'wrapped_db', ')', ')', ')', 'chain_copy', '=', 'type', '(', 'chain', ')', '(', 'db', ',', 'chain', '.', 'header', ')', 'return', 'chain_copy']
Make a copy of the chain at the given state. Actions performed on the resulting chain will not affect the original chain.
['Make', 'a', 'copy', 'of', 'the', 'chain', 'at', 'the', 'given', 'state', '.', 'Actions', 'performed', 'on', 'the', 'resulting', 'chain', 'will', 'not', 'affect', 'the', 'original', 'chain', '.']
train
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/tools/builder/chain/builders.py#L433-L450
7,128
spyder-ide/spyder
spyder/plugins/ipythonconsole/plugin.py
IPythonConsole.disambiguate_fname
def disambiguate_fname(self, fname): """Generate a file name without ambiguation.""" files_path_list = [filename for filename in self.filenames if filename] return sourcecode.disambiguate_fname(files_path_list, fname)
python
def disambiguate_fname(self, fname): """Generate a file name without ambiguation.""" files_path_list = [filename for filename in self.filenames if filename] return sourcecode.disambiguate_fname(files_path_list, fname)
['def', 'disambiguate_fname', '(', 'self', ',', 'fname', ')', ':', 'files_path_list', '=', '[', 'filename', 'for', 'filename', 'in', 'self', '.', 'filenames', 'if', 'filename', ']', 'return', 'sourcecode', '.', 'disambiguate_fname', '(', 'files_path_list', ',', 'fname', ')']
Generate a file name without ambiguation.
['Generate', 'a', 'file', 'name', 'without', 'ambiguation', '.']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/plugin.py#L1269-L1273
7,129
materialsproject/pymatgen
pymatgen/analysis/chemenv/coordination_environments/coordination_geometry_finder.py
LocalGeometryFinder.setup_ordered_indices_local_geometry
def setup_ordered_indices_local_geometry(self, coordination): """ Sets up ordered indices for the local geometry, for testing purposes :param coordination: coordination of the local geometry """ self.icentral_site = 0 self.indices = list(range(1, coordination + 1))
python
def setup_ordered_indices_local_geometry(self, coordination): """ Sets up ordered indices for the local geometry, for testing purposes :param coordination: coordination of the local geometry """ self.icentral_site = 0 self.indices = list(range(1, coordination + 1))
['def', 'setup_ordered_indices_local_geometry', '(', 'self', ',', 'coordination', ')', ':', 'self', '.', 'icentral_site', '=', '0', 'self', '.', 'indices', '=', 'list', '(', 'range', '(', '1', ',', 'coordination', '+', '1', ')', ')']
Sets up ordered indices for the local geometry, for testing purposes :param coordination: coordination of the local geometry
['Sets', 'up', 'ordered', 'indices', 'for', 'the', 'local', 'geometry', 'for', 'testing', 'purposes', ':', 'param', 'coordination', ':', 'coordination', 'of', 'the', 'local', 'geometry']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/chemenv/coordination_environments/coordination_geometry_finder.py#L938-L944
7,130
saltstack/salt
salt/modules/boto_route53.py
create_hosted_zone
def create_hosted_zone(domain_name, caller_ref=None, comment='', private_zone=False, vpc_id=None, vpc_name=None, vpc_region=None, region=None, key=None, keyid=None, profile=None): ''' Create a new Route53 Hosted Zone. Returns a Python data structure with information about the newly created Hosted Zone. domain_name The name of the domain. This must be fully-qualified, terminating with a period. This is the name you have registered with your domain registrar. It is also the name you will delegate from your registrar to the Amazon Route 53 delegation servers returned in response to this request. caller_ref A unique string that identifies the request and that allows create_hosted_zone() calls to be retried without the risk of executing the operation twice. It can take several minutes for the change to replicate globally, and change from PENDING to INSYNC status. Thus it's best to provide some value for this where possible, since duplicate calls while the first is in PENDING status will be accepted and can lead to multiple copies of the zone being created. On the other hand, if a zone is created with a given caller_ref, then deleted, a second attempt to create a zone with the same caller_ref will fail until that caller_ref is flushed from the Route53 system, which can take upwards of 24 hours. comment Any comments you want to include about the hosted zone. private_zone Set True if creating a private hosted zone. vpc_id When creating a private hosted zone, either the VPC ID or VPC Name to associate with is required. Exclusive with vpe_name. Ignored when creating a non-private zone. vpc_name When creating a private hosted zone, either the VPC ID or VPC Name to associate with is required. Exclusive with vpe_id. Ignored when creating a non-private zone. vpc_region When creating a private hosted zone, the region of the associated VPC is required. If not provided, an effort will be made to determine it from vpc_id or vpc_name, where possible. If this fails, you'll need to provide an explicit value for this option. Ignored when creating a non-private zone. region Region endpoint to connect to. key AWS key to bind with. keyid AWS keyid to bind with. profile Dict, or pillar key pointing to a dict, containing AWS region/key/keyid. CLI Example:: salt myminion boto_route53.create_hosted_zone example.org ''' if region is None: region = 'universal' if not domain_name.endswith('.'): raise SaltInvocationError('Domain MUST be fully-qualified, complete ' 'with ending period.') conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deets = conn.get_hosted_zone_by_name(domain_name) if deets: log.info('Route53 hosted zone %s already exists', domain_name) return None args = {'domain_name': domain_name, 'caller_ref': caller_ref, 'comment': comment, 'private_zone': private_zone} if private_zone: if not _exactly_one((vpc_name, vpc_id)): raise SaltInvocationError('Either vpc_name or vpc_id is required ' 'when creating a private zone.') vpcs = __salt__['boto_vpc.describe_vpcs']( vpc_id=vpc_id, name=vpc_name, region=region, key=key, keyid=keyid, profile=profile).get('vpcs', []) if vpc_region and vpcs: vpcs = [v for v in vpcs if v['region'] == vpc_region] if not vpcs: log.error('Private zone requested but a VPC matching given criteria' ' not found.') return None if len(vpcs) > 1: log.error('Private zone requested but multiple VPCs matching given ' 'criteria found: %s.', [v['id'] for v in vpcs]) return None vpc = vpcs[0] if vpc_name: vpc_id = vpc['id'] if not vpc_region: vpc_region = vpc['region'] args.update({'vpc_id': vpc_id, 'vpc_region': vpc_region}) else: if any((vpc_id, vpc_name, vpc_region)): log.info('Options vpc_id, vpc_name, and vpc_region are ignored ' 'when creating non-private zones.') r = _try_func(conn, 'create_hosted_zone', **args) if r is None: log.error('Failed to create hosted zone %s', domain_name) return None r = r.get('CreateHostedZoneResponse', {}) # Pop it since it'll be irrelevant by the time we return status = r.pop('ChangeInfo', {}).get('Id', '').replace('/change/', '') synced = _wait_for_sync(status, conn, wait=600) if not synced: log.error('Hosted zone %s not synced after 600 seconds.', domain_name) return None return r
python
def create_hosted_zone(domain_name, caller_ref=None, comment='', private_zone=False, vpc_id=None, vpc_name=None, vpc_region=None, region=None, key=None, keyid=None, profile=None): ''' Create a new Route53 Hosted Zone. Returns a Python data structure with information about the newly created Hosted Zone. domain_name The name of the domain. This must be fully-qualified, terminating with a period. This is the name you have registered with your domain registrar. It is also the name you will delegate from your registrar to the Amazon Route 53 delegation servers returned in response to this request. caller_ref A unique string that identifies the request and that allows create_hosted_zone() calls to be retried without the risk of executing the operation twice. It can take several minutes for the change to replicate globally, and change from PENDING to INSYNC status. Thus it's best to provide some value for this where possible, since duplicate calls while the first is in PENDING status will be accepted and can lead to multiple copies of the zone being created. On the other hand, if a zone is created with a given caller_ref, then deleted, a second attempt to create a zone with the same caller_ref will fail until that caller_ref is flushed from the Route53 system, which can take upwards of 24 hours. comment Any comments you want to include about the hosted zone. private_zone Set True if creating a private hosted zone. vpc_id When creating a private hosted zone, either the VPC ID or VPC Name to associate with is required. Exclusive with vpe_name. Ignored when creating a non-private zone. vpc_name When creating a private hosted zone, either the VPC ID or VPC Name to associate with is required. Exclusive with vpe_id. Ignored when creating a non-private zone. vpc_region When creating a private hosted zone, the region of the associated VPC is required. If not provided, an effort will be made to determine it from vpc_id or vpc_name, where possible. If this fails, you'll need to provide an explicit value for this option. Ignored when creating a non-private zone. region Region endpoint to connect to. key AWS key to bind with. keyid AWS keyid to bind with. profile Dict, or pillar key pointing to a dict, containing AWS region/key/keyid. CLI Example:: salt myminion boto_route53.create_hosted_zone example.org ''' if region is None: region = 'universal' if not domain_name.endswith('.'): raise SaltInvocationError('Domain MUST be fully-qualified, complete ' 'with ending period.') conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deets = conn.get_hosted_zone_by_name(domain_name) if deets: log.info('Route53 hosted zone %s already exists', domain_name) return None args = {'domain_name': domain_name, 'caller_ref': caller_ref, 'comment': comment, 'private_zone': private_zone} if private_zone: if not _exactly_one((vpc_name, vpc_id)): raise SaltInvocationError('Either vpc_name or vpc_id is required ' 'when creating a private zone.') vpcs = __salt__['boto_vpc.describe_vpcs']( vpc_id=vpc_id, name=vpc_name, region=region, key=key, keyid=keyid, profile=profile).get('vpcs', []) if vpc_region and vpcs: vpcs = [v for v in vpcs if v['region'] == vpc_region] if not vpcs: log.error('Private zone requested but a VPC matching given criteria' ' not found.') return None if len(vpcs) > 1: log.error('Private zone requested but multiple VPCs matching given ' 'criteria found: %s.', [v['id'] for v in vpcs]) return None vpc = vpcs[0] if vpc_name: vpc_id = vpc['id'] if not vpc_region: vpc_region = vpc['region'] args.update({'vpc_id': vpc_id, 'vpc_region': vpc_region}) else: if any((vpc_id, vpc_name, vpc_region)): log.info('Options vpc_id, vpc_name, and vpc_region are ignored ' 'when creating non-private zones.') r = _try_func(conn, 'create_hosted_zone', **args) if r is None: log.error('Failed to create hosted zone %s', domain_name) return None r = r.get('CreateHostedZoneResponse', {}) # Pop it since it'll be irrelevant by the time we return status = r.pop('ChangeInfo', {}).get('Id', '').replace('/change/', '') synced = _wait_for_sync(status, conn, wait=600) if not synced: log.error('Hosted zone %s not synced after 600 seconds.', domain_name) return None return r
['def', 'create_hosted_zone', '(', 'domain_name', ',', 'caller_ref', '=', 'None', ',', 'comment', '=', "''", ',', 'private_zone', '=', 'False', ',', 'vpc_id', '=', 'None', ',', 'vpc_name', '=', 'None', ',', 'vpc_region', '=', 'None', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'if', 'region', 'is', 'None', ':', 'region', '=', "'universal'", 'if', 'not', 'domain_name', '.', 'endswith', '(', "'.'", ')', ':', 'raise', 'SaltInvocationError', '(', "'Domain MUST be fully-qualified, complete '", "'with ending period.'", ')', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'deets', '=', 'conn', '.', 'get_hosted_zone_by_name', '(', 'domain_name', ')', 'if', 'deets', ':', 'log', '.', 'info', '(', "'Route53 hosted zone %s already exists'", ',', 'domain_name', ')', 'return', 'None', 'args', '=', '{', "'domain_name'", ':', 'domain_name', ',', "'caller_ref'", ':', 'caller_ref', ',', "'comment'", ':', 'comment', ',', "'private_zone'", ':', 'private_zone', '}', 'if', 'private_zone', ':', 'if', 'not', '_exactly_one', '(', '(', 'vpc_name', ',', 'vpc_id', ')', ')', ':', 'raise', 'SaltInvocationError', '(', "'Either vpc_name or vpc_id is required '", "'when creating a private zone.'", ')', 'vpcs', '=', '__salt__', '[', "'boto_vpc.describe_vpcs'", ']', '(', 'vpc_id', '=', 'vpc_id', ',', 'name', '=', 'vpc_name', ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', '.', 'get', '(', "'vpcs'", ',', '[', ']', ')', 'if', 'vpc_region', 'and', 'vpcs', ':', 'vpcs', '=', '[', 'v', 'for', 'v', 'in', 'vpcs', 'if', 'v', '[', "'region'", ']', '==', 'vpc_region', ']', 'if', 'not', 'vpcs', ':', 'log', '.', 'error', '(', "'Private zone requested but a VPC matching given criteria'", "' not found.'", ')', 'return', 'None', 'if', 'len', '(', 'vpcs', ')', '>', '1', ':', 'log', '.', 'error', '(', "'Private zone requested but multiple VPCs matching given '", "'criteria found: %s.'", ',', '[', 'v', '[', "'id'", ']', 'for', 'v', 'in', 'vpcs', ']', ')', 'return', 'None', 'vpc', '=', 'vpcs', '[', '0', ']', 'if', 'vpc_name', ':', 'vpc_id', '=', 'vpc', '[', "'id'", ']', 'if', 'not', 'vpc_region', ':', 'vpc_region', '=', 'vpc', '[', "'region'", ']', 'args', '.', 'update', '(', '{', "'vpc_id'", ':', 'vpc_id', ',', "'vpc_region'", ':', 'vpc_region', '}', ')', 'else', ':', 'if', 'any', '(', '(', 'vpc_id', ',', 'vpc_name', ',', 'vpc_region', ')', ')', ':', 'log', '.', 'info', '(', "'Options vpc_id, vpc_name, and vpc_region are ignored '", "'when creating non-private zones.'", ')', 'r', '=', '_try_func', '(', 'conn', ',', "'create_hosted_zone'", ',', '*', '*', 'args', ')', 'if', 'r', 'is', 'None', ':', 'log', '.', 'error', '(', "'Failed to create hosted zone %s'", ',', 'domain_name', ')', 'return', 'None', 'r', '=', 'r', '.', 'get', '(', "'CreateHostedZoneResponse'", ',', '{', '}', ')', "# Pop it since it'll be irrelevant by the time we return", 'status', '=', 'r', '.', 'pop', '(', "'ChangeInfo'", ',', '{', '}', ')', '.', 'get', '(', "'Id'", ',', "''", ')', '.', 'replace', '(', "'/change/'", ',', "''", ')', 'synced', '=', '_wait_for_sync', '(', 'status', ',', 'conn', ',', 'wait', '=', '600', ')', 'if', 'not', 'synced', ':', 'log', '.', 'error', '(', "'Hosted zone %s not synced after 600 seconds.'", ',', 'domain_name', ')', 'return', 'None', 'return', 'r']
Create a new Route53 Hosted Zone. Returns a Python data structure with information about the newly created Hosted Zone. domain_name The name of the domain. This must be fully-qualified, terminating with a period. This is the name you have registered with your domain registrar. It is also the name you will delegate from your registrar to the Amazon Route 53 delegation servers returned in response to this request. caller_ref A unique string that identifies the request and that allows create_hosted_zone() calls to be retried without the risk of executing the operation twice. It can take several minutes for the change to replicate globally, and change from PENDING to INSYNC status. Thus it's best to provide some value for this where possible, since duplicate calls while the first is in PENDING status will be accepted and can lead to multiple copies of the zone being created. On the other hand, if a zone is created with a given caller_ref, then deleted, a second attempt to create a zone with the same caller_ref will fail until that caller_ref is flushed from the Route53 system, which can take upwards of 24 hours. comment Any comments you want to include about the hosted zone. private_zone Set True if creating a private hosted zone. vpc_id When creating a private hosted zone, either the VPC ID or VPC Name to associate with is required. Exclusive with vpe_name. Ignored when creating a non-private zone. vpc_name When creating a private hosted zone, either the VPC ID or VPC Name to associate with is required. Exclusive with vpe_id. Ignored when creating a non-private zone. vpc_region When creating a private hosted zone, the region of the associated VPC is required. If not provided, an effort will be made to determine it from vpc_id or vpc_name, where possible. If this fails, you'll need to provide an explicit value for this option. Ignored when creating a non-private zone. region Region endpoint to connect to. key AWS key to bind with. keyid AWS keyid to bind with. profile Dict, or pillar key pointing to a dict, containing AWS region/key/keyid. CLI Example:: salt myminion boto_route53.create_hosted_zone example.org
['Create', 'a', 'new', 'Route53', 'Hosted', 'Zone', '.', 'Returns', 'a', 'Python', 'data', 'structure', 'with', 'information', 'about', 'the', 'newly', 'created', 'Hosted', 'Zone', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_route53.py#L798-L915
7,131
smartmob-project/smartmob-agent
smartmob_agent/__init__.py
start_responder
def start_responder(host='127.0.0.1', port=8080, event_log=None, loop=None): """.""" loop = loop or asyncio.get_event_loop() event_log = event_log or structlog.get_logger() # Prepare a web application. app = web.Application(loop=loop, middlewares=[ inject_request_id, access_log_middleware, ]) app.on_response_prepare.append(echo_request_id) app.router.add_route('GET', '/', index) app.router.add_route('POST', '/create-process', create_process, name='create-process') app.router.add_route('GET', '/process-status/{slug}', process_status, name='process-status') app.router.add_route('POST', '/delete-process/{slug}', delete_process, name='delete-process') app.router.add_route('GET', '/attach-console/{slug}', attach_console, name='attach-console') app.router.add_route('GET', '/list-processes', list_processes, name='list-processes') # Create storage folders. archives_path = os.path.join( '.', '.smartmob', 'archives', ) if not os.path.isdir(archives_path): os.makedirs(archives_path) sources_path = os.path.join( '.', '.smartmob', 'sources', ) if not os.path.isdir(sources_path): os.makedirs(sources_path) envs_path = os.path.join( '.', '.smartmob', 'envs', ) if not os.path.isdir(envs_path): os.makedirs(envs_path) event_log.info('bind', transport='tcp', host=host, port=port) # Start accepting connections. handler = app.make_handler() server = yield from loop.create_server(handler, host, port) return app, handler, server
python
def start_responder(host='127.0.0.1', port=8080, event_log=None, loop=None): """.""" loop = loop or asyncio.get_event_loop() event_log = event_log or structlog.get_logger() # Prepare a web application. app = web.Application(loop=loop, middlewares=[ inject_request_id, access_log_middleware, ]) app.on_response_prepare.append(echo_request_id) app.router.add_route('GET', '/', index) app.router.add_route('POST', '/create-process', create_process, name='create-process') app.router.add_route('GET', '/process-status/{slug}', process_status, name='process-status') app.router.add_route('POST', '/delete-process/{slug}', delete_process, name='delete-process') app.router.add_route('GET', '/attach-console/{slug}', attach_console, name='attach-console') app.router.add_route('GET', '/list-processes', list_processes, name='list-processes') # Create storage folders. archives_path = os.path.join( '.', '.smartmob', 'archives', ) if not os.path.isdir(archives_path): os.makedirs(archives_path) sources_path = os.path.join( '.', '.smartmob', 'sources', ) if not os.path.isdir(sources_path): os.makedirs(sources_path) envs_path = os.path.join( '.', '.smartmob', 'envs', ) if not os.path.isdir(envs_path): os.makedirs(envs_path) event_log.info('bind', transport='tcp', host=host, port=port) # Start accepting connections. handler = app.make_handler() server = yield from loop.create_server(handler, host, port) return app, handler, server
['def', 'start_responder', '(', 'host', '=', "'127.0.0.1'", ',', 'port', '=', '8080', ',', 'event_log', '=', 'None', ',', 'loop', '=', 'None', ')', ':', 'loop', '=', 'loop', 'or', 'asyncio', '.', 'get_event_loop', '(', ')', 'event_log', '=', 'event_log', 'or', 'structlog', '.', 'get_logger', '(', ')', '# Prepare a web application.', 'app', '=', 'web', '.', 'Application', '(', 'loop', '=', 'loop', ',', 'middlewares', '=', '[', 'inject_request_id', ',', 'access_log_middleware', ',', ']', ')', 'app', '.', 'on_response_prepare', '.', 'append', '(', 'echo_request_id', ')', 'app', '.', 'router', '.', 'add_route', '(', "'GET'", ',', "'/'", ',', 'index', ')', 'app', '.', 'router', '.', 'add_route', '(', "'POST'", ',', "'/create-process'", ',', 'create_process', ',', 'name', '=', "'create-process'", ')', 'app', '.', 'router', '.', 'add_route', '(', "'GET'", ',', "'/process-status/{slug}'", ',', 'process_status', ',', 'name', '=', "'process-status'", ')', 'app', '.', 'router', '.', 'add_route', '(', "'POST'", ',', "'/delete-process/{slug}'", ',', 'delete_process', ',', 'name', '=', "'delete-process'", ')', 'app', '.', 'router', '.', 'add_route', '(', "'GET'", ',', "'/attach-console/{slug}'", ',', 'attach_console', ',', 'name', '=', "'attach-console'", ')', 'app', '.', 'router', '.', 'add_route', '(', "'GET'", ',', "'/list-processes'", ',', 'list_processes', ',', 'name', '=', "'list-processes'", ')', '# Create storage folders.', 'archives_path', '=', 'os', '.', 'path', '.', 'join', '(', "'.'", ',', "'.smartmob'", ',', "'archives'", ',', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'archives_path', ')', ':', 'os', '.', 'makedirs', '(', 'archives_path', ')', 'sources_path', '=', 'os', '.', 'path', '.', 'join', '(', "'.'", ',', "'.smartmob'", ',', "'sources'", ',', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'sources_path', ')', ':', 'os', '.', 'makedirs', '(', 'sources_path', ')', 'envs_path', '=', 'os', '.', 'path', '.', 'join', '(', "'.'", ',', "'.smartmob'", ',', "'envs'", ',', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'envs_path', ')', ':', 'os', '.', 'makedirs', '(', 'envs_path', ')', 'event_log', '.', 'info', '(', "'bind'", ',', 'transport', '=', "'tcp'", ',', 'host', '=', 'host', ',', 'port', '=', 'port', ')', '# Start accepting connections.', 'handler', '=', 'app', '.', 'make_handler', '(', ')', 'server', '=', 'yield', 'from', 'loop', '.', 'create_server', '(', 'handler', ',', 'host', ',', 'port', ')', 'return', 'app', ',', 'handler', ',', 'server']
.
['.']
train
https://github.com/smartmob-project/smartmob-agent/blob/4039f577ab7230d135f00df68c611a51e45ddbc7/smartmob_agent/__init__.py#L627-L673
7,132
datadotworld/data.world-py
datadotworld/client/_swagger/apis/datasets_api.py
DatasetsApi.sync
def sync(self, owner, id, **kwargs): """ Sync files Update all files within a dataset that have originally been added via URL (e.g. via /datasets endpoints or on data.world). Check-out or tutorials for tips on how to add Google Sheets, GitHub and S3 files via URL and how to use webhooks or scripts to keep them always in sync. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.sync(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.sync_with_http_info(owner, id, **kwargs) else: (data) = self.sync_with_http_info(owner, id, **kwargs) return data
python
def sync(self, owner, id, **kwargs): """ Sync files Update all files within a dataset that have originally been added via URL (e.g. via /datasets endpoints or on data.world). Check-out or tutorials for tips on how to add Google Sheets, GitHub and S3 files via URL and how to use webhooks or scripts to keep them always in sync. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.sync(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.sync_with_http_info(owner, id, **kwargs) else: (data) = self.sync_with_http_info(owner, id, **kwargs) return data
['def', 'sync', '(', 'self', ',', 'owner', ',', 'id', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'callback'", ')', ':', 'return', 'self', '.', 'sync_with_http_info', '(', 'owner', ',', 'id', ',', '*', '*', 'kwargs', ')', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'sync_with_http_info', '(', 'owner', ',', 'id', ',', '*', '*', 'kwargs', ')', 'return', 'data']
Sync files Update all files within a dataset that have originally been added via URL (e.g. via /datasets endpoints or on data.world). Check-out or tutorials for tips on how to add Google Sheets, GitHub and S3 files via URL and how to use webhooks or scripts to keep them always in sync. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.sync(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread.
['Sync', 'files', 'Update', 'all', 'files', 'within', 'a', 'dataset', 'that', 'have', 'originally', 'been', 'added', 'via', 'URL', '(', 'e', '.', 'g', '.', 'via', '/', 'datasets', 'endpoints', 'or', 'on', 'data', '.', 'world', ')', '.', 'Check', '-', 'out', 'or', 'tutorials', 'for', 'tips', 'on', 'how', 'to', 'add', 'Google', 'Sheets', 'GitHub', 'and', 'S3', 'files', 'via', 'URL', 'and', 'how', 'to', 'use', 'webhooks', 'or', 'scripts', 'to', 'keep', 'them', 'always', 'in', 'sync', '.', 'This', 'method', 'makes', 'a', 'synchronous', 'HTTP', 'request', 'by', 'default', '.', 'To', 'make', 'an', 'asynchronous', 'HTTP', 'request', 'please', 'define', 'a', 'callback', 'function', 'to', 'be', 'invoked', 'when', 'receiving', 'the', 'response', '.', '>>>', 'def', 'callback_function', '(', 'response', ')', ':', '>>>', 'pprint', '(', 'response', ')', '>>>', '>>>', 'thread', '=', 'api', '.', 'sync', '(', 'owner', 'id', 'callback', '=', 'callback_function', ')']
train
https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/client/_swagger/apis/datasets_api.py#L1001-L1026
7,133
wummel/linkchecker
third_party/dnspython/dns/name.py
Name.to_unicode
def to_unicode(self, omit_final_dot = False): """Convert name to Unicode text format. IDN ACE lables are converted to Unicode. @param omit_final_dot: If True, don't emit the final dot (denoting the root label) for absolute names. The default is False. @rtype: string """ if len(self.labels) == 0: return u'@' if len(self.labels) == 1 and self.labels[0] == '': return u'.' if omit_final_dot and self.is_absolute(): l = self.labels[:-1] else: l = self.labels s = u'.'.join([encodings.idna.ToUnicode(_escapify(x)) for x in l]) return s
python
def to_unicode(self, omit_final_dot = False): """Convert name to Unicode text format. IDN ACE lables are converted to Unicode. @param omit_final_dot: If True, don't emit the final dot (denoting the root label) for absolute names. The default is False. @rtype: string """ if len(self.labels) == 0: return u'@' if len(self.labels) == 1 and self.labels[0] == '': return u'.' if omit_final_dot and self.is_absolute(): l = self.labels[:-1] else: l = self.labels s = u'.'.join([encodings.idna.ToUnicode(_escapify(x)) for x in l]) return s
['def', 'to_unicode', '(', 'self', ',', 'omit_final_dot', '=', 'False', ')', ':', 'if', 'len', '(', 'self', '.', 'labels', ')', '==', '0', ':', 'return', "u'@'", 'if', 'len', '(', 'self', '.', 'labels', ')', '==', '1', 'and', 'self', '.', 'labels', '[', '0', ']', '==', "''", ':', 'return', "u'.'", 'if', 'omit_final_dot', 'and', 'self', '.', 'is_absolute', '(', ')', ':', 'l', '=', 'self', '.', 'labels', '[', ':', '-', '1', ']', 'else', ':', 'l', '=', 'self', '.', 'labels', 's', '=', "u'.'", '.', 'join', '(', '[', 'encodings', '.', 'idna', '.', 'ToUnicode', '(', '_escapify', '(', 'x', ')', ')', 'for', 'x', 'in', 'l', ']', ')', 'return', 's']
Convert name to Unicode text format. IDN ACE lables are converted to Unicode. @param omit_final_dot: If True, don't emit the final dot (denoting the root label) for absolute names. The default is False. @rtype: string
['Convert', 'name', 'to', 'Unicode', 'text', 'format', '.']
train
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/name.py#L330-L349
7,134
Data-Mechanics/geoql
geoql/geoql.py
features_keep_within_radius
def features_keep_within_radius(obj, center, radius, units): """ Filter all features in a collection by retaining only those that fall within the specified radius. """ features_keep = [] for feature in tqdm(obj['features']): if all([getattr(geopy.distance.vincenty((lat,lon), center), units) < radius for (lon,lat) in geojson.utils.coords(feature)]): features_keep.append(feature) obj['features'] = features_keep return obj
python
def features_keep_within_radius(obj, center, radius, units): """ Filter all features in a collection by retaining only those that fall within the specified radius. """ features_keep = [] for feature in tqdm(obj['features']): if all([getattr(geopy.distance.vincenty((lat,lon), center), units) < radius for (lon,lat) in geojson.utils.coords(feature)]): features_keep.append(feature) obj['features'] = features_keep return obj
['def', 'features_keep_within_radius', '(', 'obj', ',', 'center', ',', 'radius', ',', 'units', ')', ':', 'features_keep', '=', '[', ']', 'for', 'feature', 'in', 'tqdm', '(', 'obj', '[', "'features'", ']', ')', ':', 'if', 'all', '(', '[', 'getattr', '(', 'geopy', '.', 'distance', '.', 'vincenty', '(', '(', 'lat', ',', 'lon', ')', ',', 'center', ')', ',', 'units', ')', '<', 'radius', 'for', '(', 'lon', ',', 'lat', ')', 'in', 'geojson', '.', 'utils', '.', 'coords', '(', 'feature', ')', ']', ')', ':', 'features_keep', '.', 'append', '(', 'feature', ')', 'obj', '[', "'features'", ']', '=', 'features_keep', 'return', 'obj']
Filter all features in a collection by retaining only those that fall within the specified radius.
['Filter', 'all', 'features', 'in', 'a', 'collection', 'by', 'retaining', 'only', 'those', 'that', 'fall', 'within', 'the', 'specified', 'radius', '.']
train
https://github.com/Data-Mechanics/geoql/blob/c6184e1734c76a259855d6282e919614839a767e/geoql/geoql.py#L97-L107
7,135
vallis/libstempo
libstempo/eccUtils.py
solve_coupled_ecc_solution
def solve_coupled_ecc_solution(F0, e0, gamma0, phase0, mc, q, t): """ Compute the solution to the coupled system of equations from from Peters (1964) and Barack & Cutler (2004) at a given time. :param F0: Initial orbital frequency [Hz] :param e0: Initial orbital eccentricity :param gamma0: Initial angle of precession of periastron [rad] :param mc: Chirp mass of binary [Solar Mass] :param q: Mass ratio of binary :param t: Time at which to evaluate solution [s] :returns: (F(t), e(t), gamma(t), phase(t)) """ y0 = np.array([F0, e0, gamma0, phase0]) y, infodict = odeint(get_coupled_ecc_eqns, y0, t, args=(mc,q), full_output=True) if infodict['message'] == 'Integration successful.': ret = y else: ret = 0 return ret
python
def solve_coupled_ecc_solution(F0, e0, gamma0, phase0, mc, q, t): """ Compute the solution to the coupled system of equations from from Peters (1964) and Barack & Cutler (2004) at a given time. :param F0: Initial orbital frequency [Hz] :param e0: Initial orbital eccentricity :param gamma0: Initial angle of precession of periastron [rad] :param mc: Chirp mass of binary [Solar Mass] :param q: Mass ratio of binary :param t: Time at which to evaluate solution [s] :returns: (F(t), e(t), gamma(t), phase(t)) """ y0 = np.array([F0, e0, gamma0, phase0]) y, infodict = odeint(get_coupled_ecc_eqns, y0, t, args=(mc,q), full_output=True) if infodict['message'] == 'Integration successful.': ret = y else: ret = 0 return ret
['def', 'solve_coupled_ecc_solution', '(', 'F0', ',', 'e0', ',', 'gamma0', ',', 'phase0', ',', 'mc', ',', 'q', ',', 't', ')', ':', 'y0', '=', 'np', '.', 'array', '(', '[', 'F0', ',', 'e0', ',', 'gamma0', ',', 'phase0', ']', ')', 'y', ',', 'infodict', '=', 'odeint', '(', 'get_coupled_ecc_eqns', ',', 'y0', ',', 't', ',', 'args', '=', '(', 'mc', ',', 'q', ')', ',', 'full_output', '=', 'True', ')', 'if', 'infodict', '[', "'message'", ']', '==', "'Integration successful.'", ':', 'ret', '=', 'y', 'else', ':', 'ret', '=', '0', 'return', 'ret']
Compute the solution to the coupled system of equations from from Peters (1964) and Barack & Cutler (2004) at a given time. :param F0: Initial orbital frequency [Hz] :param e0: Initial orbital eccentricity :param gamma0: Initial angle of precession of periastron [rad] :param mc: Chirp mass of binary [Solar Mass] :param q: Mass ratio of binary :param t: Time at which to evaluate solution [s] :returns: (F(t), e(t), gamma(t), phase(t))
['Compute', 'the', 'solution', 'to', 'the', 'coupled', 'system', 'of', 'equations', 'from', 'from', 'Peters', '(', '1964', ')', 'and', 'Barack', '&', 'Cutler', '(', '2004', ')', 'at', 'a', 'given', 'time', '.', ':', 'param', 'F0', ':', 'Initial', 'orbital', 'frequency', '[', 'Hz', ']', ':', 'param', 'e0', ':', 'Initial', 'orbital', 'eccentricity', ':', 'param', 'gamma0', ':', 'Initial', 'angle', 'of', 'precession', 'of', 'periastron', '[', 'rad', ']', ':', 'param', 'mc', ':', 'Chirp', 'mass', 'of', 'binary', '[', 'Solar', 'Mass', ']', ':', 'param', 'q', ':', 'Mass', 'ratio', 'of', 'binary', ':', 'param', 't', ':', 'Time', 'at', 'which', 'to', 'evaluate', 'solution', '[', 's', ']', ':', 'returns', ':', '(', 'F', '(', 't', ')', 'e', '(', 't', ')', 'gamma', '(', 't', ')', 'phase', '(', 't', '))']
train
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L143-L169
7,136
camptocamp/Studio
studio/controllers/mapfiles.py
MapfilesController._delete_map_from_user_by_id
def _delete_map_from_user_by_id(self, user, map_id): """ Delete a mapfile entry from database. """ map = self._get_map_from_user_by_id(user, map_id) if map is None: return None Session.delete(map) Session.commit() return map
python
def _delete_map_from_user_by_id(self, user, map_id): """ Delete a mapfile entry from database. """ map = self._get_map_from_user_by_id(user, map_id) if map is None: return None Session.delete(map) Session.commit() return map
['def', '_delete_map_from_user_by_id', '(', 'self', ',', 'user', ',', 'map_id', ')', ':', 'map', '=', 'self', '.', '_get_map_from_user_by_id', '(', 'user', ',', 'map_id', ')', 'if', 'map', 'is', 'None', ':', 'return', 'None', 'Session', '.', 'delete', '(', 'map', ')', 'Session', '.', 'commit', '(', ')', 'return', 'map']
Delete a mapfile entry from database.
['Delete', 'a', 'mapfile', 'entry', 'from', 'database', '.']
train
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/controllers/mapfiles.py#L211-L218
7,137
sigsep/sigsep-mus-eval
museval/metrics.py
_project
def _project(reference_sources, C): """Project images using pre-computed filters C reference_sources are nsrc X nsampl X nchan C is nsrc X nchan X filters_len X nchan """ # shapes: ensure that input is 3d (comprising the source index) if len(reference_sources.shape) == 2: reference_sources = reference_sources[None, ...] C = C[None, ...] (nsrc, nsampl, nchan) = reference_sources.shape filters_len = C.shape[-2] # zero pad reference_sources = _zeropad(reference_sources, filters_len - 1, axis=1) sproj = np.zeros((nchan, nsampl + filters_len - 1)) for (j, cj, c) in itertools.product( list(range(nsrc)), list(range(nchan)), list(range(nchan)) ): sproj[c] += fftconvolve( C[j, cj, :, c], reference_sources[j, :, cj] )[:nsampl + filters_len - 1] return sproj.T
python
def _project(reference_sources, C): """Project images using pre-computed filters C reference_sources are nsrc X nsampl X nchan C is nsrc X nchan X filters_len X nchan """ # shapes: ensure that input is 3d (comprising the source index) if len(reference_sources.shape) == 2: reference_sources = reference_sources[None, ...] C = C[None, ...] (nsrc, nsampl, nchan) = reference_sources.shape filters_len = C.shape[-2] # zero pad reference_sources = _zeropad(reference_sources, filters_len - 1, axis=1) sproj = np.zeros((nchan, nsampl + filters_len - 1)) for (j, cj, c) in itertools.product( list(range(nsrc)), list(range(nchan)), list(range(nchan)) ): sproj[c] += fftconvolve( C[j, cj, :, c], reference_sources[j, :, cj] )[:nsampl + filters_len - 1] return sproj.T
['def', '_project', '(', 'reference_sources', ',', 'C', ')', ':', '# shapes: ensure that input is 3d (comprising the source index)', 'if', 'len', '(', 'reference_sources', '.', 'shape', ')', '==', '2', ':', 'reference_sources', '=', 'reference_sources', '[', 'None', ',', '...', ']', 'C', '=', 'C', '[', 'None', ',', '...', ']', '(', 'nsrc', ',', 'nsampl', ',', 'nchan', ')', '=', 'reference_sources', '.', 'shape', 'filters_len', '=', 'C', '.', 'shape', '[', '-', '2', ']', '# zero pad', 'reference_sources', '=', '_zeropad', '(', 'reference_sources', ',', 'filters_len', '-', '1', ',', 'axis', '=', '1', ')', 'sproj', '=', 'np', '.', 'zeros', '(', '(', 'nchan', ',', 'nsampl', '+', 'filters_len', '-', '1', ')', ')', 'for', '(', 'j', ',', 'cj', ',', 'c', ')', 'in', 'itertools', '.', 'product', '(', 'list', '(', 'range', '(', 'nsrc', ')', ')', ',', 'list', '(', 'range', '(', 'nchan', ')', ')', ',', 'list', '(', 'range', '(', 'nchan', ')', ')', ')', ':', 'sproj', '[', 'c', ']', '+=', 'fftconvolve', '(', 'C', '[', 'j', ',', 'cj', ',', ':', ',', 'c', ']', ',', 'reference_sources', '[', 'j', ',', ':', ',', 'cj', ']', ')', '[', ':', 'nsampl', '+', 'filters_len', '-', '1', ']', 'return', 'sproj', '.', 'T']
Project images using pre-computed filters C reference_sources are nsrc X nsampl X nchan C is nsrc X nchan X filters_len X nchan
['Project', 'images', 'using', 'pre', '-', 'computed', 'filters', 'C', 'reference_sources', 'are', 'nsrc', 'X', 'nsampl', 'X', 'nchan', 'C', 'is', 'nsrc', 'X', 'nchan', 'X', 'filters_len', 'X', 'nchan']
train
https://github.com/sigsep/sigsep-mus-eval/blob/a7c9af3647f0c0bb9bbaeccec0b1a6a9e09d1e2d/museval/metrics.py#L605-L629
7,138
joke2k/faker
faker/providers/misc/__init__.py
Provider.binary
def binary(self, length=(1 * 1024 * 1024)): """ Returns random binary blob. Default blob size is 1 Mb. """ blob = [self.generator.random.randrange(256) for _ in range(length)] return bytes(blob) if sys.version_info[0] >= 3 else bytearray(blob)
python
def binary(self, length=(1 * 1024 * 1024)): """ Returns random binary blob. Default blob size is 1 Mb. """ blob = [self.generator.random.randrange(256) for _ in range(length)] return bytes(blob) if sys.version_info[0] >= 3 else bytearray(blob)
['def', 'binary', '(', 'self', ',', 'length', '=', '(', '1', '*', '1024', '*', '1024', ')', ')', ':', 'blob', '=', '[', 'self', '.', 'generator', '.', 'random', '.', 'randrange', '(', '256', ')', 'for', '_', 'in', 'range', '(', 'length', ')', ']', 'return', 'bytes', '(', 'blob', ')', 'if', 'sys', '.', 'version_info', '[', '0', ']', '>=', '3', 'else', 'bytearray', '(', 'blob', ')']
Returns random binary blob. Default blob size is 1 Mb.
['Returns', 'random', 'binary', 'blob', '.']
train
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/misc/__init__.py#L23-L29
7,139
iotile/coretools
iotilesensorgraph/iotile/sg/sensor_log.py
SensorLog.set_rollover
def set_rollover(self, area, enabled): """Configure whether rollover is enabled for streaming or storage streams. Normally a SensorLog is used in ring-buffer mode which means that old readings are automatically overwritten as needed when new data is saved. However, you can configure it into fill-stop mode by using: set_rollover("streaming"|"storage", True|False) By default rollover is set to True for both streaming and storage and can be controlled individually for each one. Args: area (str): Either streaming or storage. enabled (bool): Whether to enable or disable rollover. """ if area == u'streaming': self._rollover_streaming = enabled elif area == u'storage': self._rollover_storage = enabled else: raise ArgumentError("You must pass one of 'storage' or 'streaming' to set_rollover", area=area)
python
def set_rollover(self, area, enabled): """Configure whether rollover is enabled for streaming or storage streams. Normally a SensorLog is used in ring-buffer mode which means that old readings are automatically overwritten as needed when new data is saved. However, you can configure it into fill-stop mode by using: set_rollover("streaming"|"storage", True|False) By default rollover is set to True for both streaming and storage and can be controlled individually for each one. Args: area (str): Either streaming or storage. enabled (bool): Whether to enable or disable rollover. """ if area == u'streaming': self._rollover_streaming = enabled elif area == u'storage': self._rollover_storage = enabled else: raise ArgumentError("You must pass one of 'storage' or 'streaming' to set_rollover", area=area)
['def', 'set_rollover', '(', 'self', ',', 'area', ',', 'enabled', ')', ':', 'if', 'area', '==', "u'streaming'", ':', 'self', '.', '_rollover_streaming', '=', 'enabled', 'elif', 'area', '==', "u'storage'", ':', 'self', '.', '_rollover_storage', '=', 'enabled', 'else', ':', 'raise', 'ArgumentError', '(', '"You must pass one of \'storage\' or \'streaming\' to set_rollover"', ',', 'area', '=', 'area', ')']
Configure whether rollover is enabled for streaming or storage streams. Normally a SensorLog is used in ring-buffer mode which means that old readings are automatically overwritten as needed when new data is saved. However, you can configure it into fill-stop mode by using: set_rollover("streaming"|"storage", True|False) By default rollover is set to True for both streaming and storage and can be controlled individually for each one. Args: area (str): Either streaming or storage. enabled (bool): Whether to enable or disable rollover.
['Configure', 'whether', 'rollover', 'is', 'enabled', 'for', 'streaming', 'or', 'storage', 'streams', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/sensor_log.py#L146-L168
7,140
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_console.py
ConsoleModule.unload
def unload(self): '''unload module''' self.mpstate.console.close() self.mpstate.console = textconsole.SimpleConsole()
python
def unload(self): '''unload module''' self.mpstate.console.close() self.mpstate.console = textconsole.SimpleConsole()
['def', 'unload', '(', 'self', ')', ':', 'self', '.', 'mpstate', '.', 'console', '.', 'close', '(', ')', 'self', '.', 'mpstate', '.', 'console', '=', 'textconsole', '.', 'SimpleConsole', '(', ')']
unload module
['unload', 'module']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_console.py#L72-L75
7,141
Kjwon15/autotweet
autotweet/learning.py
DataCollection.add_document
def add_document(self, question, answer): """Add question answer set to DB. :param question: A question to an answer :type question: :class:`str` :param answer: An answer to a question :type answer: :class:`str` """ question = question.strip() answer = answer.strip() session = self.Session() if session.query(Document) \ .filter_by(text=question, answer=answer).count(): logger.info('Already here: {0} -> {1}'.format(question, answer)) return logger.info('add document: {0} -> {1}'.format(question, answer)) grams = self._get_grams(session, question, make=True) doc = Document(question, answer) doc.grams = list(grams) self._recalc_idfs(session, grams) session.add(doc) session.commit()
python
def add_document(self, question, answer): """Add question answer set to DB. :param question: A question to an answer :type question: :class:`str` :param answer: An answer to a question :type answer: :class:`str` """ question = question.strip() answer = answer.strip() session = self.Session() if session.query(Document) \ .filter_by(text=question, answer=answer).count(): logger.info('Already here: {0} -> {1}'.format(question, answer)) return logger.info('add document: {0} -> {1}'.format(question, answer)) grams = self._get_grams(session, question, make=True) doc = Document(question, answer) doc.grams = list(grams) self._recalc_idfs(session, grams) session.add(doc) session.commit()
['def', 'add_document', '(', 'self', ',', 'question', ',', 'answer', ')', ':', 'question', '=', 'question', '.', 'strip', '(', ')', 'answer', '=', 'answer', '.', 'strip', '(', ')', 'session', '=', 'self', '.', 'Session', '(', ')', 'if', 'session', '.', 'query', '(', 'Document', ')', '.', 'filter_by', '(', 'text', '=', 'question', ',', 'answer', '=', 'answer', ')', '.', 'count', '(', ')', ':', 'logger', '.', 'info', '(', "'Already here: {0} -> {1}'", '.', 'format', '(', 'question', ',', 'answer', ')', ')', 'return', 'logger', '.', 'info', '(', "'add document: {0} -> {1}'", '.', 'format', '(', 'question', ',', 'answer', ')', ')', 'grams', '=', 'self', '.', '_get_grams', '(', 'session', ',', 'question', ',', 'make', '=', 'True', ')', 'doc', '=', 'Document', '(', 'question', ',', 'answer', ')', 'doc', '.', 'grams', '=', 'list', '(', 'grams', ')', 'self', '.', '_recalc_idfs', '(', 'session', ',', 'grams', ')', 'session', '.', 'add', '(', 'doc', ')', 'session', '.', 'commit', '(', ')']
Add question answer set to DB. :param question: A question to an answer :type question: :class:`str` :param answer: An answer to a question :type answer: :class:`str`
['Add', 'question', 'answer', 'set', 'to', 'DB', '.']
train
https://github.com/Kjwon15/autotweet/blob/c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e/autotweet/learning.py#L30-L58
7,142
saltstack/salt
salt/key.py
Key.check_minion_cache
def check_minion_cache(self, preserve_minions=None): ''' Check the minion cache to make sure that old minion data is cleared Optionally, pass in a list of minions which should have their caches preserved. To preserve all caches, set __opts__['preserve_minion_cache'] ''' if preserve_minions is None: preserve_minions = [] keys = self.list_keys() minions = [] for key, val in six.iteritems(keys): minions.extend(val) if not self.opts.get('preserve_minion_cache', False): m_cache = os.path.join(self.opts['cachedir'], self.ACC) if os.path.isdir(m_cache): for minion in os.listdir(m_cache): if minion not in minions and minion not in preserve_minions: try: shutil.rmtree(os.path.join(m_cache, minion)) except (OSError, IOError) as ex: log.warning('Key: Delete cache for %s got OSError/IOError: %s \n', minion, ex) continue cache = salt.cache.factory(self.opts) clist = cache.list(self.ACC) if clist: for minion in clist: if minion not in minions and minion not in preserve_minions: cache.flush('{0}/{1}'.format(self.ACC, minion))
python
def check_minion_cache(self, preserve_minions=None): ''' Check the minion cache to make sure that old minion data is cleared Optionally, pass in a list of minions which should have their caches preserved. To preserve all caches, set __opts__['preserve_minion_cache'] ''' if preserve_minions is None: preserve_minions = [] keys = self.list_keys() minions = [] for key, val in six.iteritems(keys): minions.extend(val) if not self.opts.get('preserve_minion_cache', False): m_cache = os.path.join(self.opts['cachedir'], self.ACC) if os.path.isdir(m_cache): for minion in os.listdir(m_cache): if minion not in minions and minion not in preserve_minions: try: shutil.rmtree(os.path.join(m_cache, minion)) except (OSError, IOError) as ex: log.warning('Key: Delete cache for %s got OSError/IOError: %s \n', minion, ex) continue cache = salt.cache.factory(self.opts) clist = cache.list(self.ACC) if clist: for minion in clist: if minion not in minions and minion not in preserve_minions: cache.flush('{0}/{1}'.format(self.ACC, minion))
['def', 'check_minion_cache', '(', 'self', ',', 'preserve_minions', '=', 'None', ')', ':', 'if', 'preserve_minions', 'is', 'None', ':', 'preserve_minions', '=', '[', ']', 'keys', '=', 'self', '.', 'list_keys', '(', ')', 'minions', '=', '[', ']', 'for', 'key', ',', 'val', 'in', 'six', '.', 'iteritems', '(', 'keys', ')', ':', 'minions', '.', 'extend', '(', 'val', ')', 'if', 'not', 'self', '.', 'opts', '.', 'get', '(', "'preserve_minion_cache'", ',', 'False', ')', ':', 'm_cache', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'opts', '[', "'cachedir'", ']', ',', 'self', '.', 'ACC', ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'm_cache', ')', ':', 'for', 'minion', 'in', 'os', '.', 'listdir', '(', 'm_cache', ')', ':', 'if', 'minion', 'not', 'in', 'minions', 'and', 'minion', 'not', 'in', 'preserve_minions', ':', 'try', ':', 'shutil', '.', 'rmtree', '(', 'os', '.', 'path', '.', 'join', '(', 'm_cache', ',', 'minion', ')', ')', 'except', '(', 'OSError', ',', 'IOError', ')', 'as', 'ex', ':', 'log', '.', 'warning', '(', "'Key: Delete cache for %s got OSError/IOError: %s \\n'", ',', 'minion', ',', 'ex', ')', 'continue', 'cache', '=', 'salt', '.', 'cache', '.', 'factory', '(', 'self', '.', 'opts', ')', 'clist', '=', 'cache', '.', 'list', '(', 'self', '.', 'ACC', ')', 'if', 'clist', ':', 'for', 'minion', 'in', 'clist', ':', 'if', 'minion', 'not', 'in', 'minions', 'and', 'minion', 'not', 'in', 'preserve_minions', ':', 'cache', '.', 'flush', '(', "'{0}/{1}'", '.', 'format', '(', 'self', '.', 'ACC', ',', 'minion', ')', ')']
Check the minion cache to make sure that old minion data is cleared Optionally, pass in a list of minions which should have their caches preserved. To preserve all caches, set __opts__['preserve_minion_cache']
['Check', 'the', 'minion', 'cache', 'to', 'make', 'sure', 'that', 'old', 'minion', 'data', 'is', 'cleared']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/key.py#L418-L448
7,143
gusdan/django-elasticache
django_elasticache/memcached.py
invalidate_cache_after_error
def invalidate_cache_after_error(f): """ catch any exception and invalidate internal cache with list of nodes """ @wraps(f) def wrapper(self, *args, **kwds): try: return f(self, *args, **kwds) except Exception: self.clear_cluster_nodes_cache() raise return wrapper
python
def invalidate_cache_after_error(f): """ catch any exception and invalidate internal cache with list of nodes """ @wraps(f) def wrapper(self, *args, **kwds): try: return f(self, *args, **kwds) except Exception: self.clear_cluster_nodes_cache() raise return wrapper
['def', 'invalidate_cache_after_error', '(', 'f', ')', ':', '@', 'wraps', '(', 'f', ')', 'def', 'wrapper', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwds', ')', ':', 'try', ':', 'return', 'f', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwds', ')', 'except', 'Exception', ':', 'self', '.', 'clear_cluster_nodes_cache', '(', ')', 'raise', 'return', 'wrapper']
catch any exception and invalidate internal cache with list of nodes
['catch', 'any', 'exception', 'and', 'invalidate', 'internal', 'cache', 'with', 'list', 'of', 'nodes']
train
https://github.com/gusdan/django-elasticache/blob/5f93c06ca8f264e3bd85b5f7044fd07733282e42/django_elasticache/memcached.py#L11-L22
7,144
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_rmon.py
brocade_rmon.rmon_event_entry_log
def rmon_event_entry_log(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon") event_entry = ET.SubElement(rmon, "event-entry") event_index_key = ET.SubElement(event_entry, "event-index") event_index_key.text = kwargs.pop('event_index') log = ET.SubElement(event_entry, "log") callback = kwargs.pop('callback', self._callback) return callback(config)
python
def rmon_event_entry_log(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon") event_entry = ET.SubElement(rmon, "event-entry") event_index_key = ET.SubElement(event_entry, "event-index") event_index_key.text = kwargs.pop('event_index') log = ET.SubElement(event_entry, "log") callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'rmon_event_entry_log', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'rmon', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"rmon"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-rmon"', ')', 'event_entry', '=', 'ET', '.', 'SubElement', '(', 'rmon', ',', '"event-entry"', ')', 'event_index_key', '=', 'ET', '.', 'SubElement', '(', 'event_entry', ',', '"event-index"', ')', 'event_index_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'event_index'", ')', 'log', '=', 'ET', '.', 'SubElement', '(', 'event_entry', ',', '"log"', ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_rmon.py#L38-L49
7,145
knipknap/exscript
Exscript/util/ipv6.py
clean_ip
def clean_ip(ip): """ Cleans the ip address up, useful for removing leading zeros, e.g.:: 1234:0:01:02:: -> 1234:0:1:2:: 1234:0000:0000:0000:0000:0000:0000:000A -> 1234::a 1234:0000:0000:0000:0001:0000:0000:0000 -> 1234:0:0:0:1:: 0000:0000:0000:0000:0001:0000:0000:0000 -> ::1:0:0:0 :type ip: string :param ip: An IP address. :rtype: string :return: The cleaned up IP. """ theip = normalize_ip(ip) segments = ['%x' % int(s, 16) for s in theip.split(':')] # Find the longest consecutive sequence of zeroes. seq = {0: 0} start = None count = 0 for n, segment in enumerate(segments): if segment != '0': start = None count = 0 continue if start is None: start = n count += 1 seq[count] = start # Replace those zeroes by a double colon. count = max(seq) start = seq[count] result = [] for n, segment in enumerate(segments): if n == start and count > 1: if n == 0: result.append('') result.append('') if n == 7: result.append('') continue elif start < n < start + count: if n == 7: result.append('') continue result.append(segment) return ':'.join(result)
python
def clean_ip(ip): """ Cleans the ip address up, useful for removing leading zeros, e.g.:: 1234:0:01:02:: -> 1234:0:1:2:: 1234:0000:0000:0000:0000:0000:0000:000A -> 1234::a 1234:0000:0000:0000:0001:0000:0000:0000 -> 1234:0:0:0:1:: 0000:0000:0000:0000:0001:0000:0000:0000 -> ::1:0:0:0 :type ip: string :param ip: An IP address. :rtype: string :return: The cleaned up IP. """ theip = normalize_ip(ip) segments = ['%x' % int(s, 16) for s in theip.split(':')] # Find the longest consecutive sequence of zeroes. seq = {0: 0} start = None count = 0 for n, segment in enumerate(segments): if segment != '0': start = None count = 0 continue if start is None: start = n count += 1 seq[count] = start # Replace those zeroes by a double colon. count = max(seq) start = seq[count] result = [] for n, segment in enumerate(segments): if n == start and count > 1: if n == 0: result.append('') result.append('') if n == 7: result.append('') continue elif start < n < start + count: if n == 7: result.append('') continue result.append(segment) return ':'.join(result)
['def', 'clean_ip', '(', 'ip', ')', ':', 'theip', '=', 'normalize_ip', '(', 'ip', ')', 'segments', '=', '[', "'%x'", '%', 'int', '(', 's', ',', '16', ')', 'for', 's', 'in', 'theip', '.', 'split', '(', "':'", ')', ']', '# Find the longest consecutive sequence of zeroes.', 'seq', '=', '{', '0', ':', '0', '}', 'start', '=', 'None', 'count', '=', '0', 'for', 'n', ',', 'segment', 'in', 'enumerate', '(', 'segments', ')', ':', 'if', 'segment', '!=', "'0'", ':', 'start', '=', 'None', 'count', '=', '0', 'continue', 'if', 'start', 'is', 'None', ':', 'start', '=', 'n', 'count', '+=', '1', 'seq', '[', 'count', ']', '=', 'start', '# Replace those zeroes by a double colon.', 'count', '=', 'max', '(', 'seq', ')', 'start', '=', 'seq', '[', 'count', ']', 'result', '=', '[', ']', 'for', 'n', ',', 'segment', 'in', 'enumerate', '(', 'segments', ')', ':', 'if', 'n', '==', 'start', 'and', 'count', '>', '1', ':', 'if', 'n', '==', '0', ':', 'result', '.', 'append', '(', "''", ')', 'result', '.', 'append', '(', "''", ')', 'if', 'n', '==', '7', ':', 'result', '.', 'append', '(', "''", ')', 'continue', 'elif', 'start', '<', 'n', '<', 'start', '+', 'count', ':', 'if', 'n', '==', '7', ':', 'result', '.', 'append', '(', "''", ')', 'continue', 'result', '.', 'append', '(', 'segment', ')', 'return', "':'", '.', 'join', '(', 'result', ')']
Cleans the ip address up, useful for removing leading zeros, e.g.:: 1234:0:01:02:: -> 1234:0:1:2:: 1234:0000:0000:0000:0000:0000:0000:000A -> 1234::a 1234:0000:0000:0000:0001:0000:0000:0000 -> 1234:0:0:0:1:: 0000:0000:0000:0000:0001:0000:0000:0000 -> ::1:0:0:0 :type ip: string :param ip: An IP address. :rtype: string :return: The cleaned up IP.
['Cleans', 'the', 'ip', 'address', 'up', 'useful', 'for', 'removing', 'leading', 'zeros', 'e', '.', 'g', '.', '::']
train
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/ipv6.py#L85-L133
7,146
LEMS/pylems
lems/model/dynamics.py
ConditionalDerivedVariable.add
def add(self, child): """ Adds a typed child object to the conditional derived variable. @param child: Child object to be added. """ if isinstance(child, Case): self.add_case(child) else: raise ModelError('Unsupported child element')
python
def add(self, child): """ Adds a typed child object to the conditional derived variable. @param child: Child object to be added. """ if isinstance(child, Case): self.add_case(child) else: raise ModelError('Unsupported child element')
['def', 'add', '(', 'self', ',', 'child', ')', ':', 'if', 'isinstance', '(', 'child', ',', 'Case', ')', ':', 'self', '.', 'add_case', '(', 'child', ')', 'else', ':', 'raise', 'ModelError', '(', "'Unsupported child element'", ')']
Adds a typed child object to the conditional derived variable. @param child: Child object to be added.
['Adds', 'a', 'typed', 'child', 'object', 'to', 'the', 'conditional', 'derived', 'variable', '.']
train
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/dynamics.py#L203-L213
7,147
csparpa/pyowm
pyowm/commons/image.py
Image.load
def load(cls, path_to_file): """ Loads the image data from a file on disk and tries to guess the image MIME type :param path_to_file: path to the source file :type path_to_file: str :return: a `pyowm.image.Image` instance """ import mimetypes mimetypes.init() mime = mimetypes.guess_type('file://%s' % path_to_file)[0] img_type = ImageTypeEnum.lookup_by_mime_type(mime) with open(path_to_file, 'rb') as f: data = f.read() return Image(data, image_type=img_type)
python
def load(cls, path_to_file): """ Loads the image data from a file on disk and tries to guess the image MIME type :param path_to_file: path to the source file :type path_to_file: str :return: a `pyowm.image.Image` instance """ import mimetypes mimetypes.init() mime = mimetypes.guess_type('file://%s' % path_to_file)[0] img_type = ImageTypeEnum.lookup_by_mime_type(mime) with open(path_to_file, 'rb') as f: data = f.read() return Image(data, image_type=img_type)
['def', 'load', '(', 'cls', ',', 'path_to_file', ')', ':', 'import', 'mimetypes', 'mimetypes', '.', 'init', '(', ')', 'mime', '=', 'mimetypes', '.', 'guess_type', '(', "'file://%s'", '%', 'path_to_file', ')', '[', '0', ']', 'img_type', '=', 'ImageTypeEnum', '.', 'lookup_by_mime_type', '(', 'mime', ')', 'with', 'open', '(', 'path_to_file', ',', "'rb'", ')', 'as', 'f', ':', 'data', '=', 'f', '.', 'read', '(', ')', 'return', 'Image', '(', 'data', ',', 'image_type', '=', 'img_type', ')']
Loads the image data from a file on disk and tries to guess the image MIME type :param path_to_file: path to the source file :type path_to_file: str :return: a `pyowm.image.Image` instance
['Loads', 'the', 'image', 'data', 'from', 'a', 'file', 'on', 'disk', 'and', 'tries', 'to', 'guess', 'the', 'image', 'MIME', 'type']
train
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/commons/image.py#L34-L48
7,148
open-homeautomation/pknx
knxip/ip.py
KNXIPFrame.header
def header(self): """Return the frame header (as an array of bytes).""" total_length = self.total_length() res = [0x06, 0x10, 0, 0, 0, 0] res[2] = (self.service_type_id >> 8) & 0xff res[3] = (self.service_type_id >> 0) & 0xff res[4] = (total_length >> 8) & 0xff res[5] = (total_length >> 0) & 0xff return res
python
def header(self): """Return the frame header (as an array of bytes).""" total_length = self.total_length() res = [0x06, 0x10, 0, 0, 0, 0] res[2] = (self.service_type_id >> 8) & 0xff res[3] = (self.service_type_id >> 0) & 0xff res[4] = (total_length >> 8) & 0xff res[5] = (total_length >> 0) & 0xff return res
['def', 'header', '(', 'self', ')', ':', 'total_length', '=', 'self', '.', 'total_length', '(', ')', 'res', '=', '[', '0x06', ',', '0x10', ',', '0', ',', '0', ',', '0', ',', '0', ']', 'res', '[', '2', ']', '=', '(', 'self', '.', 'service_type_id', '>>', '8', ')', '&', '0xff', 'res', '[', '3', ']', '=', '(', 'self', '.', 'service_type_id', '>>', '0', ')', '&', '0xff', 'res', '[', '4', ']', '=', '(', 'total_length', '>>', '8', ')', '&', '0xff', 'res', '[', '5', ']', '=', '(', 'total_length', '>>', '0', ')', '&', '0xff', 'return', 'res']
Return the frame header (as an array of bytes).
['Return', 'the', 'frame', 'header', '(', 'as', 'an', 'array', 'of', 'bytes', ')', '.']
train
https://github.com/open-homeautomation/pknx/blob/a8aed8271563923c447aa330ba7c1c2927286f7a/knxip/ip.py#L72-L80
7,149
facundobatista/yaswfp
yaswfp/swfparser.py
SWFParser._generic_action_parser
def _generic_action_parser(self): """Generic parser for Actions.""" actions = [] while True: action_code = unpack_ui8(self._src) if action_code == 0: break action_name = ACTION_NAMES[action_code] if action_code > 128: # have a payload! action_len = unpack_ui16(self._src) try: action_meth = getattr( self, "_handle_" + action_name.lower()) except AttributeError: if self.unknown_alert: raise ValueError( "Unknown action: " + repr(action_name)) action_payload = self._src.read(action_len) _dict = {'__str__': _repr, '__repr__': _repr, 'name': action_name} action = type("UnknownAction", (SWFObject,), _dict)() action.raw_payload = action_payload actions.append(action) else: prev_pos = self._src.tell() for action in action_meth(action_len): assert action is not None, action_name actions.append(action) quant_read = self._src.tell() - prev_pos if quant_read != action_len: raise RuntimeError( "Bad bytes consumption by action {!r} handler " "(did {}, should {})".format( action_name, quant_read, action_len)) else: action = _make_object(action_name) actions.append(action) return actions
python
def _generic_action_parser(self): """Generic parser for Actions.""" actions = [] while True: action_code = unpack_ui8(self._src) if action_code == 0: break action_name = ACTION_NAMES[action_code] if action_code > 128: # have a payload! action_len = unpack_ui16(self._src) try: action_meth = getattr( self, "_handle_" + action_name.lower()) except AttributeError: if self.unknown_alert: raise ValueError( "Unknown action: " + repr(action_name)) action_payload = self._src.read(action_len) _dict = {'__str__': _repr, '__repr__': _repr, 'name': action_name} action = type("UnknownAction", (SWFObject,), _dict)() action.raw_payload = action_payload actions.append(action) else: prev_pos = self._src.tell() for action in action_meth(action_len): assert action is not None, action_name actions.append(action) quant_read = self._src.tell() - prev_pos if quant_read != action_len: raise RuntimeError( "Bad bytes consumption by action {!r} handler " "(did {}, should {})".format( action_name, quant_read, action_len)) else: action = _make_object(action_name) actions.append(action) return actions
['def', '_generic_action_parser', '(', 'self', ')', ':', 'actions', '=', '[', ']', 'while', 'True', ':', 'action_code', '=', 'unpack_ui8', '(', 'self', '.', '_src', ')', 'if', 'action_code', '==', '0', ':', 'break', 'action_name', '=', 'ACTION_NAMES', '[', 'action_code', ']', 'if', 'action_code', '>', '128', ':', '# have a payload!', 'action_len', '=', 'unpack_ui16', '(', 'self', '.', '_src', ')', 'try', ':', 'action_meth', '=', 'getattr', '(', 'self', ',', '"_handle_"', '+', 'action_name', '.', 'lower', '(', ')', ')', 'except', 'AttributeError', ':', 'if', 'self', '.', 'unknown_alert', ':', 'raise', 'ValueError', '(', '"Unknown action: "', '+', 'repr', '(', 'action_name', ')', ')', 'action_payload', '=', 'self', '.', '_src', '.', 'read', '(', 'action_len', ')', '_dict', '=', '{', "'__str__'", ':', '_repr', ',', "'__repr__'", ':', '_repr', ',', "'name'", ':', 'action_name', '}', 'action', '=', 'type', '(', '"UnknownAction"', ',', '(', 'SWFObject', ',', ')', ',', '_dict', ')', '(', ')', 'action', '.', 'raw_payload', '=', 'action_payload', 'actions', '.', 'append', '(', 'action', ')', 'else', ':', 'prev_pos', '=', 'self', '.', '_src', '.', 'tell', '(', ')', 'for', 'action', 'in', 'action_meth', '(', 'action_len', ')', ':', 'assert', 'action', 'is', 'not', 'None', ',', 'action_name', 'actions', '.', 'append', '(', 'action', ')', 'quant_read', '=', 'self', '.', '_src', '.', 'tell', '(', ')', '-', 'prev_pos', 'if', 'quant_read', '!=', 'action_len', ':', 'raise', 'RuntimeError', '(', '"Bad bytes consumption by action {!r} handler "', '"(did {}, should {})"', '.', 'format', '(', 'action_name', ',', 'quant_read', ',', 'action_len', ')', ')', 'else', ':', 'action', '=', '_make_object', '(', 'action_name', ')', 'actions', '.', 'append', '(', 'action', ')', 'return', 'actions']
Generic parser for Actions.
['Generic', 'parser', 'for', 'Actions', '.']
train
https://github.com/facundobatista/yaswfp/blob/2a2cc6ca4c0b4d52bd2e658fb5f80fdc0db4924c/yaswfp/swfparser.py#L599-L640
7,150
DsixTools/python-smeftrunner
smeftrunner/io.py
sm_dict2lha
def sm_dict2lha(d): """Convert a a dictionary of SM parameters into a dictionary that pylha can convert into a DSixTools SM output file.""" blocks = OrderedDict([ ('GAUGE', {'values': [[1, d['g'].real], [2, d['gp'].real], [3, d['gs'].real]]}), ('SCALAR', {'values': [[1, d['Lambda'].real], [2, d['m2'].real]]}), ('GU', {'values': matrix2lha(d['Gu'].real)}), ('IMGU', {'values': matrix2lha(d['Gu'].imag)}), ('GD', {'values': matrix2lha(d['Gd'].real)}), ('IMGD', {'values': matrix2lha(d['Gd'].imag)}), ('GE', {'values': matrix2lha(d['Ge'].real)}), ('IMGE', {'values': matrix2lha(d['Ge'].imag)}), ('THETA', {'values': [[1, d['Theta'].real], [2, d['Thetap'].real], [3, d['Thetas'].real]]}), ]) return {'BLOCK': blocks}
python
def sm_dict2lha(d): """Convert a a dictionary of SM parameters into a dictionary that pylha can convert into a DSixTools SM output file.""" blocks = OrderedDict([ ('GAUGE', {'values': [[1, d['g'].real], [2, d['gp'].real], [3, d['gs'].real]]}), ('SCALAR', {'values': [[1, d['Lambda'].real], [2, d['m2'].real]]}), ('GU', {'values': matrix2lha(d['Gu'].real)}), ('IMGU', {'values': matrix2lha(d['Gu'].imag)}), ('GD', {'values': matrix2lha(d['Gd'].real)}), ('IMGD', {'values': matrix2lha(d['Gd'].imag)}), ('GE', {'values': matrix2lha(d['Ge'].real)}), ('IMGE', {'values': matrix2lha(d['Ge'].imag)}), ('THETA', {'values': [[1, d['Theta'].real], [2, d['Thetap'].real], [3, d['Thetas'].real]]}), ]) return {'BLOCK': blocks}
['def', 'sm_dict2lha', '(', 'd', ')', ':', 'blocks', '=', 'OrderedDict', '(', '[', '(', "'GAUGE'", ',', '{', "'values'", ':', '[', '[', '1', ',', 'd', '[', "'g'", ']', '.', 'real', ']', ',', '[', '2', ',', 'd', '[', "'gp'", ']', '.', 'real', ']', ',', '[', '3', ',', 'd', '[', "'gs'", ']', '.', 'real', ']', ']', '}', ')', ',', '(', "'SCALAR'", ',', '{', "'values'", ':', '[', '[', '1', ',', 'd', '[', "'Lambda'", ']', '.', 'real', ']', ',', '[', '2', ',', 'd', '[', "'m2'", ']', '.', 'real', ']', ']', '}', ')', ',', '(', "'GU'", ',', '{', "'values'", ':', 'matrix2lha', '(', 'd', '[', "'Gu'", ']', '.', 'real', ')', '}', ')', ',', '(', "'IMGU'", ',', '{', "'values'", ':', 'matrix2lha', '(', 'd', '[', "'Gu'", ']', '.', 'imag', ')', '}', ')', ',', '(', "'GD'", ',', '{', "'values'", ':', 'matrix2lha', '(', 'd', '[', "'Gd'", ']', '.', 'real', ')', '}', ')', ',', '(', "'IMGD'", ',', '{', "'values'", ':', 'matrix2lha', '(', 'd', '[', "'Gd'", ']', '.', 'imag', ')', '}', ')', ',', '(', "'GE'", ',', '{', "'values'", ':', 'matrix2lha', '(', 'd', '[', "'Ge'", ']', '.', 'real', ')', '}', ')', ',', '(', "'IMGE'", ',', '{', "'values'", ':', 'matrix2lha', '(', 'd', '[', "'Ge'", ']', '.', 'imag', ')', '}', ')', ',', '(', "'THETA'", ',', '{', "'values'", ':', '[', '[', '1', ',', 'd', '[', "'Theta'", ']', '.', 'real', ']', ',', '[', '2', ',', 'd', '[', "'Thetap'", ']', '.', 'real', ']', ',', '[', '3', ',', 'd', '[', "'Thetas'", ']', '.', 'real', ']', ']', '}', ')', ',', ']', ')', 'return', '{', "'BLOCK'", ':', 'blocks', '}']
Convert a a dictionary of SM parameters into a dictionary that pylha can convert into a DSixTools SM output file.
['Convert', 'a', 'a', 'dictionary', 'of', 'SM', 'parameters', 'into', 'a', 'dictionary', 'that', 'pylha', 'can', 'convert', 'into', 'a', 'DSixTools', 'SM', 'output', 'file', '.']
train
https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/io.py#L72-L86
7,151
cackharot/suds-py3
suds/properties.py
Properties.get
def get(self, name, *df): """ Get the value of a property by I{name}. @param name: The property name. @type name: str @param df: An optional value to be returned when the value is not set @type df: [1]. @return: The stored value, or I{df[0]} if not set. @rtype: any """ return self.provider(name).__get(name, *df)
python
def get(self, name, *df): """ Get the value of a property by I{name}. @param name: The property name. @type name: str @param df: An optional value to be returned when the value is not set @type df: [1]. @return: The stored value, or I{df[0]} if not set. @rtype: any """ return self.provider(name).__get(name, *df)
['def', 'get', '(', 'self', ',', 'name', ',', '*', 'df', ')', ':', 'return', 'self', '.', 'provider', '(', 'name', ')', '.', '__get', '(', 'name', ',', '*', 'df', ')']
Get the value of a property by I{name}. @param name: The property name. @type name: str @param df: An optional value to be returned when the value is not set @type df: [1]. @return: The stored value, or I{df[0]} if not set. @rtype: any
['Get', 'the', 'value', 'of', 'a', 'property', 'by', 'I', '{', 'name', '}', '.']
train
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/properties.py#L297-L308
7,152
getnikola/coil
coil/web.py
acp_users_delete
def acp_users_delete(): """Delete or undelete an user account.""" if not current_user.is_admin: return error("Not authorized to edit users.", 401) if not db: return error('The ACP is not available in single-user mode.', 500) form = UserDeleteForm() if not form.validate(): return error("Bad Request", 400) user = get_user(int(request.form['uid'])) direction = request.form['direction'] if not user: return error("User does not exist.", 404) else: for p in PERMISSIONS: setattr(user, p, False) user.active = direction == 'undel' write_user(user) return redirect(url_for('acp_users') + '?status={_del}eted'.format( _del=direction))
python
def acp_users_delete(): """Delete or undelete an user account.""" if not current_user.is_admin: return error("Not authorized to edit users.", 401) if not db: return error('The ACP is not available in single-user mode.', 500) form = UserDeleteForm() if not form.validate(): return error("Bad Request", 400) user = get_user(int(request.form['uid'])) direction = request.form['direction'] if not user: return error("User does not exist.", 404) else: for p in PERMISSIONS: setattr(user, p, False) user.active = direction == 'undel' write_user(user) return redirect(url_for('acp_users') + '?status={_del}eted'.format( _del=direction))
['def', 'acp_users_delete', '(', ')', ':', 'if', 'not', 'current_user', '.', 'is_admin', ':', 'return', 'error', '(', '"Not authorized to edit users."', ',', '401', ')', 'if', 'not', 'db', ':', 'return', 'error', '(', "'The ACP is not available in single-user mode.'", ',', '500', ')', 'form', '=', 'UserDeleteForm', '(', ')', 'if', 'not', 'form', '.', 'validate', '(', ')', ':', 'return', 'error', '(', '"Bad Request"', ',', '400', ')', 'user', '=', 'get_user', '(', 'int', '(', 'request', '.', 'form', '[', "'uid'", ']', ')', ')', 'direction', '=', 'request', '.', 'form', '[', "'direction'", ']', 'if', 'not', 'user', ':', 'return', 'error', '(', '"User does not exist."', ',', '404', ')', 'else', ':', 'for', 'p', 'in', 'PERMISSIONS', ':', 'setattr', '(', 'user', ',', 'p', ',', 'False', ')', 'user', '.', 'active', '=', 'direction', '==', "'undel'", 'write_user', '(', 'user', ')', 'return', 'redirect', '(', 'url_for', '(', "'acp_users'", ')', '+', "'?status={_del}eted'", '.', 'format', '(', '_del', '=', 'direction', ')', ')']
Delete or undelete an user account.
['Delete', 'or', 'undelete', 'an', 'user', 'account', '.']
train
https://github.com/getnikola/coil/blob/80ef1827460b0691cf2c98351a14d88e235c9899/coil/web.py#L1144-L1164
7,153
RudolfCardinal/pythonlib
cardinal_pythonlib/platformfunc.py
are_debian_packages_installed
def are_debian_packages_installed(packages: List[str]) -> Dict[str, bool]: """ Check which of a list of Debian packages are installed, via ``dpkg-query``. Args: packages: list of Debian package names Returns: dict: mapping from package name to boolean ("present?") """ assert len(packages) >= 1 require_executable(DPKG_QUERY) args = [ DPKG_QUERY, "-W", # --show # "-f='${Package} ${Status} ${Version}\n'", "-f=${Package} ${Status}\n", # --showformat ] + packages completed_process = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) encoding = sys.getdefaultencoding() stdout = completed_process.stdout.decode(encoding) stderr = completed_process.stderr.decode(encoding) present = OrderedDict() for line in stdout.split("\n"): if line: # e.g. "autoconf install ok installed" words = line.split() assert len(words) >= 2 package = words[0] present[package] = "installed" in words[1:] for line in stderr.split("\n"): if line: # e.g. "dpkg-query: no packages found matching XXX" words = line.split() assert len(words) >= 2 package = words[-1] present[package] = False log.debug("Debian package presence: {}", present) return present
python
def are_debian_packages_installed(packages: List[str]) -> Dict[str, bool]: """ Check which of a list of Debian packages are installed, via ``dpkg-query``. Args: packages: list of Debian package names Returns: dict: mapping from package name to boolean ("present?") """ assert len(packages) >= 1 require_executable(DPKG_QUERY) args = [ DPKG_QUERY, "-W", # --show # "-f='${Package} ${Status} ${Version}\n'", "-f=${Package} ${Status}\n", # --showformat ] + packages completed_process = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) encoding = sys.getdefaultencoding() stdout = completed_process.stdout.decode(encoding) stderr = completed_process.stderr.decode(encoding) present = OrderedDict() for line in stdout.split("\n"): if line: # e.g. "autoconf install ok installed" words = line.split() assert len(words) >= 2 package = words[0] present[package] = "installed" in words[1:] for line in stderr.split("\n"): if line: # e.g. "dpkg-query: no packages found matching XXX" words = line.split() assert len(words) >= 2 package = words[-1] present[package] = False log.debug("Debian package presence: {}", present) return present
['def', 'are_debian_packages_installed', '(', 'packages', ':', 'List', '[', 'str', ']', ')', '->', 'Dict', '[', 'str', ',', 'bool', ']', ':', 'assert', 'len', '(', 'packages', ')', '>=', '1', 'require_executable', '(', 'DPKG_QUERY', ')', 'args', '=', '[', 'DPKG_QUERY', ',', '"-W"', ',', '# --show', '# "-f=\'${Package} ${Status} ${Version}\\n\'",', '"-f=${Package} ${Status}\\n"', ',', '# --showformat', ']', '+', 'packages', 'completed_process', '=', 'subprocess', '.', 'run', '(', 'args', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ',', 'check', '=', 'False', ')', 'encoding', '=', 'sys', '.', 'getdefaultencoding', '(', ')', 'stdout', '=', 'completed_process', '.', 'stdout', '.', 'decode', '(', 'encoding', ')', 'stderr', '=', 'completed_process', '.', 'stderr', '.', 'decode', '(', 'encoding', ')', 'present', '=', 'OrderedDict', '(', ')', 'for', 'line', 'in', 'stdout', '.', 'split', '(', '"\\n"', ')', ':', 'if', 'line', ':', '# e.g. "autoconf install ok installed"', 'words', '=', 'line', '.', 'split', '(', ')', 'assert', 'len', '(', 'words', ')', '>=', '2', 'package', '=', 'words', '[', '0', ']', 'present', '[', 'package', ']', '=', '"installed"', 'in', 'words', '[', '1', ':', ']', 'for', 'line', 'in', 'stderr', '.', 'split', '(', '"\\n"', ')', ':', 'if', 'line', ':', '# e.g. "dpkg-query: no packages found matching XXX"', 'words', '=', 'line', '.', 'split', '(', ')', 'assert', 'len', '(', 'words', ')', '>=', '2', 'package', '=', 'words', '[', '-', '1', ']', 'present', '[', 'package', ']', '=', 'False', 'log', '.', 'debug', '(', '"Debian package presence: {}"', ',', 'present', ')', 'return', 'present']
Check which of a list of Debian packages are installed, via ``dpkg-query``. Args: packages: list of Debian package names Returns: dict: mapping from package name to boolean ("present?")
['Check', 'which', 'of', 'a', 'list', 'of', 'Debian', 'packages', 'are', 'installed', 'via', 'dpkg', '-', 'query', '.']
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/platformfunc.py#L104-L144
7,154
PGower/PyCanvas
pycanvas/apis/enrollments.py
EnrollmentsAPI.conclude_deactivate_or_delete_enrollment
def conclude_deactivate_or_delete_enrollment(self, id, course_id, task=None): """ Conclude, deactivate, or delete an enrollment. Conclude, deactivate, or delete an enrollment. If the +task+ argument isn't given, the enrollment will be concluded. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - task """The action to take on the enrollment. When inactive, a user will still appear in the course roster to admins, but be unable to participate. ("inactivate" and "deactivate" are equivalent tasks)""" if task is not None: self._validate_enum(task, ["conclude", "delete", "inactivate", "deactivate"]) params["task"] = task self.logger.debug("DELETE /api/v1/courses/{course_id}/enrollments/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/courses/{course_id}/enrollments/{id}".format(**path), data=data, params=params, single_item=True)
python
def conclude_deactivate_or_delete_enrollment(self, id, course_id, task=None): """ Conclude, deactivate, or delete an enrollment. Conclude, deactivate, or delete an enrollment. If the +task+ argument isn't given, the enrollment will be concluded. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - task """The action to take on the enrollment. When inactive, a user will still appear in the course roster to admins, but be unable to participate. ("inactivate" and "deactivate" are equivalent tasks)""" if task is not None: self._validate_enum(task, ["conclude", "delete", "inactivate", "deactivate"]) params["task"] = task self.logger.debug("DELETE /api/v1/courses/{course_id}/enrollments/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/courses/{course_id}/enrollments/{id}".format(**path), data=data, params=params, single_item=True)
['def', 'conclude_deactivate_or_delete_enrollment', '(', 'self', ',', 'id', ',', 'course_id', ',', 'task', '=', 'None', ')', ':', 'path', '=', '{', '}', 'data', '=', '{', '}', 'params', '=', '{', '}', '# REQUIRED - PATH - course_id\r', '"""ID"""', 'path', '[', '"course_id"', ']', '=', 'course_id', '# REQUIRED - PATH - id\r', '"""ID"""', 'path', '[', '"id"', ']', '=', 'id', '# OPTIONAL - task\r', '"""The action to take on the enrollment.\r\n When inactive, a user will still appear in the course roster to admins, but be unable to participate.\r\n ("inactivate" and "deactivate" are equivalent tasks)"""', 'if', 'task', 'is', 'not', 'None', ':', 'self', '.', '_validate_enum', '(', 'task', ',', '[', '"conclude"', ',', '"delete"', ',', '"inactivate"', ',', '"deactivate"', ']', ')', 'params', '[', '"task"', ']', '=', 'task', 'self', '.', 'logger', '.', 'debug', '(', '"DELETE /api/v1/courses/{course_id}/enrollments/{id} with query params: {params} and form data: {data}"', '.', 'format', '(', 'params', '=', 'params', ',', 'data', '=', 'data', ',', '*', '*', 'path', ')', ')', 'return', 'self', '.', 'generic_request', '(', '"DELETE"', ',', '"/api/v1/courses/{course_id}/enrollments/{id}"', '.', 'format', '(', '*', '*', 'path', ')', ',', 'data', '=', 'data', ',', 'params', '=', 'params', ',', 'single_item', '=', 'True', ')']
Conclude, deactivate, or delete an enrollment. Conclude, deactivate, or delete an enrollment. If the +task+ argument isn't given, the enrollment will be concluded.
['Conclude', 'deactivate', 'or', 'delete', 'an', 'enrollment', '.', 'Conclude', 'deactivate', 'or', 'delete', 'an', 'enrollment', '.', 'If', 'the', '+', 'task', '+', 'argument', 'isn', 't', 'given', 'the', 'enrollment', 'will', 'be', 'concluded', '.']
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/enrollments.py#L521-L549
7,155
quantopian/zipline
zipline/assets/assets.py
_filter_kwargs
def _filter_kwargs(names, dict_): """Filter out kwargs from a dictionary. Parameters ---------- names : set[str] The names to select from ``dict_``. dict_ : dict[str, any] The dictionary to select from. Returns ------- kwargs : dict[str, any] ``dict_`` where the keys intersect with ``names`` and the values are not None. """ return {k: v for k, v in dict_.items() if k in names and v is not None}
python
def _filter_kwargs(names, dict_): """Filter out kwargs from a dictionary. Parameters ---------- names : set[str] The names to select from ``dict_``. dict_ : dict[str, any] The dictionary to select from. Returns ------- kwargs : dict[str, any] ``dict_`` where the keys intersect with ``names`` and the values are not None. """ return {k: v for k, v in dict_.items() if k in names and v is not None}
['def', '_filter_kwargs', '(', 'names', ',', 'dict_', ')', ':', 'return', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'dict_', '.', 'items', '(', ')', 'if', 'k', 'in', 'names', 'and', 'v', 'is', 'not', 'None', '}']
Filter out kwargs from a dictionary. Parameters ---------- names : set[str] The names to select from ``dict_``. dict_ : dict[str, any] The dictionary to select from. Returns ------- kwargs : dict[str, any] ``dict_`` where the keys intersect with ``names`` and the values are not None.
['Filter', 'out', 'kwargs', 'from', 'a', 'dictionary', '.']
train
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L193-L209
7,156
Microsoft/knack
knack/deprecation.py
Deprecated._version_less_than_or_equal_to
def _version_less_than_or_equal_to(self, v1, v2): """ Returns true if v1 <= v2. """ # pylint: disable=no-name-in-module, import-error from distutils.version import LooseVersion return LooseVersion(v1) <= LooseVersion(v2)
python
def _version_less_than_or_equal_to(self, v1, v2): """ Returns true if v1 <= v2. """ # pylint: disable=no-name-in-module, import-error from distutils.version import LooseVersion return LooseVersion(v1) <= LooseVersion(v2)
['def', '_version_less_than_or_equal_to', '(', 'self', ',', 'v1', ',', 'v2', ')', ':', '# pylint: disable=no-name-in-module, import-error', 'from', 'distutils', '.', 'version', 'import', 'LooseVersion', 'return', 'LooseVersion', '(', 'v1', ')', '<=', 'LooseVersion', '(', 'v2', ')']
Returns true if v1 <= v2.
['Returns', 'true', 'if', 'v1', '<', '=', 'v2', '.']
train
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/deprecation.py#L127-L131
7,157
LeafSoftware/python-lambder
lambder/cli.py
new
def new( name, bucket, timeout, memory, description, subnet_ids, security_group_ids ): """ Create a new lambda project """ config = {} if timeout: config['timeout'] = timeout if memory: config['memory'] = memory if description: config['description'] = description if subnet_ids: config['subnet_ids'] = subnet_ids if security_group_ids: config['security_group_ids'] = security_group_ids lambder.create_project(name, bucket, config)
python
def new( name, bucket, timeout, memory, description, subnet_ids, security_group_ids ): """ Create a new lambda project """ config = {} if timeout: config['timeout'] = timeout if memory: config['memory'] = memory if description: config['description'] = description if subnet_ids: config['subnet_ids'] = subnet_ids if security_group_ids: config['security_group_ids'] = security_group_ids lambder.create_project(name, bucket, config)
['def', 'new', '(', 'name', ',', 'bucket', ',', 'timeout', ',', 'memory', ',', 'description', ',', 'subnet_ids', ',', 'security_group_ids', ')', ':', 'config', '=', '{', '}', 'if', 'timeout', ':', 'config', '[', "'timeout'", ']', '=', 'timeout', 'if', 'memory', ':', 'config', '[', "'memory'", ']', '=', 'memory', 'if', 'description', ':', 'config', '[', "'description'", ']', '=', 'description', 'if', 'subnet_ids', ':', 'config', '[', "'subnet_ids'", ']', '=', 'subnet_ids', 'if', 'security_group_ids', ':', 'config', '[', "'security_group_ids'", ']', '=', 'security_group_ids', 'lambder', '.', 'create_project', '(', 'name', ',', 'bucket', ',', 'config', ')']
Create a new lambda project
['Create', 'a', 'new', 'lambda', 'project']
train
https://github.com/LeafSoftware/python-lambder/blob/1c50b5dd2af286286e1547ee87d815d66382b884/lambder/cli.py#L133-L155
7,158
LonamiWebs/Telethon
telethon_examples/assistant.py
handler
async def handler(event): """#learn or #python: Tells the user to learn some Python first.""" await asyncio.wait([ event.delete(), event.respond( LEARN_PYTHON, reply_to=event.reply_to_msg_id, link_preview=False) ])
python
async def handler(event): """#learn or #python: Tells the user to learn some Python first.""" await asyncio.wait([ event.delete(), event.respond( LEARN_PYTHON, reply_to=event.reply_to_msg_id, link_preview=False) ])
['async', 'def', 'handler', '(', 'event', ')', ':', 'await', 'asyncio', '.', 'wait', '(', '[', 'event', '.', 'delete', '(', ')', ',', 'event', '.', 'respond', '(', 'LEARN_PYTHON', ',', 'reply_to', '=', 'event', '.', 'reply_to_msg_id', ',', 'link_preview', '=', 'False', ')', ']', ')']
#learn or #python: Tells the user to learn some Python first.
['#learn', 'or', '#python', ':', 'Tells', 'the', 'user', 'to', 'learn', 'some', 'Python', 'first', '.']
train
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_examples/assistant.py#L301-L307
7,159
ray-project/ray
python/ray/actor.py
get_checkpoints_for_actor
def get_checkpoints_for_actor(actor_id): """Get the available checkpoints for the given actor ID, return a list sorted by checkpoint timestamp in descending order. """ checkpoint_info = ray.worker.global_state.actor_checkpoint_info(actor_id) if checkpoint_info is None: return [] checkpoints = [ Checkpoint(checkpoint_id, timestamp) for checkpoint_id, timestamp in zip(checkpoint_info["CheckpointIds"], checkpoint_info["Timestamps"]) ] return sorted( checkpoints, key=lambda checkpoint: checkpoint.timestamp, reverse=True, )
python
def get_checkpoints_for_actor(actor_id): """Get the available checkpoints for the given actor ID, return a list sorted by checkpoint timestamp in descending order. """ checkpoint_info = ray.worker.global_state.actor_checkpoint_info(actor_id) if checkpoint_info is None: return [] checkpoints = [ Checkpoint(checkpoint_id, timestamp) for checkpoint_id, timestamp in zip(checkpoint_info["CheckpointIds"], checkpoint_info["Timestamps"]) ] return sorted( checkpoints, key=lambda checkpoint: checkpoint.timestamp, reverse=True, )
['def', 'get_checkpoints_for_actor', '(', 'actor_id', ')', ':', 'checkpoint_info', '=', 'ray', '.', 'worker', '.', 'global_state', '.', 'actor_checkpoint_info', '(', 'actor_id', ')', 'if', 'checkpoint_info', 'is', 'None', ':', 'return', '[', ']', 'checkpoints', '=', '[', 'Checkpoint', '(', 'checkpoint_id', ',', 'timestamp', ')', 'for', 'checkpoint_id', ',', 'timestamp', 'in', 'zip', '(', 'checkpoint_info', '[', '"CheckpointIds"', ']', ',', 'checkpoint_info', '[', '"Timestamps"', ']', ')', ']', 'return', 'sorted', '(', 'checkpoints', ',', 'key', '=', 'lambda', 'checkpoint', ':', 'checkpoint', '.', 'timestamp', ',', 'reverse', '=', 'True', ',', ')']
Get the available checkpoints for the given actor ID, return a list sorted by checkpoint timestamp in descending order.
['Get', 'the', 'available', 'checkpoints', 'for', 'the', 'given', 'actor', 'ID', 'return', 'a', 'list', 'sorted', 'by', 'checkpoint', 'timestamp', 'in', 'descending', 'order', '.']
train
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/actor.py#L869-L884
7,160
twaldear/flask-secure-headers
flask_secure_headers/headers.py
Simple_Header.check_valid
def check_valid(self): """ check if input is valid """ for k,input in self.inputs.items(): if k in self.valid_opts: for param in self.valid_opts[k]: if param is None or input is None: return True elif type(param) is str and '+' in param: if re.search(r'^'+param,str(input)): return True elif type(param) is bool and type(input) is bool: return True elif type(param) is list and type(input) is list: return True else: if str(input).lower() == str(param): return True raise ValueError("Invalid input for '%s' parameter. Options are: %s" % (k,' '.join(["'%s'," % str(o) for o in self.valid_opts[k]]) )) else: raise ValueError("Invalid parameter for '%s'. Params are: %s" % (self.__class__.__name__,', '.join(["'%s'" % p for p in self.valid_opts.keys()]) ))
python
def check_valid(self): """ check if input is valid """ for k,input in self.inputs.items(): if k in self.valid_opts: for param in self.valid_opts[k]: if param is None or input is None: return True elif type(param) is str and '+' in param: if re.search(r'^'+param,str(input)): return True elif type(param) is bool and type(input) is bool: return True elif type(param) is list and type(input) is list: return True else: if str(input).lower() == str(param): return True raise ValueError("Invalid input for '%s' parameter. Options are: %s" % (k,' '.join(["'%s'," % str(o) for o in self.valid_opts[k]]) )) else: raise ValueError("Invalid parameter for '%s'. Params are: %s" % (self.__class__.__name__,', '.join(["'%s'" % p for p in self.valid_opts.keys()]) ))
['def', 'check_valid', '(', 'self', ')', ':', 'for', 'k', ',', 'input', 'in', 'self', '.', 'inputs', '.', 'items', '(', ')', ':', 'if', 'k', 'in', 'self', '.', 'valid_opts', ':', 'for', 'param', 'in', 'self', '.', 'valid_opts', '[', 'k', ']', ':', 'if', 'param', 'is', 'None', 'or', 'input', 'is', 'None', ':', 'return', 'True', 'elif', 'type', '(', 'param', ')', 'is', 'str', 'and', "'+'", 'in', 'param', ':', 'if', 're', '.', 'search', '(', "r'^'", '+', 'param', ',', 'str', '(', 'input', ')', ')', ':', 'return', 'True', 'elif', 'type', '(', 'param', ')', 'is', 'bool', 'and', 'type', '(', 'input', ')', 'is', 'bool', ':', 'return', 'True', 'elif', 'type', '(', 'param', ')', 'is', 'list', 'and', 'type', '(', 'input', ')', 'is', 'list', ':', 'return', 'True', 'else', ':', 'if', 'str', '(', 'input', ')', '.', 'lower', '(', ')', '==', 'str', '(', 'param', ')', ':', 'return', 'True', 'raise', 'ValueError', '(', '"Invalid input for \'%s\' parameter. Options are: %s"', '%', '(', 'k', ',', "' '", '.', 'join', '(', '[', '"\'%s\',"', '%', 'str', '(', 'o', ')', 'for', 'o', 'in', 'self', '.', 'valid_opts', '[', 'k', ']', ']', ')', ')', ')', 'else', ':', 'raise', 'ValueError', '(', '"Invalid parameter for \'%s\'. Params are: %s"', '%', '(', 'self', '.', '__class__', '.', '__name__', ',', "', '", '.', 'join', '(', '[', '"\'%s\'"', '%', 'p', 'for', 'p', 'in', 'self', '.', 'valid_opts', '.', 'keys', '(', ')', ']', ')', ')', ')']
check if input is valid
['check', 'if', 'input', 'is', 'valid']
train
https://github.com/twaldear/flask-secure-headers/blob/3eca972b369608a7669b67cbe66679570a6505ce/flask_secure_headers/headers.py#L5-L24
7,161
python-escpos/python-escpos
src/escpos/magicencode.py
Encoder.encode
def encode(self, text, encoding, defaultchar='?'): """ Encode text under the given encoding :param text: Text to encode :param encoding: Encoding name to use (must be defined in capabilities) :param defaultchar: Fallback for non-encodable characters """ codepage_char_map = self._get_codepage_char_map(encoding) output_bytes = bytes([self._encode_char(char, codepage_char_map, defaultchar) for char in text]) return output_bytes
python
def encode(self, text, encoding, defaultchar='?'): """ Encode text under the given encoding :param text: Text to encode :param encoding: Encoding name to use (must be defined in capabilities) :param defaultchar: Fallback for non-encodable characters """ codepage_char_map = self._get_codepage_char_map(encoding) output_bytes = bytes([self._encode_char(char, codepage_char_map, defaultchar) for char in text]) return output_bytes
['def', 'encode', '(', 'self', ',', 'text', ',', 'encoding', ',', 'defaultchar', '=', "'?'", ')', ':', 'codepage_char_map', '=', 'self', '.', '_get_codepage_char_map', '(', 'encoding', ')', 'output_bytes', '=', 'bytes', '(', '[', 'self', '.', '_encode_char', '(', 'char', ',', 'codepage_char_map', ',', 'defaultchar', ')', 'for', 'char', 'in', 'text', ']', ')', 'return', 'output_bytes']
Encode text under the given encoding :param text: Text to encode :param encoding: Encoding name to use (must be defined in capabilities) :param defaultchar: Fallback for non-encodable characters
['Encode', 'text', 'under', 'the', 'given', 'encoding']
train
https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/magicencode.py#L141-L150
7,162
mikedh/trimesh
trimesh/visual/color.py
interpolate
def interpolate(values, color_map=None, dtype=np.uint8): """ Given a 1D list of values, return interpolated colors for the range. Parameters --------------- values : (n, ) float Values to be interpolated over color_map : None, or str Key to a colormap contained in: matplotlib.pyplot.colormaps() e.g: 'viridis' Returns ------------- interpolated : (n, 4) dtype Interpolated RGBA colors """ # get a color interpolation function if color_map is None: cmap = linear_color_map else: from matplotlib.pyplot import get_cmap cmap = get_cmap(color_map) # make input always float values = np.asanyarray(values, dtype=np.float64).ravel() # scale values to 0.0 - 1.0 and get colors colors = cmap((values - values.min()) / values.ptp()) # convert to 0-255 RGBA rgba = to_rgba(colors, dtype=dtype) return rgba
python
def interpolate(values, color_map=None, dtype=np.uint8): """ Given a 1D list of values, return interpolated colors for the range. Parameters --------------- values : (n, ) float Values to be interpolated over color_map : None, or str Key to a colormap contained in: matplotlib.pyplot.colormaps() e.g: 'viridis' Returns ------------- interpolated : (n, 4) dtype Interpolated RGBA colors """ # get a color interpolation function if color_map is None: cmap = linear_color_map else: from matplotlib.pyplot import get_cmap cmap = get_cmap(color_map) # make input always float values = np.asanyarray(values, dtype=np.float64).ravel() # scale values to 0.0 - 1.0 and get colors colors = cmap((values - values.min()) / values.ptp()) # convert to 0-255 RGBA rgba = to_rgba(colors, dtype=dtype) return rgba
['def', 'interpolate', '(', 'values', ',', 'color_map', '=', 'None', ',', 'dtype', '=', 'np', '.', 'uint8', ')', ':', '# get a color interpolation function', 'if', 'color_map', 'is', 'None', ':', 'cmap', '=', 'linear_color_map', 'else', ':', 'from', 'matplotlib', '.', 'pyplot', 'import', 'get_cmap', 'cmap', '=', 'get_cmap', '(', 'color_map', ')', '# make input always float', 'values', '=', 'np', '.', 'asanyarray', '(', 'values', ',', 'dtype', '=', 'np', '.', 'float64', ')', '.', 'ravel', '(', ')', '# scale values to 0.0 - 1.0 and get colors', 'colors', '=', 'cmap', '(', '(', 'values', '-', 'values', '.', 'min', '(', ')', ')', '/', 'values', '.', 'ptp', '(', ')', ')', '# convert to 0-255 RGBA', 'rgba', '=', 'to_rgba', '(', 'colors', ',', 'dtype', '=', 'dtype', ')', 'return', 'rgba']
Given a 1D list of values, return interpolated colors for the range. Parameters --------------- values : (n, ) float Values to be interpolated over color_map : None, or str Key to a colormap contained in: matplotlib.pyplot.colormaps() e.g: 'viridis' Returns ------------- interpolated : (n, 4) dtype Interpolated RGBA colors
['Given', 'a', '1D', 'list', 'of', 'values', 'return', 'interpolated', 'colors', 'for', 'the', 'range', '.']
train
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/visual/color.py#L703-L737
7,163
opendatateam/udata
udata/harvest/actions.py
schedule
def schedule(ident, cron=None, minute='*', hour='*', day_of_week='*', day_of_month='*', month_of_year='*'): '''Schedule an harvesting on a source given a crontab''' source = get_source(ident) if cron: minute, hour, day_of_month, month_of_year, day_of_week = cron.split() crontab = PeriodicTask.Crontab( minute=str(minute), hour=str(hour), day_of_week=str(day_of_week), day_of_month=str(day_of_month), month_of_year=str(month_of_year) ) if source.periodic_task: source.periodic_task.modify(crontab=crontab) else: source.modify(periodic_task=PeriodicTask.objects.create( task='harvest', name='Harvest {0}'.format(source.name), description='Periodic Harvesting', enabled=True, args=[str(source.id)], crontab=crontab, )) signals.harvest_source_scheduled.send(source) return source
python
def schedule(ident, cron=None, minute='*', hour='*', day_of_week='*', day_of_month='*', month_of_year='*'): '''Schedule an harvesting on a source given a crontab''' source = get_source(ident) if cron: minute, hour, day_of_month, month_of_year, day_of_week = cron.split() crontab = PeriodicTask.Crontab( minute=str(minute), hour=str(hour), day_of_week=str(day_of_week), day_of_month=str(day_of_month), month_of_year=str(month_of_year) ) if source.periodic_task: source.periodic_task.modify(crontab=crontab) else: source.modify(periodic_task=PeriodicTask.objects.create( task='harvest', name='Harvest {0}'.format(source.name), description='Periodic Harvesting', enabled=True, args=[str(source.id)], crontab=crontab, )) signals.harvest_source_scheduled.send(source) return source
['def', 'schedule', '(', 'ident', ',', 'cron', '=', 'None', ',', 'minute', '=', "'*'", ',', 'hour', '=', "'*'", ',', 'day_of_week', '=', "'*'", ',', 'day_of_month', '=', "'*'", ',', 'month_of_year', '=', "'*'", ')', ':', 'source', '=', 'get_source', '(', 'ident', ')', 'if', 'cron', ':', 'minute', ',', 'hour', ',', 'day_of_month', ',', 'month_of_year', ',', 'day_of_week', '=', 'cron', '.', 'split', '(', ')', 'crontab', '=', 'PeriodicTask', '.', 'Crontab', '(', 'minute', '=', 'str', '(', 'minute', ')', ',', 'hour', '=', 'str', '(', 'hour', ')', ',', 'day_of_week', '=', 'str', '(', 'day_of_week', ')', ',', 'day_of_month', '=', 'str', '(', 'day_of_month', ')', ',', 'month_of_year', '=', 'str', '(', 'month_of_year', ')', ')', 'if', 'source', '.', 'periodic_task', ':', 'source', '.', 'periodic_task', '.', 'modify', '(', 'crontab', '=', 'crontab', ')', 'else', ':', 'source', '.', 'modify', '(', 'periodic_task', '=', 'PeriodicTask', '.', 'objects', '.', 'create', '(', 'task', '=', "'harvest'", ',', 'name', '=', "'Harvest {0}'", '.', 'format', '(', 'source', '.', 'name', ')', ',', 'description', '=', "'Periodic Harvesting'", ',', 'enabled', '=', 'True', ',', 'args', '=', '[', 'str', '(', 'source', '.', 'id', ')', ']', ',', 'crontab', '=', 'crontab', ',', ')', ')', 'signals', '.', 'harvest_source_scheduled', '.', 'send', '(', 'source', ')', 'return', 'source']
Schedule an harvesting on a source given a crontab
['Schedule', 'an', 'harvesting', 'on', 'a', 'source', 'given', 'a', 'crontab']
train
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L190-L218
7,164
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_all_columns_are_used
def ensure_all_columns_are_used(num_vars_accounted_for, dataframe, data_title='long_data'): """ Ensure that all of the columns from dataframe are in the list of used_cols. Will raise a helpful UserWarning if otherwise. Parameters ---------- num_vars_accounted_for : int. Denotes the number of variables used in one's function. dataframe : pandas dataframe. Contains all of the data to be converted from one format to another. data_title : str, optional. Denotes the title by which `dataframe` should be referred in the UserWarning. Returns ------- None. """ dataframe_vars = set(dataframe.columns.tolist()) num_dataframe_vars = len(dataframe_vars) if num_vars_accounted_for == num_dataframe_vars: pass elif num_vars_accounted_for < num_dataframe_vars: msg = "Note, there are {:,} variables in {} but the inputs" msg_2 = " ind_vars, alt_specific_vars, and subset_specific_vars only" msg_3 = " account for {:,} variables." warnings.warn(msg.format(num_dataframe_vars, data_title) + msg_2 + msg_3.format(num_vars_accounted_for)) else: # This means num_vars_accounted_for > num_dataframe_vars msg = "There are more variable specified in ind_vars, " msg_2 = "alt_specific_vars, and subset_specific_vars ({:,}) than there" msg_3 = " are variables in {} ({:,})" warnings.warn(msg + msg_2.format(num_vars_accounted_for) + msg_3.format(data_title, num_dataframe_vars)) return None
python
def ensure_all_columns_are_used(num_vars_accounted_for, dataframe, data_title='long_data'): """ Ensure that all of the columns from dataframe are in the list of used_cols. Will raise a helpful UserWarning if otherwise. Parameters ---------- num_vars_accounted_for : int. Denotes the number of variables used in one's function. dataframe : pandas dataframe. Contains all of the data to be converted from one format to another. data_title : str, optional. Denotes the title by which `dataframe` should be referred in the UserWarning. Returns ------- None. """ dataframe_vars = set(dataframe.columns.tolist()) num_dataframe_vars = len(dataframe_vars) if num_vars_accounted_for == num_dataframe_vars: pass elif num_vars_accounted_for < num_dataframe_vars: msg = "Note, there are {:,} variables in {} but the inputs" msg_2 = " ind_vars, alt_specific_vars, and subset_specific_vars only" msg_3 = " account for {:,} variables." warnings.warn(msg.format(num_dataframe_vars, data_title) + msg_2 + msg_3.format(num_vars_accounted_for)) else: # This means num_vars_accounted_for > num_dataframe_vars msg = "There are more variable specified in ind_vars, " msg_2 = "alt_specific_vars, and subset_specific_vars ({:,}) than there" msg_3 = " are variables in {} ({:,})" warnings.warn(msg + msg_2.format(num_vars_accounted_for) + msg_3.format(data_title, num_dataframe_vars)) return None
['def', 'ensure_all_columns_are_used', '(', 'num_vars_accounted_for', ',', 'dataframe', ',', 'data_title', '=', "'long_data'", ')', ':', 'dataframe_vars', '=', 'set', '(', 'dataframe', '.', 'columns', '.', 'tolist', '(', ')', ')', 'num_dataframe_vars', '=', 'len', '(', 'dataframe_vars', ')', 'if', 'num_vars_accounted_for', '==', 'num_dataframe_vars', ':', 'pass', 'elif', 'num_vars_accounted_for', '<', 'num_dataframe_vars', ':', 'msg', '=', '"Note, there are {:,} variables in {} but the inputs"', 'msg_2', '=', '" ind_vars, alt_specific_vars, and subset_specific_vars only"', 'msg_3', '=', '" account for {:,} variables."', 'warnings', '.', 'warn', '(', 'msg', '.', 'format', '(', 'num_dataframe_vars', ',', 'data_title', ')', '+', 'msg_2', '+', 'msg_3', '.', 'format', '(', 'num_vars_accounted_for', ')', ')', 'else', ':', '# This means num_vars_accounted_for > num_dataframe_vars', 'msg', '=', '"There are more variable specified in ind_vars, "', 'msg_2', '=', '"alt_specific_vars, and subset_specific_vars ({:,}) than there"', 'msg_3', '=', '" are variables in {} ({:,})"', 'warnings', '.', 'warn', '(', 'msg', '+', 'msg_2', '.', 'format', '(', 'num_vars_accounted_for', ')', '+', 'msg_3', '.', 'format', '(', 'data_title', ',', 'num_dataframe_vars', ')', ')', 'return', 'None']
Ensure that all of the columns from dataframe are in the list of used_cols. Will raise a helpful UserWarning if otherwise. Parameters ---------- num_vars_accounted_for : int. Denotes the number of variables used in one's function. dataframe : pandas dataframe. Contains all of the data to be converted from one format to another. data_title : str, optional. Denotes the title by which `dataframe` should be referred in the UserWarning. Returns ------- None.
['Ensure', 'that', 'all', 'of', 'the', 'columns', 'from', 'dataframe', 'are', 'in', 'the', 'list', 'of', 'used_cols', '.', 'Will', 'raise', 'a', 'helpful', 'UserWarning', 'if', 'otherwise', '.']
train
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L420-L463
7,165
python-rope/rope
rope/base/resources.py
Folder.get_children
def get_children(self): """Return the children of this folder""" try: children = os.listdir(self.real_path) except OSError: return [] result = [] for name in children: try: child = self.get_child(name) except exceptions.ResourceNotFoundError: continue if not self.project.is_ignored(child): result.append(self.get_child(name)) return result
python
def get_children(self): """Return the children of this folder""" try: children = os.listdir(self.real_path) except OSError: return [] result = [] for name in children: try: child = self.get_child(name) except exceptions.ResourceNotFoundError: continue if not self.project.is_ignored(child): result.append(self.get_child(name)) return result
['def', 'get_children', '(', 'self', ')', ':', 'try', ':', 'children', '=', 'os', '.', 'listdir', '(', 'self', '.', 'real_path', ')', 'except', 'OSError', ':', 'return', '[', ']', 'result', '=', '[', ']', 'for', 'name', 'in', 'children', ':', 'try', ':', 'child', '=', 'self', '.', 'get_child', '(', 'name', ')', 'except', 'exceptions', '.', 'ResourceNotFoundError', ':', 'continue', 'if', 'not', 'self', '.', 'project', '.', 'is_ignored', '(', 'child', ')', ':', 'result', '.', 'append', '(', 'self', '.', 'get_child', '(', 'name', ')', ')', 'return', 'result']
Return the children of this folder
['Return', 'the', 'children', 'of', 'this', 'folder']
train
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/resources.py#L147-L161
7,166
tcalmant/ipopo
pelix/ipopo/handlers/temporal.py
TemporalDependency.on_service_arrival
def on_service_arrival(self, svc_ref): """ Called when a service has been registered in the framework :param svc_ref: A service reference """ with self._lock: if self.reference is None: # Inject the service service = self._context.get_service(svc_ref) self.reference = svc_ref self._value.set_service(service) self.__still_valid = True # Cancel timer self.__cancel_timer() # Bind the service self._ipopo_instance.bind(self, self._value, self.reference) return True return None
python
def on_service_arrival(self, svc_ref): """ Called when a service has been registered in the framework :param svc_ref: A service reference """ with self._lock: if self.reference is None: # Inject the service service = self._context.get_service(svc_ref) self.reference = svc_ref self._value.set_service(service) self.__still_valid = True # Cancel timer self.__cancel_timer() # Bind the service self._ipopo_instance.bind(self, self._value, self.reference) return True return None
['def', 'on_service_arrival', '(', 'self', ',', 'svc_ref', ')', ':', 'with', 'self', '.', '_lock', ':', 'if', 'self', '.', 'reference', 'is', 'None', ':', '# Inject the service', 'service', '=', 'self', '.', '_context', '.', 'get_service', '(', 'svc_ref', ')', 'self', '.', 'reference', '=', 'svc_ref', 'self', '.', '_value', '.', 'set_service', '(', 'service', ')', 'self', '.', '__still_valid', '=', 'True', '# Cancel timer', 'self', '.', '__cancel_timer', '(', ')', '# Bind the service', 'self', '.', '_ipopo_instance', '.', 'bind', '(', 'self', ',', 'self', '.', '_value', ',', 'self', '.', 'reference', ')', 'return', 'True', 'return', 'None']
Called when a service has been registered in the framework :param svc_ref: A service reference
['Called', 'when', 'a', 'service', 'has', 'been', 'registered', 'in', 'the', 'framework']
train
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/temporal.py#L289-L310
7,167
pycontribs/pyrax
pyrax/object_storage.py
ContainerManager.set_metadata
def set_metadata(self, container, metadata, clear=False, prefix=None): """ Accepts a dictionary of metadata key/value pairs and updates the specified container metadata with them. If 'clear' is True, any existing metadata is deleted and only the passed metadata is retained. Otherwise, the values passed here update the container's metadata. By default, the standard container metadata prefix ('X-Container-Meta-') is prepended to the header name if it isn't present. For non-standard headers, you must include a non-None prefix, such as an empty string. """ # Add the metadata prefix, if needed. if prefix is None: prefix = CONTAINER_META_PREFIX massaged = _massage_metakeys(metadata, prefix) new_meta = {} if clear: curr_meta = self.api.get_container_metadata(container, prefix=prefix) for ckey in curr_meta: new_meta[ckey] = "" utils.case_insensitive_update(new_meta, massaged) name = utils.get_name(container) uri = "/%s" % name resp, resp_body = self.api.method_post(uri, headers=new_meta) return 200 <= resp.status_code <= 299
python
def set_metadata(self, container, metadata, clear=False, prefix=None): """ Accepts a dictionary of metadata key/value pairs and updates the specified container metadata with them. If 'clear' is True, any existing metadata is deleted and only the passed metadata is retained. Otherwise, the values passed here update the container's metadata. By default, the standard container metadata prefix ('X-Container-Meta-') is prepended to the header name if it isn't present. For non-standard headers, you must include a non-None prefix, such as an empty string. """ # Add the metadata prefix, if needed. if prefix is None: prefix = CONTAINER_META_PREFIX massaged = _massage_metakeys(metadata, prefix) new_meta = {} if clear: curr_meta = self.api.get_container_metadata(container, prefix=prefix) for ckey in curr_meta: new_meta[ckey] = "" utils.case_insensitive_update(new_meta, massaged) name = utils.get_name(container) uri = "/%s" % name resp, resp_body = self.api.method_post(uri, headers=new_meta) return 200 <= resp.status_code <= 299
['def', 'set_metadata', '(', 'self', ',', 'container', ',', 'metadata', ',', 'clear', '=', 'False', ',', 'prefix', '=', 'None', ')', ':', '# Add the metadata prefix, if needed.', 'if', 'prefix', 'is', 'None', ':', 'prefix', '=', 'CONTAINER_META_PREFIX', 'massaged', '=', '_massage_metakeys', '(', 'metadata', ',', 'prefix', ')', 'new_meta', '=', '{', '}', 'if', 'clear', ':', 'curr_meta', '=', 'self', '.', 'api', '.', 'get_container_metadata', '(', 'container', ',', 'prefix', '=', 'prefix', ')', 'for', 'ckey', 'in', 'curr_meta', ':', 'new_meta', '[', 'ckey', ']', '=', '""', 'utils', '.', 'case_insensitive_update', '(', 'new_meta', ',', 'massaged', ')', 'name', '=', 'utils', '.', 'get_name', '(', 'container', ')', 'uri', '=', '"/%s"', '%', 'name', 'resp', ',', 'resp_body', '=', 'self', '.', 'api', '.', 'method_post', '(', 'uri', ',', 'headers', '=', 'new_meta', ')', 'return', '200', '<=', 'resp', '.', 'status_code', '<=', '299']
Accepts a dictionary of metadata key/value pairs and updates the specified container metadata with them. If 'clear' is True, any existing metadata is deleted and only the passed metadata is retained. Otherwise, the values passed here update the container's metadata. By default, the standard container metadata prefix ('X-Container-Meta-') is prepended to the header name if it isn't present. For non-standard headers, you must include a non-None prefix, such as an empty string.
['Accepts', 'a', 'dictionary', 'of', 'metadata', 'key', '/', 'value', 'pairs', 'and', 'updates', 'the', 'specified', 'container', 'metadata', 'with', 'them', '.']
train
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L985-L1013
7,168
openstack/networking-cisco
networking_cisco/apps/saf/server/dfa_openstack_helper.py
DfaNeutronHelper.get_fw_rule
def get_fw_rule(self, rule_id): """Return the firewall rule, given its ID. """ rule = None try: rule = self.neutronclient.show_firewall_rule(rule_id) except Exception as exc: LOG.error("Failed to get firewall rule for id %(id)s " "Exc %(exc)s", {'id': rule_id, 'exc': str(exc)}) return rule
python
def get_fw_rule(self, rule_id): """Return the firewall rule, given its ID. """ rule = None try: rule = self.neutronclient.show_firewall_rule(rule_id) except Exception as exc: LOG.error("Failed to get firewall rule for id %(id)s " "Exc %(exc)s", {'id': rule_id, 'exc': str(exc)}) return rule
['def', 'get_fw_rule', '(', 'self', ',', 'rule_id', ')', ':', 'rule', '=', 'None', 'try', ':', 'rule', '=', 'self', '.', 'neutronclient', '.', 'show_firewall_rule', '(', 'rule_id', ')', 'except', 'Exception', 'as', 'exc', ':', 'LOG', '.', 'error', '(', '"Failed to get firewall rule for id %(id)s "', '"Exc %(exc)s"', ',', '{', "'id'", ':', 'rule_id', ',', "'exc'", ':', 'str', '(', 'exc', ')', '}', ')', 'return', 'rule']
Return the firewall rule, given its ID.
['Return', 'the', 'firewall', 'rule', 'given', 'its', 'ID', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L513-L521
7,169
codeinn/vcs
vcs/nodes.py
FileNode.content
def content(self): """ Returns lazily content of the FileNode. If possible, would try to decode content from UTF-8. """ content = self._get_content() if bool(content and '\0' in content): return content return safe_unicode(content)
python
def content(self): """ Returns lazily content of the FileNode. If possible, would try to decode content from UTF-8. """ content = self._get_content() if bool(content and '\0' in content): return content return safe_unicode(content)
['def', 'content', '(', 'self', ')', ':', 'content', '=', 'self', '.', '_get_content', '(', ')', 'if', 'bool', '(', 'content', 'and', "'\\0'", 'in', 'content', ')', ':', 'return', 'content', 'return', 'safe_unicode', '(', 'content', ')']
Returns lazily content of the FileNode. If possible, would try to decode content from UTF-8.
['Returns', 'lazily', 'content', 'of', 'the', 'FileNode', '.', 'If', 'possible', 'would', 'try', 'to', 'decode', 'content', 'from', 'UTF', '-', '8', '.']
train
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/nodes.py#L276-L285
7,170
welbornprod/colr
colr/__main__.py
try_rgb
def try_rgb(s, default=None): """ Try parsing a string into an rgb value (int, int, int), where the ints are 0-255 inclusive. If None is passed, default is returned. On failure, InvalidArg is raised. """ if not s: return default try: r, g, b = (int(x.strip()) for x in s.split(',')) except ValueError: raise InvalidRgb(s) if not all(in_range(x, 0, 255) for x in (r, g, b)): raise InvalidRgb(s) return r, g, b
python
def try_rgb(s, default=None): """ Try parsing a string into an rgb value (int, int, int), where the ints are 0-255 inclusive. If None is passed, default is returned. On failure, InvalidArg is raised. """ if not s: return default try: r, g, b = (int(x.strip()) for x in s.split(',')) except ValueError: raise InvalidRgb(s) if not all(in_range(x, 0, 255) for x in (r, g, b)): raise InvalidRgb(s) return r, g, b
['def', 'try_rgb', '(', 's', ',', 'default', '=', 'None', ')', ':', 'if', 'not', 's', ':', 'return', 'default', 'try', ':', 'r', ',', 'g', ',', 'b', '=', '(', 'int', '(', 'x', '.', 'strip', '(', ')', ')', 'for', 'x', 'in', 's', '.', 'split', '(', "','", ')', ')', 'except', 'ValueError', ':', 'raise', 'InvalidRgb', '(', 's', ')', 'if', 'not', 'all', '(', 'in_range', '(', 'x', ',', '0', ',', '255', ')', 'for', 'x', 'in', '(', 'r', ',', 'g', ',', 'b', ')', ')', ':', 'raise', 'InvalidRgb', '(', 's', ')', 'return', 'r', ',', 'g', ',', 'b']
Try parsing a string into an rgb value (int, int, int), where the ints are 0-255 inclusive. If None is passed, default is returned. On failure, InvalidArg is raised.
['Try', 'parsing', 'a', 'string', 'into', 'an', 'rgb', 'value', '(', 'int', 'int', 'int', ')', 'where', 'the', 'ints', 'are', '0', '-', '255', 'inclusive', '.', 'If', 'None', 'is', 'passed', 'default', 'is', 'returned', '.', 'On', 'failure', 'InvalidArg', 'is', 'raised', '.']
train
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/__main__.py#L528-L543
7,171
pywbem/pywbem
wbemcli.py
iqi
def iqi(ql, qs, ns=None, rc=None, ot=None, coe=None, moc=DEFAULT_ITER_MAXOBJECTCOUNT,): # pylint: disable=line-too-long """ *New in pywbem 0.10 as experimental and finalized in 0.12.* This function is a wrapper for :meth:`~pywbem.WBEMConnection.IterQueryInstances`. Execute a query in a namespace, using the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation, and using the Python :term:`py:generator` idiom to return the result. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. Other than the other i...() functions, this function does not return a generator object directly, but as a property of the returned object. Parameters: ql (:term:`string`): Name of the query language used in the `qs` parameter, e.g. "DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query Language. Because this is not a filter query, "DMTF:FQL" is not a valid query language for this request. qs (:term:`string`): Query string in the query language specified in the `ql` parameter. ns (:term:`string`): Name of the CIM namespace to be used (case independent). If `None`, defaults to the default namespace of the connection. rc (:class:`py:bool`): Controls whether a class definition describing the properties of the returned instances will be returned. `None` will cause the server to use its default of `False`. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. moc (:class:`~pywbem.Uint32`): Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. Zero and `None` are not allowed. Returns: :class:`~pywbem.IterQueryInstancesReturn`: An object with the following properties: * **query_result_class** (:class:`~pywbem.CIMClass`): The query result class, if requested via the `rc` parameter. `None`, if a query result class was not requested. * **generator** (:term:`py:generator` iterating :class:`~pywbem.CIMInstance`): A generator object that iterates the CIM instances representing the query result. These instances do not have an instance path set. """ # noqa: E501 return CONN.IterQueryInstances(FilterQueryLanguage=ql, FilterQuery=qs, namespace=ns, ReturnQueryResultClass=rc, OperationTimeout=ot, ContinueOnError=coe, MaxObjectCount=moc)
python
def iqi(ql, qs, ns=None, rc=None, ot=None, coe=None, moc=DEFAULT_ITER_MAXOBJECTCOUNT,): # pylint: disable=line-too-long """ *New in pywbem 0.10 as experimental and finalized in 0.12.* This function is a wrapper for :meth:`~pywbem.WBEMConnection.IterQueryInstances`. Execute a query in a namespace, using the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation, and using the Python :term:`py:generator` idiom to return the result. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. Other than the other i...() functions, this function does not return a generator object directly, but as a property of the returned object. Parameters: ql (:term:`string`): Name of the query language used in the `qs` parameter, e.g. "DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query Language. Because this is not a filter query, "DMTF:FQL" is not a valid query language for this request. qs (:term:`string`): Query string in the query language specified in the `ql` parameter. ns (:term:`string`): Name of the CIM namespace to be used (case independent). If `None`, defaults to the default namespace of the connection. rc (:class:`py:bool`): Controls whether a class definition describing the properties of the returned instances will be returned. `None` will cause the server to use its default of `False`. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. moc (:class:`~pywbem.Uint32`): Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. Zero and `None` are not allowed. Returns: :class:`~pywbem.IterQueryInstancesReturn`: An object with the following properties: * **query_result_class** (:class:`~pywbem.CIMClass`): The query result class, if requested via the `rc` parameter. `None`, if a query result class was not requested. * **generator** (:term:`py:generator` iterating :class:`~pywbem.CIMInstance`): A generator object that iterates the CIM instances representing the query result. These instances do not have an instance path set. """ # noqa: E501 return CONN.IterQueryInstances(FilterQueryLanguage=ql, FilterQuery=qs, namespace=ns, ReturnQueryResultClass=rc, OperationTimeout=ot, ContinueOnError=coe, MaxObjectCount=moc)
['def', 'iqi', '(', 'ql', ',', 'qs', ',', 'ns', '=', 'None', ',', 'rc', '=', 'None', ',', 'ot', '=', 'None', ',', 'coe', '=', 'None', ',', 'moc', '=', 'DEFAULT_ITER_MAXOBJECTCOUNT', ',', ')', ':', '# pylint: disable=line-too-long', '# noqa: E501', 'return', 'CONN', '.', 'IterQueryInstances', '(', 'FilterQueryLanguage', '=', 'ql', ',', 'FilterQuery', '=', 'qs', ',', 'namespace', '=', 'ns', ',', 'ReturnQueryResultClass', '=', 'rc', ',', 'OperationTimeout', '=', 'ot', ',', 'ContinueOnError', '=', 'coe', ',', 'MaxObjectCount', '=', 'moc', ')']
*New in pywbem 0.10 as experimental and finalized in 0.12.* This function is a wrapper for :meth:`~pywbem.WBEMConnection.IterQueryInstances`. Execute a query in a namespace, using the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation, and using the Python :term:`py:generator` idiom to return the result. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. Other than the other i...() functions, this function does not return a generator object directly, but as a property of the returned object. Parameters: ql (:term:`string`): Name of the query language used in the `qs` parameter, e.g. "DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query Language. Because this is not a filter query, "DMTF:FQL" is not a valid query language for this request. qs (:term:`string`): Query string in the query language specified in the `ql` parameter. ns (:term:`string`): Name of the CIM namespace to be used (case independent). If `None`, defaults to the default namespace of the connection. rc (:class:`py:bool`): Controls whether a class definition describing the properties of the returned instances will be returned. `None` will cause the server to use its default of `False`. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. moc (:class:`~pywbem.Uint32`): Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. Zero and `None` are not allowed. Returns: :class:`~pywbem.IterQueryInstancesReturn`: An object with the following properties: * **query_result_class** (:class:`~pywbem.CIMClass`): The query result class, if requested via the `rc` parameter. `None`, if a query result class was not requested. * **generator** (:term:`py:generator` iterating :class:`~pywbem.CIMInstance`): A generator object that iterates the CIM instances representing the query result. These instances do not have an instance path set.
['*', 'New', 'in', 'pywbem', '0', '.', '10', 'as', 'experimental', 'and', 'finalized', 'in', '0', '.', '12', '.', '*']
train
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/wbemcli.py#L1622-L1711
7,172
tkrajina/TileStitcher
tilestitcher/__init__.py
SlippyMapTiles._deg2num
def _deg2num(self, lat_deg, lon_deg, zoom, leave_float=False): """ Taken from http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Python """ lat_rad = mod_math.radians(lat_deg) n = 2.0 ** zoom xtile = (lon_deg + 180.0) / 360.0 * n ytile = (1.0 - mod_math.log(mod_math.tan(lat_rad) + (1 / mod_math.cos(lat_rad))) / mod_math.pi) / 2.0 * n if not leave_float: xtile = int(xtile) ytile = int(ytile) return TileInfo(xtile, ytile, zoom)
python
def _deg2num(self, lat_deg, lon_deg, zoom, leave_float=False): """ Taken from http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Python """ lat_rad = mod_math.radians(lat_deg) n = 2.0 ** zoom xtile = (lon_deg + 180.0) / 360.0 * n ytile = (1.0 - mod_math.log(mod_math.tan(lat_rad) + (1 / mod_math.cos(lat_rad))) / mod_math.pi) / 2.0 * n if not leave_float: xtile = int(xtile) ytile = int(ytile) return TileInfo(xtile, ytile, zoom)
['def', '_deg2num', '(', 'self', ',', 'lat_deg', ',', 'lon_deg', ',', 'zoom', ',', 'leave_float', '=', 'False', ')', ':', 'lat_rad', '=', 'mod_math', '.', 'radians', '(', 'lat_deg', ')', 'n', '=', '2.0', '**', 'zoom', 'xtile', '=', '(', 'lon_deg', '+', '180.0', ')', '/', '360.0', '*', 'n', 'ytile', '=', '(', '1.0', '-', 'mod_math', '.', 'log', '(', 'mod_math', '.', 'tan', '(', 'lat_rad', ')', '+', '(', '1', '/', 'mod_math', '.', 'cos', '(', 'lat_rad', ')', ')', ')', '/', 'mod_math', '.', 'pi', ')', '/', '2.0', '*', 'n', 'if', 'not', 'leave_float', ':', 'xtile', '=', 'int', '(', 'xtile', ')', 'ytile', '=', 'int', '(', 'ytile', ')', 'return', 'TileInfo', '(', 'xtile', ',', 'ytile', ',', 'zoom', ')']
Taken from http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Python
['Taken', 'from', 'http', ':', '//', 'wiki', '.', 'openstreetmap', '.', 'org', '/', 'wiki', '/', 'Slippy_map_tilenames#Python']
train
https://github.com/tkrajina/TileStitcher/blob/aba142f273a864e86a863e206e215214217e5202/tilestitcher/__init__.py#L70-L79
7,173
Alveo/pyalveo
pyalveo/pyalveo.py
Client.get_primary_text
def get_primary_text(self, item_url, force_download=False): """ Retrieve the primary text for an item from the server :type item_url: String or Item :param item_url: URL of the item, or an Item object :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the item's primary text if it has one, otherwise None :raises: APIError if the request was not successful """ item_url = str(item_url) metadata = self.get_item(item_url).metadata() try: primary_text_url = metadata['alveo:primary_text_url'] except KeyError: return None if primary_text_url == 'No primary text found': return None if (self.use_cache and not force_download and self.cache.has_primary_text(item_url)): primary_text = self.cache.get_primary_text(item_url) else: primary_text = self.api_request(primary_text_url, raw=True) if self.update_cache: self.cache.add_primary_text(item_url, primary_text) return primary_text
python
def get_primary_text(self, item_url, force_download=False): """ Retrieve the primary text for an item from the server :type item_url: String or Item :param item_url: URL of the item, or an Item object :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the item's primary text if it has one, otherwise None :raises: APIError if the request was not successful """ item_url = str(item_url) metadata = self.get_item(item_url).metadata() try: primary_text_url = metadata['alveo:primary_text_url'] except KeyError: return None if primary_text_url == 'No primary text found': return None if (self.use_cache and not force_download and self.cache.has_primary_text(item_url)): primary_text = self.cache.get_primary_text(item_url) else: primary_text = self.api_request(primary_text_url, raw=True) if self.update_cache: self.cache.add_primary_text(item_url, primary_text) return primary_text
['def', 'get_primary_text', '(', 'self', ',', 'item_url', ',', 'force_download', '=', 'False', ')', ':', 'item_url', '=', 'str', '(', 'item_url', ')', 'metadata', '=', 'self', '.', 'get_item', '(', 'item_url', ')', '.', 'metadata', '(', ')', 'try', ':', 'primary_text_url', '=', 'metadata', '[', "'alveo:primary_text_url'", ']', 'except', 'KeyError', ':', 'return', 'None', 'if', 'primary_text_url', '==', "'No primary text found'", ':', 'return', 'None', 'if', '(', 'self', '.', 'use_cache', 'and', 'not', 'force_download', 'and', 'self', '.', 'cache', '.', 'has_primary_text', '(', 'item_url', ')', ')', ':', 'primary_text', '=', 'self', '.', 'cache', '.', 'get_primary_text', '(', 'item_url', ')', 'else', ':', 'primary_text', '=', 'self', '.', 'api_request', '(', 'primary_text_url', ',', 'raw', '=', 'True', ')', 'if', 'self', '.', 'update_cache', ':', 'self', '.', 'cache', '.', 'add_primary_text', '(', 'item_url', ',', 'primary_text', ')', 'return', 'primary_text']
Retrieve the primary text for an item from the server :type item_url: String or Item :param item_url: URL of the item, or an Item object :type force_download: Boolean :param force_download: True to download from the server regardless of the cache's contents :rtype: String :returns: the item's primary text if it has one, otherwise None :raises: APIError if the request was not successful
['Retrieve', 'the', 'primary', 'text', 'for', 'an', 'item', 'from', 'the', 'server']
train
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/pyalveo.py#L750-L786
7,174
jmgilman/Neolib
neolib/pyamf/util/__init__.py
set_attrs
def set_attrs(obj, attrs): """ Applies a collection of attributes C{attrs} to object C{obj} in the most generic way possible. @param obj: An instance implementing C{__setattr__}, or C{__setitem__} @param attrs: A collection implementing the C{iteritems} function @type attrs: Usually a dict """ o = setattr if hasattr(obj, '__setitem__'): o = type(obj).__setitem__ [o(obj, k, v) for k, v in attrs.iteritems()]
python
def set_attrs(obj, attrs): """ Applies a collection of attributes C{attrs} to object C{obj} in the most generic way possible. @param obj: An instance implementing C{__setattr__}, or C{__setitem__} @param attrs: A collection implementing the C{iteritems} function @type attrs: Usually a dict """ o = setattr if hasattr(obj, '__setitem__'): o = type(obj).__setitem__ [o(obj, k, v) for k, v in attrs.iteritems()]
['def', 'set_attrs', '(', 'obj', ',', 'attrs', ')', ':', 'o', '=', 'setattr', 'if', 'hasattr', '(', 'obj', ',', "'__setitem__'", ')', ':', 'o', '=', 'type', '(', 'obj', ')', '.', '__setitem__', '[', 'o', '(', 'obj', ',', 'k', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'attrs', '.', 'iteritems', '(', ')', ']']
Applies a collection of attributes C{attrs} to object C{obj} in the most generic way possible. @param obj: An instance implementing C{__setattr__}, or C{__setitem__} @param attrs: A collection implementing the C{iteritems} function @type attrs: Usually a dict
['Applies', 'a', 'collection', 'of', 'attributes', 'C', '{', 'attrs', '}', 'to', 'object', 'C', '{', 'obj', '}', 'in', 'the', 'most', 'generic', 'way', 'possible', '.']
train
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/util/__init__.py#L75-L89
7,175
nfcpy/nfcpy
src/nfc/handover/client.py
HandoverClient.send
def send(self, message): """Send a handover request message to the remote server.""" log.debug("sending '{0}' message".format(message.type)) send_miu = self.socket.getsockopt(nfc.llcp.SO_SNDMIU) try: data = str(message) except nfc.llcp.EncodeError as e: log.error("message encoding failed: {0}".format(e)) else: return self._send(data, send_miu)
python
def send(self, message): """Send a handover request message to the remote server.""" log.debug("sending '{0}' message".format(message.type)) send_miu = self.socket.getsockopt(nfc.llcp.SO_SNDMIU) try: data = str(message) except nfc.llcp.EncodeError as e: log.error("message encoding failed: {0}".format(e)) else: return self._send(data, send_miu)
['def', 'send', '(', 'self', ',', 'message', ')', ':', 'log', '.', 'debug', '(', '"sending \'{0}\' message"', '.', 'format', '(', 'message', '.', 'type', ')', ')', 'send_miu', '=', 'self', '.', 'socket', '.', 'getsockopt', '(', 'nfc', '.', 'llcp', '.', 'SO_SNDMIU', ')', 'try', ':', 'data', '=', 'str', '(', 'message', ')', 'except', 'nfc', '.', 'llcp', '.', 'EncodeError', 'as', 'e', ':', 'log', '.', 'error', '(', '"message encoding failed: {0}"', '.', 'format', '(', 'e', ')', ')', 'else', ':', 'return', 'self', '.', '_send', '(', 'data', ',', 'send_miu', ')']
Send a handover request message to the remote server.
['Send', 'a', 'handover', 'request', 'message', 'to', 'the', 'remote', 'server', '.']
train
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/handover/client.py#L59-L68
7,176
vals/umis
umis/umis.py
umi_histogram
def umi_histogram(fastq): ''' Counts the number of reads for each UMI Expects formatted fastq files. ''' annotations = detect_fastq_annotations(fastq) re_string = construct_transformed_regex(annotations) parser_re = re.compile(re_string) counter = collections.Counter() for read in read_fastq(fastq): match = parser_re.search(read).groupdict() counter[match['MB']] += 1 for bc, count in counter.most_common(): sys.stdout.write('{}\t{}\n'.format(bc, count))
python
def umi_histogram(fastq): ''' Counts the number of reads for each UMI Expects formatted fastq files. ''' annotations = detect_fastq_annotations(fastq) re_string = construct_transformed_regex(annotations) parser_re = re.compile(re_string) counter = collections.Counter() for read in read_fastq(fastq): match = parser_re.search(read).groupdict() counter[match['MB']] += 1 for bc, count in counter.most_common(): sys.stdout.write('{}\t{}\n'.format(bc, count))
['def', 'umi_histogram', '(', 'fastq', ')', ':', 'annotations', '=', 'detect_fastq_annotations', '(', 'fastq', ')', 're_string', '=', 'construct_transformed_regex', '(', 'annotations', ')', 'parser_re', '=', 're', '.', 'compile', '(', 're_string', ')', 'counter', '=', 'collections', '.', 'Counter', '(', ')', 'for', 'read', 'in', 'read_fastq', '(', 'fastq', ')', ':', 'match', '=', 'parser_re', '.', 'search', '(', 'read', ')', '.', 'groupdict', '(', ')', 'counter', '[', 'match', '[', "'MB'", ']', ']', '+=', '1', 'for', 'bc', ',', 'count', 'in', 'counter', '.', 'most_common', '(', ')', ':', 'sys', '.', 'stdout', '.', 'write', '(', "'{}\\t{}\\n'", '.', 'format', '(', 'bc', ',', 'count', ')', ')']
Counts the number of reads for each UMI Expects formatted fastq files.
['Counts', 'the', 'number', 'of', 'reads', 'for', 'each', 'UMI']
train
https://github.com/vals/umis/blob/e8adb8486d9e9134ab8a6cad9811a7e74dcc4a2c/umis/umis.py#L989-L1004
7,177
wal-e/wal-e
wal_e/worker/upload_pool.py
TarUploadPool._start
def _start(self, tpart): """Start upload and accout for resource consumption.""" g = gevent.Greenlet(self.uploader, tpart) g.link(self._finish) # Account for concurrency_burden before starting the greenlet # to avoid racing against .join. self.concurrency_burden += 1 self.member_burden += len(tpart) g.start()
python
def _start(self, tpart): """Start upload and accout for resource consumption.""" g = gevent.Greenlet(self.uploader, tpart) g.link(self._finish) # Account for concurrency_burden before starting the greenlet # to avoid racing against .join. self.concurrency_burden += 1 self.member_burden += len(tpart) g.start()
['def', '_start', '(', 'self', ',', 'tpart', ')', ':', 'g', '=', 'gevent', '.', 'Greenlet', '(', 'self', '.', 'uploader', ',', 'tpart', ')', 'g', '.', 'link', '(', 'self', '.', '_finish', ')', '# Account for concurrency_burden before starting the greenlet', '# to avoid racing against .join.', 'self', '.', 'concurrency_burden', '+=', '1', 'self', '.', 'member_burden', '+=', 'len', '(', 'tpart', ')', 'g', '.', 'start', '(', ')']
Start upload and accout for resource consumption.
['Start', 'upload', 'and', 'accout', 'for', 'resource', 'consumption', '.']
train
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/upload_pool.py#L29-L40
7,178
google/mobly
mobly/controllers/android_device_lib/callback_handler.py
CallbackHandler.getAll
def getAll(self, event_name): """Gets all the events of a certain name that have been received so far. This is a non-blocking call. Args: callback_id: The id of the callback. event_name: string, the name of the event to get. Returns: A list of SnippetEvent, each representing an event from the Java side. """ raw_events = self._event_client.eventGetAll(self._id, event_name) return [snippet_event.from_dict(msg) for msg in raw_events]
python
def getAll(self, event_name): """Gets all the events of a certain name that have been received so far. This is a non-blocking call. Args: callback_id: The id of the callback. event_name: string, the name of the event to get. Returns: A list of SnippetEvent, each representing an event from the Java side. """ raw_events = self._event_client.eventGetAll(self._id, event_name) return [snippet_event.from_dict(msg) for msg in raw_events]
['def', 'getAll', '(', 'self', ',', 'event_name', ')', ':', 'raw_events', '=', 'self', '.', '_event_client', '.', 'eventGetAll', '(', 'self', '.', '_id', ',', 'event_name', ')', 'return', '[', 'snippet_event', '.', 'from_dict', '(', 'msg', ')', 'for', 'msg', 'in', 'raw_events', ']']
Gets all the events of a certain name that have been received so far. This is a non-blocking call. Args: callback_id: The id of the callback. event_name: string, the name of the event to get. Returns: A list of SnippetEvent, each representing an event from the Java side.
['Gets', 'all', 'the', 'events', 'of', 'a', 'certain', 'name', 'that', 'have', 'been', 'received', 'so', 'far', '.', 'This', 'is', 'a', 'non', '-', 'blocking', 'call', '.']
train
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device_lib/callback_handler.py#L157-L170
7,179
franciscogarate/pyliferisk
pyliferisk/__init__.py
aaxn
def aaxn(mt, x, n, m=1): """ äxn : Return the actuarial present value of a (immediate) temporal (term certain) annuity: n-year temporary life annuity-anticipatory. Payable 'm' per year at the beginning of the period """ if m == 1: return (mt.Nx[x] - mt.Nx[x + n]) / mt.Dx[x] else: return (mt.Nx[x] - mt.Nx[x + n]) / mt.Dx[x] - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, n)))
python
def aaxn(mt, x, n, m=1): """ äxn : Return the actuarial present value of a (immediate) temporal (term certain) annuity: n-year temporary life annuity-anticipatory. Payable 'm' per year at the beginning of the period """ if m == 1: return (mt.Nx[x] - mt.Nx[x + n]) / mt.Dx[x] else: return (mt.Nx[x] - mt.Nx[x + n]) / mt.Dx[x] - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, n)))
['def', 'aaxn', '(', 'mt', ',', 'x', ',', 'n', ',', 'm', '=', '1', ')', ':', 'if', 'm', '==', '1', ':', 'return', '(', 'mt', '.', 'Nx', '[', 'x', ']', '-', 'mt', '.', 'Nx', '[', 'x', '+', 'n', ']', ')', '/', 'mt', '.', 'Dx', '[', 'x', ']', 'else', ':', 'return', '(', 'mt', '.', 'Nx', '[', 'x', ']', '-', 'mt', '.', 'Nx', '[', 'x', '+', 'n', ']', ')', '/', 'mt', '.', 'Dx', '[', 'x', ']', '-', '(', '(', 'float', '(', 'm', '-', '1', ')', '/', 'float', '(', 'm', '*', '2', ')', ')', '*', '(', '1', '-', 'nEx', '(', 'mt', ',', 'x', ',', 'n', ')', ')', ')']
äxn : Return the actuarial present value of a (immediate) temporal (term certain) annuity: n-year temporary life annuity-anticipatory. Payable 'm' per year at the beginning of the period
['äxn', ':', 'Return', 'the', 'actuarial', 'present', 'value', 'of', 'a', '(', 'immediate', ')', 'temporal', '(', 'term', 'certain', ')', 'annuity', ':', 'n', '-', 'year', 'temporary', 'life', 'annuity', '-', 'anticipatory', '.', 'Payable', 'm', 'per', 'year', 'at', 'the', 'beginning', 'of', 'the', 'period']
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L357-L364
7,180
oseledets/ttpy
tt/core/tools.py
zkronv
def zkronv(ttA, ttB): """ Do kronecker product between vectors ttA and ttB. Look about kronecker at: https://en.wikipedia.org/wiki/Kronecker_product For details about operation refer: https://arxiv.org/abs/1802.02839 :param ttA: first TT-vector; :param ttB: second TT-vector; :return: operation result in z-order """ Al = _vector.vector.to_list(ttA) Bl = _vector.vector.to_list(ttB) Hl = [_np.kron(B, A) for (A, B) in zip(Al, Bl)] return _vector.vector.from_list(Hl)
python
def zkronv(ttA, ttB): """ Do kronecker product between vectors ttA and ttB. Look about kronecker at: https://en.wikipedia.org/wiki/Kronecker_product For details about operation refer: https://arxiv.org/abs/1802.02839 :param ttA: first TT-vector; :param ttB: second TT-vector; :return: operation result in z-order """ Al = _vector.vector.to_list(ttA) Bl = _vector.vector.to_list(ttB) Hl = [_np.kron(B, A) for (A, B) in zip(Al, Bl)] return _vector.vector.from_list(Hl)
['def', 'zkronv', '(', 'ttA', ',', 'ttB', ')', ':', 'Al', '=', '_vector', '.', 'vector', '.', 'to_list', '(', 'ttA', ')', 'Bl', '=', '_vector', '.', 'vector', '.', 'to_list', '(', 'ttB', ')', 'Hl', '=', '[', '_np', '.', 'kron', '(', 'B', ',', 'A', ')', 'for', '(', 'A', ',', 'B', ')', 'in', 'zip', '(', 'Al', ',', 'Bl', ')', ']', 'return', '_vector', '.', 'vector', '.', 'from_list', '(', 'Hl', ')']
Do kronecker product between vectors ttA and ttB. Look about kronecker at: https://en.wikipedia.org/wiki/Kronecker_product For details about operation refer: https://arxiv.org/abs/1802.02839 :param ttA: first TT-vector; :param ttB: second TT-vector; :return: operation result in z-order
['Do', 'kronecker', 'product', 'between', 'vectors', 'ttA', 'and', 'ttB', '.', 'Look', 'about', 'kronecker', 'at', ':', 'https', ':', '//', 'en', '.', 'wikipedia', '.', 'org', '/', 'wiki', '/', 'Kronecker_product', 'For', 'details', 'about', 'operation', 'refer', ':', 'https', ':', '//', 'arxiv', '.', 'org', '/', 'abs', '/', '1802', '.', '02839', ':', 'param', 'ttA', ':', 'first', 'TT', '-', 'vector', ';', ':', 'param', 'ttB', ':', 'second', 'TT', '-', 'vector', ';', ':', 'return', ':', 'operation', 'result', 'in', 'z', '-', 'order']
train
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L200-L212
7,181
dpgaspar/Flask-AppBuilder
flask_appbuilder/base.py
AppBuilder.init_app
def init_app(self, app, session): """ Will initialize the Flask app, supporting the app factory pattern. :param app: :param session: The SQLAlchemy session """ app.config.setdefault("APP_NAME", "F.A.B.") app.config.setdefault("APP_THEME", "") app.config.setdefault("APP_ICON", "") app.config.setdefault("LANGUAGES", {"en": {"flag": "gb", "name": "English"}}) app.config.setdefault("ADDON_MANAGERS", []) app.config.setdefault("FAB_API_MAX_PAGE_SIZE", 20) self.app = app if self.update_perms: # default is True, if False takes precedence from config self.update_perms = app.config.get('FAB_UPDATE_PERMS', True) _security_manager_class_name = app.config.get('FAB_SECURITY_MANAGER_CLASS', None) if _security_manager_class_name is not None: self.security_manager_class = dynamic_class_import( _security_manager_class_name ) if self.security_manager_class is None: from flask_appbuilder.security.sqla.manager import SecurityManager self.security_manager_class = SecurityManager self._addon_managers = app.config["ADDON_MANAGERS"] self.session = session self.sm = self.security_manager_class(self) self.bm = BabelManager(self) self.openapi_manager = OpenApiManager(self) self._add_global_static() self._add_global_filters() app.before_request(self.sm.before_request) self._add_admin_views() self._add_addon_views() if self.app: self._add_menu_permissions() else: self.post_init() self._init_extension(app)
python
def init_app(self, app, session): """ Will initialize the Flask app, supporting the app factory pattern. :param app: :param session: The SQLAlchemy session """ app.config.setdefault("APP_NAME", "F.A.B.") app.config.setdefault("APP_THEME", "") app.config.setdefault("APP_ICON", "") app.config.setdefault("LANGUAGES", {"en": {"flag": "gb", "name": "English"}}) app.config.setdefault("ADDON_MANAGERS", []) app.config.setdefault("FAB_API_MAX_PAGE_SIZE", 20) self.app = app if self.update_perms: # default is True, if False takes precedence from config self.update_perms = app.config.get('FAB_UPDATE_PERMS', True) _security_manager_class_name = app.config.get('FAB_SECURITY_MANAGER_CLASS', None) if _security_manager_class_name is not None: self.security_manager_class = dynamic_class_import( _security_manager_class_name ) if self.security_manager_class is None: from flask_appbuilder.security.sqla.manager import SecurityManager self.security_manager_class = SecurityManager self._addon_managers = app.config["ADDON_MANAGERS"] self.session = session self.sm = self.security_manager_class(self) self.bm = BabelManager(self) self.openapi_manager = OpenApiManager(self) self._add_global_static() self._add_global_filters() app.before_request(self.sm.before_request) self._add_admin_views() self._add_addon_views() if self.app: self._add_menu_permissions() else: self.post_init() self._init_extension(app)
['def', 'init_app', '(', 'self', ',', 'app', ',', 'session', ')', ':', 'app', '.', 'config', '.', 'setdefault', '(', '"APP_NAME"', ',', '"F.A.B."', ')', 'app', '.', 'config', '.', 'setdefault', '(', '"APP_THEME"', ',', '""', ')', 'app', '.', 'config', '.', 'setdefault', '(', '"APP_ICON"', ',', '""', ')', 'app', '.', 'config', '.', 'setdefault', '(', '"LANGUAGES"', ',', '{', '"en"', ':', '{', '"flag"', ':', '"gb"', ',', '"name"', ':', '"English"', '}', '}', ')', 'app', '.', 'config', '.', 'setdefault', '(', '"ADDON_MANAGERS"', ',', '[', ']', ')', 'app', '.', 'config', '.', 'setdefault', '(', '"FAB_API_MAX_PAGE_SIZE"', ',', '20', ')', 'self', '.', 'app', '=', 'app', 'if', 'self', '.', 'update_perms', ':', '# default is True, if False takes precedence from config', 'self', '.', 'update_perms', '=', 'app', '.', 'config', '.', 'get', '(', "'FAB_UPDATE_PERMS'", ',', 'True', ')', '_security_manager_class_name', '=', 'app', '.', 'config', '.', 'get', '(', "'FAB_SECURITY_MANAGER_CLASS'", ',', 'None', ')', 'if', '_security_manager_class_name', 'is', 'not', 'None', ':', 'self', '.', 'security_manager_class', '=', 'dynamic_class_import', '(', '_security_manager_class_name', ')', 'if', 'self', '.', 'security_manager_class', 'is', 'None', ':', 'from', 'flask_appbuilder', '.', 'security', '.', 'sqla', '.', 'manager', 'import', 'SecurityManager', 'self', '.', 'security_manager_class', '=', 'SecurityManager', 'self', '.', '_addon_managers', '=', 'app', '.', 'config', '[', '"ADDON_MANAGERS"', ']', 'self', '.', 'session', '=', 'session', 'self', '.', 'sm', '=', 'self', '.', 'security_manager_class', '(', 'self', ')', 'self', '.', 'bm', '=', 'BabelManager', '(', 'self', ')', 'self', '.', 'openapi_manager', '=', 'OpenApiManager', '(', 'self', ')', 'self', '.', '_add_global_static', '(', ')', 'self', '.', '_add_global_filters', '(', ')', 'app', '.', 'before_request', '(', 'self', '.', 'sm', '.', 'before_request', ')', 'self', '.', '_add_admin_views', '(', ')', 'self', '.', '_add_addon_views', '(', ')', 'if', 'self', '.', 'app', ':', 'self', '.', '_add_menu_permissions', '(', ')', 'else', ':', 'self', '.', 'post_init', '(', ')', 'self', '.', '_init_extension', '(', 'app', ')']
Will initialize the Flask app, supporting the app factory pattern. :param app: :param session: The SQLAlchemy session
['Will', 'initialize', 'the', 'Flask', 'app', 'supporting', 'the', 'app', 'factory', 'pattern', '.']
train
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/base.py#L149-L189
7,182
gbowerman/azurerm
docs/py2md.py
extract_code
def extract_code(end_mark, current_str, str_array, line_num): '''Extract a multi-line string from a string array, up to a specified end marker. Args: end_mark (str): The end mark string to match for. current_str (str): The first line of the string array. str_array (list): An array of strings (lines). line_num (int): The current offset into the array. Returns: Extended string up to line with end marker. ''' if end_mark not in current_str: reached_end = False line_num += 1 while reached_end is False: next_line = str_array[line_num] if end_mark in next_line: reached_end = True else: line_num += 1 current_str += next_line clean_str = current_str.split(end_mark)[0] return {'current_str': clean_str, 'line_num': line_num}
python
def extract_code(end_mark, current_str, str_array, line_num): '''Extract a multi-line string from a string array, up to a specified end marker. Args: end_mark (str): The end mark string to match for. current_str (str): The first line of the string array. str_array (list): An array of strings (lines). line_num (int): The current offset into the array. Returns: Extended string up to line with end marker. ''' if end_mark not in current_str: reached_end = False line_num += 1 while reached_end is False: next_line = str_array[line_num] if end_mark in next_line: reached_end = True else: line_num += 1 current_str += next_line clean_str = current_str.split(end_mark)[0] return {'current_str': clean_str, 'line_num': line_num}
['def', 'extract_code', '(', 'end_mark', ',', 'current_str', ',', 'str_array', ',', 'line_num', ')', ':', 'if', 'end_mark', 'not', 'in', 'current_str', ':', 'reached_end', '=', 'False', 'line_num', '+=', '1', 'while', 'reached_end', 'is', 'False', ':', 'next_line', '=', 'str_array', '[', 'line_num', ']', 'if', 'end_mark', 'in', 'next_line', ':', 'reached_end', '=', 'True', 'else', ':', 'line_num', '+=', '1', 'current_str', '+=', 'next_line', 'clean_str', '=', 'current_str', '.', 'split', '(', 'end_mark', ')', '[', '0', ']', 'return', '{', "'current_str'", ':', 'clean_str', ',', "'line_num'", ':', 'line_num', '}']
Extract a multi-line string from a string array, up to a specified end marker. Args: end_mark (str): The end mark string to match for. current_str (str): The first line of the string array. str_array (list): An array of strings (lines). line_num (int): The current offset into the array. Returns: Extended string up to line with end marker.
['Extract', 'a', 'multi', '-', 'line', 'string', 'from', 'a', 'string', 'array', 'up', 'to', 'a', 'specified', 'end', 'marker', '.']
train
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/docs/py2md.py#L7-L30
7,183
UCL-INGI/INGInious
inginious/agent/docker_agent/_docker_interface.py
DockerInterface.remove_container
def remove_container(self, container_id): """ Removes a container (with fire) """ self._docker.containers.get(container_id).remove(v=True, link=False, force=True)
python
def remove_container(self, container_id): """ Removes a container (with fire) """ self._docker.containers.get(container_id).remove(v=True, link=False, force=True)
['def', 'remove_container', '(', 'self', ',', 'container_id', ')', ':', 'self', '.', '_docker', '.', 'containers', '.', 'get', '(', 'container_id', ')', '.', 'remove', '(', 'v', '=', 'True', ',', 'link', '=', 'False', ',', 'force', '=', 'True', ')']
Removes a container (with fire)
['Removes', 'a', 'container', '(', 'with', 'fire', ')']
train
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/agent/docker_agent/_docker_interface.py#L175-L179
7,184
inveniosoftware-contrib/invenio-workflows
invenio_workflows/worker_engine.py
run_worker
def run_worker(wname, data, engine_uuid_hex=None, **kwargs): """Run a workflow by name with list of data objects. The list of data can also contain WorkflowObjects. ``**kwargs`` can be used to pass custom arguments to the engine/object. :param wname: name of workflow to run. :type wname: str :param data: objects to run through the workflow. :type data: list :param engine_uuid_hex: hex string of the uuid of the engine to use, if not passed will create a new one. :type data: str :return: WorkflowEngine instance """ if 'stop_on_halt' not in kwargs: kwargs['stop_on_halt'] = False if engine_uuid_hex: engine_uuid = uuid.UUID(hex=engine_uuid_hex) engine = WorkflowEngine.from_uuid(uuid=engine_uuid, **kwargs) else: engine = WorkflowEngine.with_name(wname, **kwargs) engine.save() objects = get_workflow_object_instances(data, engine) db.session.commit() engine.process(objects, **kwargs) return engine
python
def run_worker(wname, data, engine_uuid_hex=None, **kwargs): """Run a workflow by name with list of data objects. The list of data can also contain WorkflowObjects. ``**kwargs`` can be used to pass custom arguments to the engine/object. :param wname: name of workflow to run. :type wname: str :param data: objects to run through the workflow. :type data: list :param engine_uuid_hex: hex string of the uuid of the engine to use, if not passed will create a new one. :type data: str :return: WorkflowEngine instance """ if 'stop_on_halt' not in kwargs: kwargs['stop_on_halt'] = False if engine_uuid_hex: engine_uuid = uuid.UUID(hex=engine_uuid_hex) engine = WorkflowEngine.from_uuid(uuid=engine_uuid, **kwargs) else: engine = WorkflowEngine.with_name(wname, **kwargs) engine.save() objects = get_workflow_object_instances(data, engine) db.session.commit() engine.process(objects, **kwargs) return engine
['def', 'run_worker', '(', 'wname', ',', 'data', ',', 'engine_uuid_hex', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', "'stop_on_halt'", 'not', 'in', 'kwargs', ':', 'kwargs', '[', "'stop_on_halt'", ']', '=', 'False', 'if', 'engine_uuid_hex', ':', 'engine_uuid', '=', 'uuid', '.', 'UUID', '(', 'hex', '=', 'engine_uuid_hex', ')', 'engine', '=', 'WorkflowEngine', '.', 'from_uuid', '(', 'uuid', '=', 'engine_uuid', ',', '*', '*', 'kwargs', ')', 'else', ':', 'engine', '=', 'WorkflowEngine', '.', 'with_name', '(', 'wname', ',', '*', '*', 'kwargs', ')', 'engine', '.', 'save', '(', ')', 'objects', '=', 'get_workflow_object_instances', '(', 'data', ',', 'engine', ')', 'db', '.', 'session', '.', 'commit', '(', ')', 'engine', '.', 'process', '(', 'objects', ',', '*', '*', 'kwargs', ')', 'return', 'engine']
Run a workflow by name with list of data objects. The list of data can also contain WorkflowObjects. ``**kwargs`` can be used to pass custom arguments to the engine/object. :param wname: name of workflow to run. :type wname: str :param data: objects to run through the workflow. :type data: list :param engine_uuid_hex: hex string of the uuid of the engine to use, if not passed will create a new one. :type data: str :return: WorkflowEngine instance
['Run', 'a', 'workflow', 'by', 'name', 'with', 'list', 'of', 'data', 'objects', '.']
train
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/worker_engine.py#L30-L62
7,185
OpenAgInitiative/openag_python
openag/utils.py
dedupe_by
def dedupe_by(things, key=None): """ Given an iterator of things and an optional key generation function, return a new iterator of deduped things. Things are compared and de-duped by the key function, which is hash() by default. """ if not key: key = hash index = {key(thing): thing for thing in things} return index.values()
python
def dedupe_by(things, key=None): """ Given an iterator of things and an optional key generation function, return a new iterator of deduped things. Things are compared and de-duped by the key function, which is hash() by default. """ if not key: key = hash index = {key(thing): thing for thing in things} return index.values()
['def', 'dedupe_by', '(', 'things', ',', 'key', '=', 'None', ')', ':', 'if', 'not', 'key', ':', 'key', '=', 'hash', 'index', '=', '{', 'key', '(', 'thing', ')', ':', 'thing', 'for', 'thing', 'in', 'things', '}', 'return', 'index', '.', 'values', '(', ')']
Given an iterator of things and an optional key generation function, return a new iterator of deduped things. Things are compared and de-duped by the key function, which is hash() by default.
['Given', 'an', 'iterator', 'of', 'things', 'and', 'an', 'optional', 'key', 'generation', 'function', 'return', 'a', 'new', 'iterator', 'of', 'deduped', 'things', '.', 'Things', 'are', 'compared', 'and', 'de', '-', 'duped', 'by', 'the', 'key', 'function', 'which', 'is', 'hash', '()', 'by', 'default', '.']
train
https://github.com/OpenAgInitiative/openag_python/blob/f6202340292bbf7185e1a7d4290188c0dacbb8d0/openag/utils.py#L163-L172
7,186
Chilipp/model-organization
model_organization/__init__.py
ModelOrganizer.info
def info(self, exp_path=False, project_path=False, global_path=False, config_path=False, complete=False, no_fix=False, on_projects=False, on_globals=False, projectname=None, return_dict=False, insert_id=True, only_keys=False, archives=False, **kwargs): """ Print information on the experiments Parameters ---------- exp_path: bool If True/set, print the filename of the experiment configuration project_path: bool If True/set, print the filename on the project configuration global_path: bool If True/set, print the filename on the global configuration config_path: bool If True/set, print the path to the configuration directory complete: bool If True/set, the information on all experiments are printed no_fix: bool If set, paths are given relative to the root directory of the project on_projects: bool If set, show information on the projects rather than the experiment on_globals: bool If set, show the global configuration settings projectname: str The name of the project that shall be used. If provided and `on_projects` is not True, the information on all experiments for this project will be shown return_dict: bool If True, the dictionary is returned instead of printed insert_id: bool If True and neither `on_projects`, nor `on_globals`, nor `projectname` is given, the experiment id is inserted in the dictionary only_keys: bool If True, only the keys of the given dictionary are printed archives: bool If True, print the archives and the corresponding experiments for the specified project """ self.app_main(**kwargs) def get_archives(project): ret = OrderedDict() for exp, a in self.config.experiments.items(): if self.is_archived(exp) and a.project == project: ret.setdefault(str(a), []).append(exp) return ret paths = OrderedDict([ ('conf_dir', config_path), ('_globals_file', global_path)]) if any(paths.values()): for key, val in paths.items(): if val: return (self.print_ or six.print_)(getattr( self.config, key)) return if archives: base = OrderedDict() current = projectname or self.projectname if complete: for project in self.config.projects.keys(): d = get_archives(project) if d: base[project] = d else: base[current] = get_archives(current) elif exp_path: current = self.experiment base = self.config.experiments.exp_files elif project_path: current = self.projectname base = OrderedDict( (key, osp.join(val, '.project', '.project.yml')) for key, val in self.config.projects.project_paths.items()) elif on_globals: complete = True no_fix = True base = self.config.global_config elif on_projects: base = OrderedDict(self.config.projects) current = projectname or self.projectname else: current = self.experiment if projectname is None: if insert_id: base = copy.deepcopy(self.config.experiments) if not complete: base[current]['id'] = current if six.PY3: base[current].move_to_end('id', last=False) else: base = self.config.experiments if not only_keys: # make sure the experiments are loaded if complete: base.load() else: base[current] # convert to an OrderedDict base = base.as_ordereddict() else: base = OrderedDict( (exp, self.config.experiments[exp]) for exp in self.config.experiments.project_map[projectname] ) complete = True if no_fix and not (archives or on_globals): for key, val in base.items(): if isinstance(val, dict): base[key] = self.rel_paths(copy.deepcopy(val)) if not complete: base = base[current] if only_keys: base = list(base.keys()) if not return_dict: if isinstance(base, six.string_types): ret = base else: ret = ordered_yaml_dump(base, default_flow_style=False) return (self.print_ or six.print_)(ret.rstrip()) else: return base
python
def info(self, exp_path=False, project_path=False, global_path=False, config_path=False, complete=False, no_fix=False, on_projects=False, on_globals=False, projectname=None, return_dict=False, insert_id=True, only_keys=False, archives=False, **kwargs): """ Print information on the experiments Parameters ---------- exp_path: bool If True/set, print the filename of the experiment configuration project_path: bool If True/set, print the filename on the project configuration global_path: bool If True/set, print the filename on the global configuration config_path: bool If True/set, print the path to the configuration directory complete: bool If True/set, the information on all experiments are printed no_fix: bool If set, paths are given relative to the root directory of the project on_projects: bool If set, show information on the projects rather than the experiment on_globals: bool If set, show the global configuration settings projectname: str The name of the project that shall be used. If provided and `on_projects` is not True, the information on all experiments for this project will be shown return_dict: bool If True, the dictionary is returned instead of printed insert_id: bool If True and neither `on_projects`, nor `on_globals`, nor `projectname` is given, the experiment id is inserted in the dictionary only_keys: bool If True, only the keys of the given dictionary are printed archives: bool If True, print the archives and the corresponding experiments for the specified project """ self.app_main(**kwargs) def get_archives(project): ret = OrderedDict() for exp, a in self.config.experiments.items(): if self.is_archived(exp) and a.project == project: ret.setdefault(str(a), []).append(exp) return ret paths = OrderedDict([ ('conf_dir', config_path), ('_globals_file', global_path)]) if any(paths.values()): for key, val in paths.items(): if val: return (self.print_ or six.print_)(getattr( self.config, key)) return if archives: base = OrderedDict() current = projectname or self.projectname if complete: for project in self.config.projects.keys(): d = get_archives(project) if d: base[project] = d else: base[current] = get_archives(current) elif exp_path: current = self.experiment base = self.config.experiments.exp_files elif project_path: current = self.projectname base = OrderedDict( (key, osp.join(val, '.project', '.project.yml')) for key, val in self.config.projects.project_paths.items()) elif on_globals: complete = True no_fix = True base = self.config.global_config elif on_projects: base = OrderedDict(self.config.projects) current = projectname or self.projectname else: current = self.experiment if projectname is None: if insert_id: base = copy.deepcopy(self.config.experiments) if not complete: base[current]['id'] = current if six.PY3: base[current].move_to_end('id', last=False) else: base = self.config.experiments if not only_keys: # make sure the experiments are loaded if complete: base.load() else: base[current] # convert to an OrderedDict base = base.as_ordereddict() else: base = OrderedDict( (exp, self.config.experiments[exp]) for exp in self.config.experiments.project_map[projectname] ) complete = True if no_fix and not (archives or on_globals): for key, val in base.items(): if isinstance(val, dict): base[key] = self.rel_paths(copy.deepcopy(val)) if not complete: base = base[current] if only_keys: base = list(base.keys()) if not return_dict: if isinstance(base, six.string_types): ret = base else: ret = ordered_yaml_dump(base, default_flow_style=False) return (self.print_ or six.print_)(ret.rstrip()) else: return base
['def', 'info', '(', 'self', ',', 'exp_path', '=', 'False', ',', 'project_path', '=', 'False', ',', 'global_path', '=', 'False', ',', 'config_path', '=', 'False', ',', 'complete', '=', 'False', ',', 'no_fix', '=', 'False', ',', 'on_projects', '=', 'False', ',', 'on_globals', '=', 'False', ',', 'projectname', '=', 'None', ',', 'return_dict', '=', 'False', ',', 'insert_id', '=', 'True', ',', 'only_keys', '=', 'False', ',', 'archives', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'app_main', '(', '*', '*', 'kwargs', ')', 'def', 'get_archives', '(', 'project', ')', ':', 'ret', '=', 'OrderedDict', '(', ')', 'for', 'exp', ',', 'a', 'in', 'self', '.', 'config', '.', 'experiments', '.', 'items', '(', ')', ':', 'if', 'self', '.', 'is_archived', '(', 'exp', ')', 'and', 'a', '.', 'project', '==', 'project', ':', 'ret', '.', 'setdefault', '(', 'str', '(', 'a', ')', ',', '[', ']', ')', '.', 'append', '(', 'exp', ')', 'return', 'ret', 'paths', '=', 'OrderedDict', '(', '[', '(', "'conf_dir'", ',', 'config_path', ')', ',', '(', "'_globals_file'", ',', 'global_path', ')', ']', ')', 'if', 'any', '(', 'paths', '.', 'values', '(', ')', ')', ':', 'for', 'key', ',', 'val', 'in', 'paths', '.', 'items', '(', ')', ':', 'if', 'val', ':', 'return', '(', 'self', '.', 'print_', 'or', 'six', '.', 'print_', ')', '(', 'getattr', '(', 'self', '.', 'config', ',', 'key', ')', ')', 'return', 'if', 'archives', ':', 'base', '=', 'OrderedDict', '(', ')', 'current', '=', 'projectname', 'or', 'self', '.', 'projectname', 'if', 'complete', ':', 'for', 'project', 'in', 'self', '.', 'config', '.', 'projects', '.', 'keys', '(', ')', ':', 'd', '=', 'get_archives', '(', 'project', ')', 'if', 'd', ':', 'base', '[', 'project', ']', '=', 'd', 'else', ':', 'base', '[', 'current', ']', '=', 'get_archives', '(', 'current', ')', 'elif', 'exp_path', ':', 'current', '=', 'self', '.', 'experiment', 'base', '=', 'self', '.', 'config', '.', 'experiments', '.', 'exp_files', 'elif', 'project_path', ':', 'current', '=', 'self', '.', 'projectname', 'base', '=', 'OrderedDict', '(', '(', 'key', ',', 'osp', '.', 'join', '(', 'val', ',', "'.project'", ',', "'.project.yml'", ')', ')', 'for', 'key', ',', 'val', 'in', 'self', '.', 'config', '.', 'projects', '.', 'project_paths', '.', 'items', '(', ')', ')', 'elif', 'on_globals', ':', 'complete', '=', 'True', 'no_fix', '=', 'True', 'base', '=', 'self', '.', 'config', '.', 'global_config', 'elif', 'on_projects', ':', 'base', '=', 'OrderedDict', '(', 'self', '.', 'config', '.', 'projects', ')', 'current', '=', 'projectname', 'or', 'self', '.', 'projectname', 'else', ':', 'current', '=', 'self', '.', 'experiment', 'if', 'projectname', 'is', 'None', ':', 'if', 'insert_id', ':', 'base', '=', 'copy', '.', 'deepcopy', '(', 'self', '.', 'config', '.', 'experiments', ')', 'if', 'not', 'complete', ':', 'base', '[', 'current', ']', '[', "'id'", ']', '=', 'current', 'if', 'six', '.', 'PY3', ':', 'base', '[', 'current', ']', '.', 'move_to_end', '(', "'id'", ',', 'last', '=', 'False', ')', 'else', ':', 'base', '=', 'self', '.', 'config', '.', 'experiments', 'if', 'not', 'only_keys', ':', '# make sure the experiments are loaded', 'if', 'complete', ':', 'base', '.', 'load', '(', ')', 'else', ':', 'base', '[', 'current', ']', '# convert to an OrderedDict', 'base', '=', 'base', '.', 'as_ordereddict', '(', ')', 'else', ':', 'base', '=', 'OrderedDict', '(', '(', 'exp', ',', 'self', '.', 'config', '.', 'experiments', '[', 'exp', ']', ')', 'for', 'exp', 'in', 'self', '.', 'config', '.', 'experiments', '.', 'project_map', '[', 'projectname', ']', ')', 'complete', '=', 'True', 'if', 'no_fix', 'and', 'not', '(', 'archives', 'or', 'on_globals', ')', ':', 'for', 'key', ',', 'val', 'in', 'base', '.', 'items', '(', ')', ':', 'if', 'isinstance', '(', 'val', ',', 'dict', ')', ':', 'base', '[', 'key', ']', '=', 'self', '.', 'rel_paths', '(', 'copy', '.', 'deepcopy', '(', 'val', ')', ')', 'if', 'not', 'complete', ':', 'base', '=', 'base', '[', 'current', ']', 'if', 'only_keys', ':', 'base', '=', 'list', '(', 'base', '.', 'keys', '(', ')', ')', 'if', 'not', 'return_dict', ':', 'if', 'isinstance', '(', 'base', ',', 'six', '.', 'string_types', ')', ':', 'ret', '=', 'base', 'else', ':', 'ret', '=', 'ordered_yaml_dump', '(', 'base', ',', 'default_flow_style', '=', 'False', ')', 'return', '(', 'self', '.', 'print_', 'or', 'six', '.', 'print_', ')', '(', 'ret', '.', 'rstrip', '(', ')', ')', 'else', ':', 'return', 'base']
Print information on the experiments Parameters ---------- exp_path: bool If True/set, print the filename of the experiment configuration project_path: bool If True/set, print the filename on the project configuration global_path: bool If True/set, print the filename on the global configuration config_path: bool If True/set, print the path to the configuration directory complete: bool If True/set, the information on all experiments are printed no_fix: bool If set, paths are given relative to the root directory of the project on_projects: bool If set, show information on the projects rather than the experiment on_globals: bool If set, show the global configuration settings projectname: str The name of the project that shall be used. If provided and `on_projects` is not True, the information on all experiments for this project will be shown return_dict: bool If True, the dictionary is returned instead of printed insert_id: bool If True and neither `on_projects`, nor `on_globals`, nor `projectname` is given, the experiment id is inserted in the dictionary only_keys: bool If True, only the keys of the given dictionary are printed archives: bool If True, print the archives and the corresponding experiments for the specified project
['Print', 'information', 'on', 'the', 'experiments']
train
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/__init__.py#L1000-L1127
7,187
ergoithz/browsepy
browsepy/compat.py
fsencode
def fsencode(path, os_name=os.name, fs_encoding=FS_ENCODING, errors=None): ''' Encode given path. :param path: path will be encoded if not using bytes :type path: bytes or str :param os_name: operative system name, defaults to os.name :type os_name: str :param fs_encoding: current filesystem encoding, defaults to autodetected :type fs_encoding: str :return: encoded path :rtype: bytes ''' if isinstance(path, bytes): return path if not errors: use_strict = PY_LEGACY or os_name == 'nt' errors = 'strict' if use_strict else 'surrogateescape' return path.encode(fs_encoding, errors=errors)
python
def fsencode(path, os_name=os.name, fs_encoding=FS_ENCODING, errors=None): ''' Encode given path. :param path: path will be encoded if not using bytes :type path: bytes or str :param os_name: operative system name, defaults to os.name :type os_name: str :param fs_encoding: current filesystem encoding, defaults to autodetected :type fs_encoding: str :return: encoded path :rtype: bytes ''' if isinstance(path, bytes): return path if not errors: use_strict = PY_LEGACY or os_name == 'nt' errors = 'strict' if use_strict else 'surrogateescape' return path.encode(fs_encoding, errors=errors)
['def', 'fsencode', '(', 'path', ',', 'os_name', '=', 'os', '.', 'name', ',', 'fs_encoding', '=', 'FS_ENCODING', ',', 'errors', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'path', ',', 'bytes', ')', ':', 'return', 'path', 'if', 'not', 'errors', ':', 'use_strict', '=', 'PY_LEGACY', 'or', 'os_name', '==', "'nt'", 'errors', '=', "'strict'", 'if', 'use_strict', 'else', "'surrogateescape'", 'return', 'path', '.', 'encode', '(', 'fs_encoding', ',', 'errors', '=', 'errors', ')']
Encode given path. :param path: path will be encoded if not using bytes :type path: bytes or str :param os_name: operative system name, defaults to os.name :type os_name: str :param fs_encoding: current filesystem encoding, defaults to autodetected :type fs_encoding: str :return: encoded path :rtype: bytes
['Encode', 'given', 'path', '.']
train
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/compat.py#L63-L81
7,188
sepandhaghighi/pycm
pycm/pycm_overall_func.py
ncr
def ncr(n, r): """ Calculate n choose r. :param n: n :type n : int :param r: r :type r :int :return: n choose r as int """ r = min(r, n - r) numer = reduce(op.mul, range(n, n - r, -1), 1) denom = reduce(op.mul, range(1, r + 1), 1) return numer // denom
python
def ncr(n, r): """ Calculate n choose r. :param n: n :type n : int :param r: r :type r :int :return: n choose r as int """ r = min(r, n - r) numer = reduce(op.mul, range(n, n - r, -1), 1) denom = reduce(op.mul, range(1, r + 1), 1) return numer // denom
['def', 'ncr', '(', 'n', ',', 'r', ')', ':', 'r', '=', 'min', '(', 'r', ',', 'n', '-', 'r', ')', 'numer', '=', 'reduce', '(', 'op', '.', 'mul', ',', 'range', '(', 'n', ',', 'n', '-', 'r', ',', '-', '1', ')', ',', '1', ')', 'denom', '=', 'reduce', '(', 'op', '.', 'mul', ',', 'range', '(', '1', ',', 'r', '+', '1', ')', ',', '1', ')', 'return', 'numer', '//', 'denom']
Calculate n choose r. :param n: n :type n : int :param r: r :type r :int :return: n choose r as int
['Calculate', 'n', 'choose', 'r', '.']
train
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_overall_func.py#L199-L212
7,189
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py
Executor.get_build_scanner_path
def get_build_scanner_path(self, scanner): """Fetch the scanner path for this executor's targets and sources. """ env = self.get_build_env() try: cwd = self.batches[0].targets[0].cwd except (IndexError, AttributeError): cwd = None return scanner.path(env, cwd, self.get_all_targets(), self.get_all_sources())
python
def get_build_scanner_path(self, scanner): """Fetch the scanner path for this executor's targets and sources. """ env = self.get_build_env() try: cwd = self.batches[0].targets[0].cwd except (IndexError, AttributeError): cwd = None return scanner.path(env, cwd, self.get_all_targets(), self.get_all_sources())
['def', 'get_build_scanner_path', '(', 'self', ',', 'scanner', ')', ':', 'env', '=', 'self', '.', 'get_build_env', '(', ')', 'try', ':', 'cwd', '=', 'self', '.', 'batches', '[', '0', ']', '.', 'targets', '[', '0', ']', '.', 'cwd', 'except', '(', 'IndexError', ',', 'AttributeError', ')', ':', 'cwd', '=', 'None', 'return', 'scanner', '.', 'path', '(', 'env', ',', 'cwd', ',', 'self', '.', 'get_all_targets', '(', ')', ',', 'self', '.', 'get_all_sources', '(', ')', ')']
Fetch the scanner path for this executor's targets and sources.
['Fetch', 'the', 'scanner', 'path', 'for', 'this', 'executor', 's', 'targets', 'and', 'sources', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py#L372-L382
7,190
veltzer/pydmt
pydmt/utils/python.py
hlp_source_under
def hlp_source_under(folder): """ this function finds all the python packages under a folder and write the 'packages' and 'package_dir' entries for a python setup.py script """ # walk the folder and find the __init__.py entries for packages. packages = [] package_dir = dict() for root, dirs, files in os.walk(folder): for file in files: if file != '__init__.py': continue full = os.path.dirname(os.path.join(root, file)) relative = os.path.relpath(full, folder) packages.append(relative) package_dir[relative] = full # we use pprint because we want the order to always remain the same return 'packages={0},\npackage_dir={1}'.format(sorted(packages), pprint.pformat(package_dir))
python
def hlp_source_under(folder): """ this function finds all the python packages under a folder and write the 'packages' and 'package_dir' entries for a python setup.py script """ # walk the folder and find the __init__.py entries for packages. packages = [] package_dir = dict() for root, dirs, files in os.walk(folder): for file in files: if file != '__init__.py': continue full = os.path.dirname(os.path.join(root, file)) relative = os.path.relpath(full, folder) packages.append(relative) package_dir[relative] = full # we use pprint because we want the order to always remain the same return 'packages={0},\npackage_dir={1}'.format(sorted(packages), pprint.pformat(package_dir))
['def', 'hlp_source_under', '(', 'folder', ')', ':', '# walk the folder and find the __init__.py entries for packages.', 'packages', '=', '[', ']', 'package_dir', '=', 'dict', '(', ')', 'for', 'root', ',', 'dirs', ',', 'files', 'in', 'os', '.', 'walk', '(', 'folder', ')', ':', 'for', 'file', 'in', 'files', ':', 'if', 'file', '!=', "'__init__.py'", ':', 'continue', 'full', '=', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'file', ')', ')', 'relative', '=', 'os', '.', 'path', '.', 'relpath', '(', 'full', ',', 'folder', ')', 'packages', '.', 'append', '(', 'relative', ')', 'package_dir', '[', 'relative', ']', '=', 'full', '# we use pprint because we want the order to always remain the same', 'return', "'packages={0},\\npackage_dir={1}'", '.', 'format', '(', 'sorted', '(', 'packages', ')', ',', 'pprint', '.', 'pformat', '(', 'package_dir', ')', ')']
this function finds all the python packages under a folder and write the 'packages' and 'package_dir' entries for a python setup.py script
['this', 'function', 'finds', 'all', 'the', 'python', 'packages', 'under', 'a', 'folder', 'and', 'write', 'the', 'packages', 'and', 'package_dir', 'entries', 'for', 'a', 'python', 'setup', '.', 'py', 'script']
train
https://github.com/veltzer/pydmt/blob/11d3db7ea079756c1e4137d3dd8a2cabbcc98bf7/pydmt/utils/python.py#L6-L24
7,191
buguroo/pyknow
pyknow/matchers/rete/nodes.py
ConflictSetNode._activate
def _activate(self, token): """Activate this node for the given token.""" info = token.to_info() activation = Activation( self.rule, frozenset(info.data), {k: v for k, v in info.context if isinstance(k, str)}) if token.is_valid(): if info not in self.memory: self.memory.add(info) if activation in self.removed: self.removed.remove(activation) else: self.added.add(activation) else: try: self.memory.remove(info) except ValueError: pass else: if activation in self.added: self.added.remove(activation) else: self.removed.add(activation)
python
def _activate(self, token): """Activate this node for the given token.""" info = token.to_info() activation = Activation( self.rule, frozenset(info.data), {k: v for k, v in info.context if isinstance(k, str)}) if token.is_valid(): if info not in self.memory: self.memory.add(info) if activation in self.removed: self.removed.remove(activation) else: self.added.add(activation) else: try: self.memory.remove(info) except ValueError: pass else: if activation in self.added: self.added.remove(activation) else: self.removed.add(activation)
['def', '_activate', '(', 'self', ',', 'token', ')', ':', 'info', '=', 'token', '.', 'to_info', '(', ')', 'activation', '=', 'Activation', '(', 'self', '.', 'rule', ',', 'frozenset', '(', 'info', '.', 'data', ')', ',', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'info', '.', 'context', 'if', 'isinstance', '(', 'k', ',', 'str', ')', '}', ')', 'if', 'token', '.', 'is_valid', '(', ')', ':', 'if', 'info', 'not', 'in', 'self', '.', 'memory', ':', 'self', '.', 'memory', '.', 'add', '(', 'info', ')', 'if', 'activation', 'in', 'self', '.', 'removed', ':', 'self', '.', 'removed', '.', 'remove', '(', 'activation', ')', 'else', ':', 'self', '.', 'added', '.', 'add', '(', 'activation', ')', 'else', ':', 'try', ':', 'self', '.', 'memory', '.', 'remove', '(', 'info', ')', 'except', 'ValueError', ':', 'pass', 'else', ':', 'if', 'activation', 'in', 'self', '.', 'added', ':', 'self', '.', 'added', '.', 'remove', '(', 'activation', ')', 'else', ':', 'self', '.', 'removed', '.', 'add', '(', 'activation', ')']
Activate this node for the given token.
['Activate', 'this', 'node', 'for', 'the', 'given', 'token', '.']
train
https://github.com/buguroo/pyknow/blob/48818336f2e9a126f1964f2d8dc22d37ff800fe8/pyknow/matchers/rete/nodes.py#L262-L288
7,192
ic-labs/django-icekit
icekit/publishing/models.py
PublishingModel.is_within_publication_dates
def is_within_publication_dates(obj, timestamp=None): """ Return True if the given timestamp (or ``now()`` by default) is witin any publication start/end date constraints. """ if timestamp is None: timestamp = timezone.now() start_date_ok = not obj.publication_date \ or obj.publication_date <= timestamp end_date_ok = not obj.publication_end_date \ or obj.publication_end_date > timestamp return start_date_ok and end_date_ok
python
def is_within_publication_dates(obj, timestamp=None): """ Return True if the given timestamp (or ``now()`` by default) is witin any publication start/end date constraints. """ if timestamp is None: timestamp = timezone.now() start_date_ok = not obj.publication_date \ or obj.publication_date <= timestamp end_date_ok = not obj.publication_end_date \ or obj.publication_end_date > timestamp return start_date_ok and end_date_ok
['def', 'is_within_publication_dates', '(', 'obj', ',', 'timestamp', '=', 'None', ')', ':', 'if', 'timestamp', 'is', 'None', ':', 'timestamp', '=', 'timezone', '.', 'now', '(', ')', 'start_date_ok', '=', 'not', 'obj', '.', 'publication_date', 'or', 'obj', '.', 'publication_date', '<=', 'timestamp', 'end_date_ok', '=', 'not', 'obj', '.', 'publication_end_date', 'or', 'obj', '.', 'publication_end_date', '>', 'timestamp', 'return', 'start_date_ok', 'and', 'end_date_ok']
Return True if the given timestamp (or ``now()`` by default) is witin any publication start/end date constraints.
['Return', 'True', 'if', 'the', 'given', 'timestamp', '(', 'or', 'now', '()', 'by', 'default', ')', 'is', 'witin', 'any', 'publication', 'start', '/', 'end', 'date', 'constraints', '.']
train
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/publishing/models.py#L113-L124
7,193
azogue/i2csense
i2csense/bh1750.py
BH1750._get_result
def _get_result(self) -> float: """Return current measurement result in lx.""" try: data = self._bus.read_word_data(self._i2c_add, self._mode) self._ok = True except OSError as exc: self.log_error("Bad reading in bus: %s", exc) self._ok = False return -1 count = data >> 8 | (data & 0xff) << 8 mode2coeff = 2 if self._high_res else 1 ratio = 1 / (1.2 * (self._mtreg / 69.0) * mode2coeff) return ratio * count
python
def _get_result(self) -> float: """Return current measurement result in lx.""" try: data = self._bus.read_word_data(self._i2c_add, self._mode) self._ok = True except OSError as exc: self.log_error("Bad reading in bus: %s", exc) self._ok = False return -1 count = data >> 8 | (data & 0xff) << 8 mode2coeff = 2 if self._high_res else 1 ratio = 1 / (1.2 * (self._mtreg / 69.0) * mode2coeff) return ratio * count
['def', '_get_result', '(', 'self', ')', '->', 'float', ':', 'try', ':', 'data', '=', 'self', '.', '_bus', '.', 'read_word_data', '(', 'self', '.', '_i2c_add', ',', 'self', '.', '_mode', ')', 'self', '.', '_ok', '=', 'True', 'except', 'OSError', 'as', 'exc', ':', 'self', '.', 'log_error', '(', '"Bad reading in bus: %s"', ',', 'exc', ')', 'self', '.', '_ok', '=', 'False', 'return', '-', '1', 'count', '=', 'data', '>>', '8', '|', '(', 'data', '&', '0xff', ')', '<<', '8', 'mode2coeff', '=', '2', 'if', 'self', '.', '_high_res', 'else', '1', 'ratio', '=', '1', '/', '(', '1.2', '*', '(', 'self', '.', '_mtreg', '/', '69.0', ')', '*', 'mode2coeff', ')', 'return', 'ratio', '*', 'count']
Return current measurement result in lx.
['Return', 'current', 'measurement', 'result', 'in', 'lx', '.']
train
https://github.com/azogue/i2csense/blob/ecc6806dcee9de827a5414a9e836d271fedca9b9/i2csense/bh1750.py#L108-L121
7,194
EventRegistry/event-registry-python
eventregistry/Analytics.py
Analytics.trainTopicGetTrainedTopic
def trainTopicGetTrainedTopic(self, uri, maxConcepts = 20, maxCategories = 10, ignoreConceptTypes=[], idfNormalization = True): """ retrieve topic for the topic for which you have already finished training @param uri: uri of the topic (obtained by calling trainTopicCreateTopic method) @param maxConcepts: number of top concepts to retrieve in the topic @param maxCategories: number of top categories to retrieve in the topic @param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those @param idfNormalization: should the concepts be normalized by punishing the commonly mentioned concepts @param returns: returns the trained topic: { concepts: [], categories: [] } """ return self._er.jsonRequestAnalytics("/api/v1/trainTopic", { "action": "getTrainedTopic", "uri": uri, "maxConcepts": maxConcepts, "maxCategories": maxCategories, "idfNormalization": idfNormalization })
python
def trainTopicGetTrainedTopic(self, uri, maxConcepts = 20, maxCategories = 10, ignoreConceptTypes=[], idfNormalization = True): """ retrieve topic for the topic for which you have already finished training @param uri: uri of the topic (obtained by calling trainTopicCreateTopic method) @param maxConcepts: number of top concepts to retrieve in the topic @param maxCategories: number of top categories to retrieve in the topic @param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those @param idfNormalization: should the concepts be normalized by punishing the commonly mentioned concepts @param returns: returns the trained topic: { concepts: [], categories: [] } """ return self._er.jsonRequestAnalytics("/api/v1/trainTopic", { "action": "getTrainedTopic", "uri": uri, "maxConcepts": maxConcepts, "maxCategories": maxCategories, "idfNormalization": idfNormalization })
['def', 'trainTopicGetTrainedTopic', '(', 'self', ',', 'uri', ',', 'maxConcepts', '=', '20', ',', 'maxCategories', '=', '10', ',', 'ignoreConceptTypes', '=', '[', ']', ',', 'idfNormalization', '=', 'True', ')', ':', 'return', 'self', '.', '_er', '.', 'jsonRequestAnalytics', '(', '"/api/v1/trainTopic"', ',', '{', '"action"', ':', '"getTrainedTopic"', ',', '"uri"', ':', 'uri', ',', '"maxConcepts"', ':', 'maxConcepts', ',', '"maxCategories"', ':', 'maxCategories', ',', '"idfNormalization"', ':', 'idfNormalization', '}', ')']
retrieve topic for the topic for which you have already finished training @param uri: uri of the topic (obtained by calling trainTopicCreateTopic method) @param maxConcepts: number of top concepts to retrieve in the topic @param maxCategories: number of top categories to retrieve in the topic @param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those @param idfNormalization: should the concepts be normalized by punishing the commonly mentioned concepts @param returns: returns the trained topic: { concepts: [], categories: [] }
['retrieve', 'topic', 'for', 'the', 'topic', 'for', 'which', 'you', 'have', 'already', 'finished', 'training']
train
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L172-L183
7,195
undertheseanlp/underthesea
travis_pypi_setup.py
prepend_line
def prepend_line(filepath, line): """Rewrite a file adding a line to its beginning. """ with open(filepath) as f: lines = f.readlines() lines.insert(0, line) with open(filepath, 'w') as f: f.writelines(lines)
python
def prepend_line(filepath, line): """Rewrite a file adding a line to its beginning. """ with open(filepath) as f: lines = f.readlines() lines.insert(0, line) with open(filepath, 'w') as f: f.writelines(lines)
['def', 'prepend_line', '(', 'filepath', ',', 'line', ')', ':', 'with', 'open', '(', 'filepath', ')', 'as', 'f', ':', 'lines', '=', 'f', '.', 'readlines', '(', ')', 'lines', '.', 'insert', '(', '0', ',', 'line', ')', 'with', 'open', '(', 'filepath', ',', "'w'", ')', 'as', 'f', ':', 'f', '.', 'writelines', '(', 'lines', ')']
Rewrite a file adding a line to its beginning.
['Rewrite', 'a', 'file', 'adding', 'a', 'line', 'to', 'its', 'beginning', '.']
train
https://github.com/undertheseanlp/underthesea/blob/3663427da65e2b449e9135e3812edecb938b2319/travis_pypi_setup.py#L69-L78
7,196
SuLab/WikidataIntegrator
wikidataintegrator/wdi_core.py
WDItemEngine.update
def update(self, data, append_value=None): """ This method takes data, and modifies the Wikidata item. This works together with the data already provided via the constructor or if the constructor is being instantiated with search_only=True. In the latter case, this allows for checking the item data before deciding which new data should be written to the Wikidata item. The actual write to Wikidata only happens on calling of the write() method. If data has been provided already via the constructor, data provided via the update() method will be appended to these data. :param data: A list of Wikidata statment items inheriting from WDBaseDataType :type data: list :param append_value: list with Wikidata property strings where the values should only be appended, not overwritten. :type: list """ assert type(data) == list if append_value: assert type(append_value) == list self.append_value.extend(append_value) self.data.extend(data) self.statements = copy.deepcopy(self.original_statements) if not __debug__: print(self.data) if self.fast_run: self.init_fastrun() if self.require_write and self.fast_run: self.init_data_load() self.__construct_claim_json() self.__check_integrity() elif not self.fast_run: self.__construct_claim_json() self.__check_integrity()
python
def update(self, data, append_value=None): """ This method takes data, and modifies the Wikidata item. This works together with the data already provided via the constructor or if the constructor is being instantiated with search_only=True. In the latter case, this allows for checking the item data before deciding which new data should be written to the Wikidata item. The actual write to Wikidata only happens on calling of the write() method. If data has been provided already via the constructor, data provided via the update() method will be appended to these data. :param data: A list of Wikidata statment items inheriting from WDBaseDataType :type data: list :param append_value: list with Wikidata property strings where the values should only be appended, not overwritten. :type: list """ assert type(data) == list if append_value: assert type(append_value) == list self.append_value.extend(append_value) self.data.extend(data) self.statements = copy.deepcopy(self.original_statements) if not __debug__: print(self.data) if self.fast_run: self.init_fastrun() if self.require_write and self.fast_run: self.init_data_load() self.__construct_claim_json() self.__check_integrity() elif not self.fast_run: self.__construct_claim_json() self.__check_integrity()
['def', 'update', '(', 'self', ',', 'data', ',', 'append_value', '=', 'None', ')', ':', 'assert', 'type', '(', 'data', ')', '==', 'list', 'if', 'append_value', ':', 'assert', 'type', '(', 'append_value', ')', '==', 'list', 'self', '.', 'append_value', '.', 'extend', '(', 'append_value', ')', 'self', '.', 'data', '.', 'extend', '(', 'data', ')', 'self', '.', 'statements', '=', 'copy', '.', 'deepcopy', '(', 'self', '.', 'original_statements', ')', 'if', 'not', '__debug__', ':', 'print', '(', 'self', '.', 'data', ')', 'if', 'self', '.', 'fast_run', ':', 'self', '.', 'init_fastrun', '(', ')', 'if', 'self', '.', 'require_write', 'and', 'self', '.', 'fast_run', ':', 'self', '.', 'init_data_load', '(', ')', 'self', '.', '__construct_claim_json', '(', ')', 'self', '.', '__check_integrity', '(', ')', 'elif', 'not', 'self', '.', 'fast_run', ':', 'self', '.', '__construct_claim_json', '(', ')', 'self', '.', '__check_integrity', '(', ')']
This method takes data, and modifies the Wikidata item. This works together with the data already provided via the constructor or if the constructor is being instantiated with search_only=True. In the latter case, this allows for checking the item data before deciding which new data should be written to the Wikidata item. The actual write to Wikidata only happens on calling of the write() method. If data has been provided already via the constructor, data provided via the update() method will be appended to these data. :param data: A list of Wikidata statment items inheriting from WDBaseDataType :type data: list :param append_value: list with Wikidata property strings where the values should only be appended, not overwritten. :type: list
['This', 'method', 'takes', 'data', 'and', 'modifies', 'the', 'Wikidata', 'item', '.', 'This', 'works', 'together', 'with', 'the', 'data', 'already', 'provided', 'via', 'the', 'constructor', 'or', 'if', 'the', 'constructor', 'is', 'being', 'instantiated', 'with', 'search_only', '=', 'True', '.', 'In', 'the', 'latter', 'case', 'this', 'allows', 'for', 'checking', 'the', 'item', 'data', 'before', 'deciding', 'which', 'new', 'data', 'should', 'be', 'written', 'to', 'the', 'Wikidata', 'item', '.', 'The', 'actual', 'write', 'to', 'Wikidata', 'only', 'happens', 'on', 'calling', 'of', 'the', 'write', '()', 'method', '.', 'If', 'data', 'has', 'been', 'provided', 'already', 'via', 'the', 'constructor', 'data', 'provided', 'via', 'the', 'update', '()', 'method', 'will', 'be', 'appended', 'to', 'these', 'data', '.', ':', 'param', 'data', ':', 'A', 'list', 'of', 'Wikidata', 'statment', 'items', 'inheriting', 'from', 'WDBaseDataType', ':', 'type', 'data', ':', 'list', ':', 'param', 'append_value', ':', 'list', 'with', 'Wikidata', 'property', 'strings', 'where', 'the', 'values', 'should', 'only', 'be', 'appended', 'not', 'overwritten', '.', ':', 'type', ':', 'list']
train
https://github.com/SuLab/WikidataIntegrator/blob/8ceb2ed1c08fec070ec9edfcf7db7b8691481b62/wikidataintegrator/wdi_core.py#L635-L669
7,197
douban/brownant
brownant/pipeline/base.py
PipelineProperty.get_attr
def get_attr(self, obj, name): """Get attribute of the target object with the configured attribute name in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names` of this instance. :param obj: the target object. :type obj: :class:`~brownant.dinergate.Dinergate` :param name: the internal name used in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names`. (.e.g. `"text_attr"`) """ attr_name = self.attr_names[name] return getattr(obj, attr_name)
python
def get_attr(self, obj, name): """Get attribute of the target object with the configured attribute name in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names` of this instance. :param obj: the target object. :type obj: :class:`~brownant.dinergate.Dinergate` :param name: the internal name used in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names`. (.e.g. `"text_attr"`) """ attr_name = self.attr_names[name] return getattr(obj, attr_name)
['def', 'get_attr', '(', 'self', ',', 'obj', ',', 'name', ')', ':', 'attr_name', '=', 'self', '.', 'attr_names', '[', 'name', ']', 'return', 'getattr', '(', 'obj', ',', 'attr_name', ')']
Get attribute of the target object with the configured attribute name in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names` of this instance. :param obj: the target object. :type obj: :class:`~brownant.dinergate.Dinergate` :param name: the internal name used in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names`. (.e.g. `"text_attr"`)
['Get', 'attribute', 'of', 'the', 'target', 'object', 'with', 'the', 'configured', 'attribute', 'name', 'in', 'the', ':', 'attr', ':', '~brownant', '.', 'pipeline', '.', 'base', '.', 'PipelineProperty', '.', 'attr_names', 'of', 'this', 'instance', '.']
train
https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/pipeline/base.py#L83-L95
7,198
SBRG/ssbio
ssbio/databases/uniprot.py
uniprot_reviewed_checker_batch
def uniprot_reviewed_checker_batch(uniprot_ids): """Batch check if uniprot IDs are reviewed or not Args: uniprot_ids: UniProt ID or list of UniProt IDs Returns: A dictionary of {UniProtID: Boolean} """ uniprot_ids = ssbio.utils.force_list(uniprot_ids) invalid_ids = [i for i in uniprot_ids if not is_valid_uniprot_id(i)] uniprot_ids = [i for i in uniprot_ids if is_valid_uniprot_id(i)] if invalid_ids: warnings.warn("Invalid UniProt IDs {} will be ignored".format(invalid_ids)) # splitting query up into managable sizes (200 IDs each) Nmax = 200 N, rest = divmod(len(uniprot_ids), Nmax) uni_rev_dict = {} if rest > 0: N += 1 for i in range(0, N): i1 = i * Nmax i2 = (i + 1) * Nmax if i2 > len(uniprot_ids): i2 = len(uniprot_ids) query = uniprot_ids[i1:i2] query_string = '' for x in query: query_string += 'id:' + x + '+OR+' query_string = query_string.strip('+OR+') uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab')) uni_rev_df = pd.read_table(uni_rev_raw, sep='\t', index_col=0) uni_rev_df = uni_rev_df.fillna(False) # no_metadata = uni_rev_df[pd.isnull(uni_rev_df.Status)].index.tolist() # if no_metadata: # warnings.warn("Unable to retrieve metadata for {}.".format(no_metadata)) uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)] uni_rev_df = uni_rev_df.replace(to_replace="reviewed", value=True) uni_rev_df = uni_rev_df.replace(to_replace="unreviewed", value=False) uni_rev_dict_adder = uni_rev_df.to_dict()['Status'] uni_rev_dict.update(uni_rev_dict_adder) return uni_rev_dict
python
def uniprot_reviewed_checker_batch(uniprot_ids): """Batch check if uniprot IDs are reviewed or not Args: uniprot_ids: UniProt ID or list of UniProt IDs Returns: A dictionary of {UniProtID: Boolean} """ uniprot_ids = ssbio.utils.force_list(uniprot_ids) invalid_ids = [i for i in uniprot_ids if not is_valid_uniprot_id(i)] uniprot_ids = [i for i in uniprot_ids if is_valid_uniprot_id(i)] if invalid_ids: warnings.warn("Invalid UniProt IDs {} will be ignored".format(invalid_ids)) # splitting query up into managable sizes (200 IDs each) Nmax = 200 N, rest = divmod(len(uniprot_ids), Nmax) uni_rev_dict = {} if rest > 0: N += 1 for i in range(0, N): i1 = i * Nmax i2 = (i + 1) * Nmax if i2 > len(uniprot_ids): i2 = len(uniprot_ids) query = uniprot_ids[i1:i2] query_string = '' for x in query: query_string += 'id:' + x + '+OR+' query_string = query_string.strip('+OR+') uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab')) uni_rev_df = pd.read_table(uni_rev_raw, sep='\t', index_col=0) uni_rev_df = uni_rev_df.fillna(False) # no_metadata = uni_rev_df[pd.isnull(uni_rev_df.Status)].index.tolist() # if no_metadata: # warnings.warn("Unable to retrieve metadata for {}.".format(no_metadata)) uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)] uni_rev_df = uni_rev_df.replace(to_replace="reviewed", value=True) uni_rev_df = uni_rev_df.replace(to_replace="unreviewed", value=False) uni_rev_dict_adder = uni_rev_df.to_dict()['Status'] uni_rev_dict.update(uni_rev_dict_adder) return uni_rev_dict
['def', 'uniprot_reviewed_checker_batch', '(', 'uniprot_ids', ')', ':', 'uniprot_ids', '=', 'ssbio', '.', 'utils', '.', 'force_list', '(', 'uniprot_ids', ')', 'invalid_ids', '=', '[', 'i', 'for', 'i', 'in', 'uniprot_ids', 'if', 'not', 'is_valid_uniprot_id', '(', 'i', ')', ']', 'uniprot_ids', '=', '[', 'i', 'for', 'i', 'in', 'uniprot_ids', 'if', 'is_valid_uniprot_id', '(', 'i', ')', ']', 'if', 'invalid_ids', ':', 'warnings', '.', 'warn', '(', '"Invalid UniProt IDs {} will be ignored"', '.', 'format', '(', 'invalid_ids', ')', ')', '# splitting query up into managable sizes (200 IDs each)', 'Nmax', '=', '200', 'N', ',', 'rest', '=', 'divmod', '(', 'len', '(', 'uniprot_ids', ')', ',', 'Nmax', ')', 'uni_rev_dict', '=', '{', '}', 'if', 'rest', '>', '0', ':', 'N', '+=', '1', 'for', 'i', 'in', 'range', '(', '0', ',', 'N', ')', ':', 'i1', '=', 'i', '*', 'Nmax', 'i2', '=', '(', 'i', '+', '1', ')', '*', 'Nmax', 'if', 'i2', '>', 'len', '(', 'uniprot_ids', ')', ':', 'i2', '=', 'len', '(', 'uniprot_ids', ')', 'query', '=', 'uniprot_ids', '[', 'i1', ':', 'i2', ']', 'query_string', '=', "''", 'for', 'x', 'in', 'query', ':', 'query_string', '+=', "'id:'", '+', 'x', '+', "'+OR+'", 'query_string', '=', 'query_string', '.', 'strip', '(', "'+OR+'", ')', 'uni_rev_raw', '=', 'StringIO', '(', 'bsup', '.', 'search', '(', 'query_string', ',', 'columns', '=', "'id,reviewed'", ',', 'frmt', '=', "'tab'", ')', ')', 'uni_rev_df', '=', 'pd', '.', 'read_table', '(', 'uni_rev_raw', ',', 'sep', '=', "'\\t'", ',', 'index_col', '=', '0', ')', 'uni_rev_df', '=', 'uni_rev_df', '.', 'fillna', '(', 'False', ')', '# no_metadata = uni_rev_df[pd.isnull(uni_rev_df.Status)].index.tolist()', '# if no_metadata:', '# warnings.warn("Unable to retrieve metadata for {}.".format(no_metadata))', 'uni_rev_df', '=', 'uni_rev_df', '[', 'pd', '.', 'notnull', '(', 'uni_rev_df', '.', 'Status', ')', ']', 'uni_rev_df', '=', 'uni_rev_df', '.', 'replace', '(', 'to_replace', '=', '"reviewed"', ',', 'value', '=', 'True', ')', 'uni_rev_df', '=', 'uni_rev_df', '.', 'replace', '(', 'to_replace', '=', '"unreviewed"', ',', 'value', '=', 'False', ')', 'uni_rev_dict_adder', '=', 'uni_rev_df', '.', 'to_dict', '(', ')', '[', "'Status'", ']', 'uni_rev_dict', '.', 'update', '(', 'uni_rev_dict_adder', ')', 'return', 'uni_rev_dict']
Batch check if uniprot IDs are reviewed or not Args: uniprot_ids: UniProt ID or list of UniProt IDs Returns: A dictionary of {UniProtID: Boolean}
['Batch', 'check', 'if', 'uniprot', 'IDs', 'are', 'reviewed', 'or', 'not']
train
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L353-L406
7,199
shapiromatron/bmds
bmds/logic/recommender.py
Recommender._get_recommended_models
def _get_recommended_models(models, fld_name): """ Returns a list of models which have the minimum target field value for a given field name (AIC or BMDL). """ target_value = min([model.output[fld_name] for model in models]) return [model for model in models if model.output[fld_name] == target_value]
python
def _get_recommended_models(models, fld_name): """ Returns a list of models which have the minimum target field value for a given field name (AIC or BMDL). """ target_value = min([model.output[fld_name] for model in models]) return [model for model in models if model.output[fld_name] == target_value]
['def', '_get_recommended_models', '(', 'models', ',', 'fld_name', ')', ':', 'target_value', '=', 'min', '(', '[', 'model', '.', 'output', '[', 'fld_name', ']', 'for', 'model', 'in', 'models', ']', ')', 'return', '[', 'model', 'for', 'model', 'in', 'models', 'if', 'model', '.', 'output', '[', 'fld_name', ']', '==', 'target_value', ']']
Returns a list of models which have the minimum target field value for a given field name (AIC or BMDL).
['Returns', 'a', 'list', 'of', 'models', 'which', 'have', 'the', 'minimum', 'target', 'field', 'value', 'for', 'a', 'given', 'field', 'name', '(', 'AIC', 'or', 'BMDL', ')', '.']
train
https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/logic/recommender.py#L136-L142