| column                    | dtype         | lengths / values |
|---------------------------|---------------|------------------|
| Unnamed: 0                | int64         | 0–10k            |
| repository_name           | stringlengths | 7–54             |
| func_path_in_repository   | stringlengths | 5–223            |
| func_name                 | stringlengths | 1–134            |
| whole_func_string         | stringlengths | 100–30.3k        |
| language                  | stringclasses | 1 value          |
| func_code_string          | stringlengths | 100–30.3k        |
| func_code_tokens          | stringlengths | 138–33.2k        |
| func_documentation_string | stringlengths | 1–15k            |
| func_documentation_tokens | stringlengths | 5–5.14k          |
| split_name                | stringclasses | 1 value          |
| func_code_url             | stringlengths | 91–315           |
4,600
SatelliteQE/nailgun
nailgun/config.py
BaseServerConfig.delete
```python
def delete(cls, label='default', path=None):
    """Delete a server configuration.

    This method is thread safe.

    :param label: A string. The configuration identified by ``label``
        is deleted.
    :param path: A string. The configuration file to be manipulated.
        Defaults to what is returned by
        :func:`nailgun.config._get_config_file_path`.
    :returns: ``None``
    """
    if path is None:
        path = _get_config_file_path(
            cls._xdg_config_dir,
            cls._xdg_config_file
        )
    cls._file_lock.acquire()
    try:
        with open(path) as config_file:
            config = json.load(config_file)
        del config[label]
        with open(path, 'w') as config_file:
            json.dump(config, config_file)
    finally:
        cls._file_lock.release()
```
python
train
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/config.py#L125-L151
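As a usage sketch (assuming nailgun's concrete ServerConfig subclass inherits this classmethod, and a config file that already holds a 'staging' entry; both the label and the explicit path below are hypothetical):

```python
# Hypothetical usage: drop the 'staging' entry from the default config
# file, then from an explicitly named file.
from nailgun.config import ServerConfig

ServerConfig.delete(label='staging')
ServerConfig.delete(label='staging', path='/tmp/nailgun-settings.json')
```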
4,601
houtianze/bypy
bypy/bypy.py
main
```python
def main(argv=None):  # IGNORE:C0111
    ''' Main Entry '''
    by = None
    reqres = check_requirements()
    if reqres == CheckResult.Error:
        perr("Requirement checking failed")
        sys.exit(const.EFatal)

    try:
        result = const.ENoError
        if argv is None:
            argv = sys.argv
        else:
            sys.argv.extend(argv)

        setuphandlers()

        parser = getparser()
        args = parser.parse_args()
        dl_args = ''
        if not args.downloader_args:
            if const.DownloaderArgsEnvKey in os.environ:
                dl_args = os.environ[const.DownloaderArgsEnvKey]
        else:
            prefixlen = len(const.DownloaderArgsIsFilePrefix)
            if args.downloader_args[:prefixlen] == const.DownloaderArgsIsFilePrefix:  # file
                with io.open(args.downloader_args[prefixlen:], 'r', encoding='utf-8') as f:
                    dl_args = f.read().strip()
            else:
                dl_args = args.downloader_args

        # house-keeping reminder
        # TODO: may need to move into ByPy for customized config dir
        if os.path.exists(const.HashCachePath):
            cachesize = getfilesize(const.HashCachePath)
            if cachesize > 10 * const.OneM or cachesize == -1:
                pwarn((
                    "*** WARNING ***\n"
                    "Hash Cache file '{0}' is very large ({1}).\n"
                    "This may affect program's performance (high memory consumption).\n"
                    "You can first try to run 'bypy.py cleancache' to slim the file.\n"
                    "But if the file size won't reduce (this warning persists),"
                    " you may consider deleting / moving the Hash Cache file '{0}'\n"
                    "*** WARNING ***\n\n\n").format(const.HashCachePath, human_size(cachesize)))

        # check for situations that require no ByPy object creation first
        if args.clean >= 1:
            return clean_prog_files(args.clean, args.verbose, args.configdir)

        # some arguments need some processing
        try:
            slice_size = interpret_size(args.slice)
        except (ValueError, KeyError):
            perr("Error: Invalid slice size specified '{}'".format(args.slice))
            return const.EArgument

        try:
            chunk_size = interpret_size(args.chunk)
        except (ValueError, KeyError):
            perr("Error: Invalid slice size specified '{}'".format(args.slice))
            return const.EArgument

        if len(args.command) <= 0 or \
                (len(args.command) == 1 and args.command[0].lower() == 'help'):
            parser.print_help()
            return const.EArgument
        elif len(args.command) == 2 and args.command[0].lower() == 'help':
            ByPy.help(args.command[1])
            return const.EArgument
        elif args.command[0] in ByPy.__dict__:  # dir(ByPy), dir(by)
            # timeout = args.timeout or None
            cached.usecache = not args.forcehash
            bypyopt = {
                'slice_size': slice_size,
                'dl_chunk_size': chunk_size,
                'verify': args.verify,
                'retry': args.retry,
                'timeout': args.timeout,
                'quit_when_fail': args.quit,
                'resumedownload': args.resumedl,
                'incregex': args.incregex,
                'ondup': args.ondup,
                'followlink': args.followlink,
                'checkssl': args.checkssl,
                'cacerts': args.cacerts,
                'rapiduploadonly': args.rapiduploadonly,
                'mirror': args.mirror,
                'selectmirror': args.selectmirror,
                'configdir': args.configdir,
                'resumedl_revertcount': args.resumedl_revertcount,
                'deletesource': args.deletesource,
                'downloader': args.downloader,
                'downloader_args': dl_args,
                'verbose': args.verbose,
                'debug': args.debug}
            if Pool:
                bypyopt['processes'] = args.processes

            # we construct a ByPy object here.
            # if you want to try PanAPI, simply replace ByPy with PanAPI, and all the bduss related function _should_ work
            # I didn't use PanAPI here as I have never tried out those functions inside
            by = ByPy(**bypyopt)

            uargs = []
            for arg in args.command[1:]:
                if sys.version_info[0] < 3:
                    uargs.append(unicode(arg, gvar.SystemEncoding))
                else:
                    uargs.append(arg)
            result = getattr(by, args.command[0])(*uargs)
            if result != const.ENoError:
                errmsg = '-' * 64 + "\nError {}{}".format(result,
                    ': ' + const.ErrorExplanations[result] if result in const.ErrorExplanations else '')
                perr(errmsg)
        else:
            perr("Error: Command '{}' not available.".format(args.command[0]))
            parser.print_help()
            return const.EParameter

    except KeyboardInterrupt:
        # handle keyboard interrupt
        pr("KeyboardInterrupt")
        pr("Abort")
    except Exception as ex:
        # NOTE: Capturing the exeption as 'ex' seems matters, otherwise this:
        # except Exception ex:
        # will sometimes give exception ...
        perr("Exception occurred:\n{}".format(formatex(ex)))
        pr("Abort")
        raise
    finally:
        if by:
            by.quit(result)
```
python
train
https://github.com/houtianze/bypy/blob/c59b6183e2fca45f11138bbcdec6247449b2eaad/bypy/bypy.py#L3573-L3705
4,602
ktbyers/netmiko
netmiko/snmp_autodetect.py
SNMPDetect.autodetect
```python
def autodetect(self):
    """
    Try to guess the device_type using SNMP GET based on the SNMP_MAPPER dict.
    The type which is returned is directly matching the name in
    *netmiko.ssh_dispatcher.CLASS_MAPPER_BASE* dict.

    Thus you can use this name to retrieve automatically the right ConnectionClass

    Returns
    -------
    potential_type : str
        The name of the device_type that must be running.
    """
    # Convert SNMP_MAPPER to a list and sort by priority
    snmp_mapper_list = []
    for k, v in SNMP_MAPPER.items():
        snmp_mapper_list.append({k: v})
    snmp_mapper_list = sorted(
        snmp_mapper_list, key=lambda x: list(x.values())[0]["priority"]
    )
    snmp_mapper_list.reverse()

    for entry in snmp_mapper_list:
        for device_type, v in entry.items():
            oid = v["oid"]
            regex = v["expr"]

            # Used cache data if we already queryied this OID
            if self._response_cache.get(oid):
                snmp_response = self._response_cache.get(oid)
            else:
                snmp_response = self._get_snmp(oid)
                self._response_cache[oid] = snmp_response

            # See if we had a match
            if re.search(regex, snmp_response):
                return device_type
    return None
```
python
train
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/snmp_autodetect.py#L304-L342
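A sketch of how the detector is typically driven, feeding the guessed type straight into netmiko's ConnectHandler (host, credentials, and community string below are placeholders):

```python
# SNMP-based device-type detection; placeholders for host/credentials.
from netmiko import ConnectHandler
from netmiko.snmp_autodetect import SNMPDetect

snmp = SNMPDetect('10.0.0.1', snmp_version='v2c', community='public')
device_type = snmp.autodetect()  # e.g. 'cisco_ios', or None if no OID matched

if device_type is not None:
    conn = ConnectHandler(device_type=device_type, host='10.0.0.1',
                          username='admin', password='secret')
```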
4,603
wonambi-python/wonambi
wonambi/attr/chan.py
find_channel_groups
```python
def find_channel_groups(chan):
    """Channels are often organized in groups (different grids / strips or
    channels in different brain locations), so we use a simple heuristic to
    get these channel groups.

    Parameters
    ----------
    chan : instance of Channels
        channels to group

    Returns
    -------
    groups : dict
        channel groups: key is the common string, and the item is a list of
        labels
    """
    labels = chan.return_label()
    group_names = {match('([A-Za-z ]+)\d+', label).group(1) for label in labels}

    groups = {}
    for group_name in group_names:
        groups[group_name] = [label for label in labels if label.startswith(group_name)]

    return groups
```
python
train
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/attr/chan.py#L427-L450
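The heuristic strips trailing digits and groups by the leading alphabetic prefix; a standalone illustration of the same idea, using plain label strings in place of a wonambi Channels object:

```python
# Standalone illustration of the grouping heuristic used above:
# 'GR1'..'GR3' and 'TP1'..'TP2' collapse into two groups.
from re import match

labels = ['GR1', 'GR2', 'GR3', 'TP1', 'TP2']
group_names = {match(r'([A-Za-z ]+)\d+', label).group(1) for label in labels}
groups = {name: [l for l in labels if l.startswith(name)] for name in group_names}
print(groups)  # {'GR': ['GR1', 'GR2', 'GR3'], 'TP': ['TP1', 'TP2']}
```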
4,604
ska-sa/purr
Purr/Plugins/local_pychart/area.py
T.draw
```python
def draw(self, can=None):
    "Draw the charts."
    if can == None:
        can = canvas.default_canvas()

    assert self.check_integrity()
    for plot in self.__plots:
        plot.check_integrity()

    self.x_range, self.x_grid_interval = \
        self.__get_data_range(self.x_range, 'X', self.x_coord,
                              self.x_grid_interval)
    self.y_range, self.y_grid_interval = \
        self.__get_data_range(self.y_range, 'Y', self.y_coord,
                              self.y_grid_interval)

    can.rectangle(self.border_line_style, self.bg_style,
                  self.loc[0], self.loc[1],
                  self.loc[0] + self.size[0], self.loc[1] + self.size[1])

    if not self.x_grid_over_plot:
        self.__draw_x_grid_and_axis(can)
    if not self.y_grid_over_plot:
        self.__draw_y_grid_and_axis(can)

    clipbox = theme.adjust_bounding_box([self.loc[0], self.loc[1],
                                         self.loc[0] + self.size[0],
                                         self.loc[1] + self.size[1]])
    can.clip(clipbox[0], clipbox[1], clipbox[2], clipbox[3])

    for plot in self.__plots:
        plot.draw(self, can)

    can.endclip()

    if self.x_grid_over_plot:
        self.__draw_x_grid_and_axis(can)
    if self.y_grid_over_plot:
        self.__draw_y_grid_and_axis(can)

    if self.legend == _dummy_legend:
        self.legend = legend.T()

    if self.legend:
        legends = []
        for plot in self.__plots:
            entry = plot.get_legend_entry()
            if entry == None:
                pass
            elif type(entry) != ListType:
                legends.append(entry)
            else:
                for e in entry:
                    legends.append(e)
        self.legend.draw(self, legends, can)
```
python
train
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/area.py#L193-L255
4,605
databio/pypiper
pypiper/manager.py
PipelineManager._attend_process
```python
def _attend_process(self, proc, sleeptime):
    """
    Waits on a process for a given time to see if it finishes, returns True
    if it's still running after the given time or False as soon as it
    returns.

    :param psutil.Popen proc: Process object opened by psutil.Popen()
    :param float sleeptime: Time to wait
    :return bool: True if process is still running; otherwise false
    """
    # print("attend:{}".format(proc.pid))
    try:
        proc.wait(timeout=sleeptime)
    except psutil.TimeoutExpired:
        return True
    return False
```
python
train
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L810-L825
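The same wait-with-timeout pattern works standalone; a minimal sketch (POSIX `sleep` stands in for a pipeline command):

```python
# Poll a child process in fixed time slices: True = still running.
import psutil

def attend(proc, sleeptime):
    try:
        proc.wait(timeout=sleeptime)
    except psutil.TimeoutExpired:
        return True
    return False

proc = psutil.Popen(['sleep', '3'])  # stand-in for a pipeline step
while attend(proc, 1.0):
    print('still running...')
print('exit code:', proc.returncode)
```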
4,606
nschloe/optimesh
optimesh/odt.py
energy
```python
def energy(mesh, uniform_density=False):
    """The mesh energy is defined as

    E = int_Omega |u_l(x) - u(x)| rho(x) dx

    where u(x) = ||x||^2 and u_l is its piecewise linearization on the mesh.
    """
    # E = 1/(d+1) sum_i ||x_i||^2 |omega_i| - int_Omega_i ||x||^2
    dim = mesh.cells["nodes"].shape[1] - 1

    star_volume = numpy.zeros(mesh.node_coords.shape[0])
    for i in range(3):
        idx = mesh.cells["nodes"][:, i]
        if uniform_density:
            # rho = 1,
            # int_{star} phi_i * rho = 1/(d+1) sum_{triangles in star} |triangle|
            fastfunc.add.at(star_volume, idx, mesh.cell_volumes)
        else:
            # rho = 1 / tau_j,
            # int_{star} phi_i * rho = 1/(d+1) |num triangles in star|
            fastfunc.add.at(star_volume, idx, numpy.ones(idx.shape, dtype=float))
    x2 = numpy.einsum("ij,ij->i", mesh.node_coords, mesh.node_coords)
    out = 1 / (dim + 1) * numpy.dot(star_volume, x2)

    # could be cached
    assert dim == 2
    x = mesh.node_coords[:, :2]
    triangles = numpy.moveaxis(x[mesh.cells["nodes"]], 0, 1)
    val = quadpy.triangle.integrate(
        lambda x: x[0] ** 2 + x[1] ** 2,
        triangles,
        # Take any scheme with order 2
        quadpy.triangle.Dunavant(2),
    )

    if uniform_density:
        val = numpy.sum(val)
    else:
        rho = 1.0 / mesh.cell_volumes
        val = numpy.dot(val, rho)

    assert out >= val

    return out - val
```
python
train
https://github.com/nschloe/optimesh/blob/b85f48d1559a51a01cc3df6214c61ca8ad5ed786/optimesh/odt.py#L28-L70
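Restating the docstring's energy in display form, together with the identity that the function's first comment relies on (uniform-density case; the absolute value drops because u is convex, so u_l >= u on each cell):

```latex
% Docstring energy with the identity from the code's first comment
% (\rho \equiv 1); \omega_i is the star of cells around node i.
\begin{align*}
E &= \int_\Omega \lvert u_\ell(x) - u(x) \rvert \, \rho(x) \, \mathrm{d}x,
  \qquad u(x) = \lVert x \rVert^2, \\
E &= \frac{1}{d+1} \sum_i \lVert x_i \rVert^2 \, \lvert \omega_i \rvert
   - \int_\Omega \lVert x \rVert^2 \, \mathrm{d}x .
\end{align*}
```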
4,607
brian-rose/climlab
climlab/process/process.py
Process.lon
```python
def lon(self):
    """Longitude of grid centers (degrees)

    :getter:    Returns the points of axis ``'lon'`` if availible in the
                process's domains.
    :type:      array
    :raises:    :exc:`ValueError` if no ``'lon'`` axis can be found.
    """
    try:
        for domname, dom in self.domains.items():
            try:
                thislon = dom.axes['lon'].points
            except:
                pass
        return thislon
    except:
        raise ValueError('Can\'t resolve a lon axis.')
```
python
train
https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/process/process.py#L662-L680
4,608
django-auth-ldap/django-auth-ldap
django_auth_ldap/config.py
_LDAPConfig.get_logger
```python
def get_logger(cls):
    """
    Initializes and returns our logger instance.
    """
    if cls.logger is None:
        cls.logger = logging.getLogger("django_auth_ldap")
        cls.logger.addHandler(logging.NullHandler())

    return cls.logger
```
python
train
https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/config.py#L68-L76
4,609
dcaune/perseus-lib-python-common
majormode/perseus/utils/cast.py
string_to_macaddr
```python
def string_to_macaddr(value, strict=False):
    """
    Return a tuple corresponding to the string representation of a Media
    Access Control address (MAC address) of a device, which is a unique
    identifier assigned to a network interface controller (NIC) for
    communications at the data link layer of a network segment.

    The standard (IEEE 802) format for printing EUI-48 addresses in human-
    friendly form is six groups of two hexadecimal digits, separated by
    hyphens (-) in transmission order (e.g. ``01-23-45-67-89-AB``).  This
    form is also commonly used for EUI-64 (e.g. ``01-23-45-67-89-AB-CD-EF``).

    Other conventions include six groups of two hexadecimal digits separated
    by colons (:) (e.g. 01:23:45:67:89:AB), and three groups of four
    hexadecimal digits separated by dots (.) (e.g. ``0123.4567.89AB``);
    again in transmission order.

    @param value: a string representation of a MAC address.

    @param strict: indicate whether the ``None`` value is accepted.

    @return: a tuple of 6 hexadecimal strings ``(hex1, hex2, hex3, hex4,
        hex5, hex6)``, each ranging from ``0x00`` to ``0xFF``.
    """
    if is_undefined(value):
        if strict:
            raise ValueError('The value cannot be null')
        return None

    match = REGEX_MAC_ADDRESS.match(value.lower())
    if match is None:
        raise ValueError('The specified string "%s" does not represent a MAC address' % value)

    return match.groups()
```
python
train
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/majormode/perseus/utils/cast.py#L386-L420
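Hypothetical calls showing two of the accepted notations from the docstring; the exact grouping depends on the module's REGEX_MAC_ADDRESS, so the commented results are per the docstring's 6-tuple contract (lowercased, since the function matches against `value.lower()`):

```python
# Hypothetical usage; commented results follow the docstring's contract.
from majormode.perseus.utils.cast import string_to_macaddr

string_to_macaddr('01-23-45-67-89-AB')   # ('01', '23', '45', '67', '89', 'ab')
string_to_macaddr('01:23:45:67:89:AB')   # same tuple, colon notation
string_to_macaddr(None)                  # None (strict=False)
string_to_macaddr(None, strict=True)     # raises ValueError
```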
4,610
riccardocagnasso/useless
src/useless/common/__init__.py
parse_cstring
```python
def parse_cstring(stream, offset):
    """
    parse_cstring will parse a null-terminated string in a bytestream.

    The string will be decoded with UTF-8 decoder, of course since we are
    doing this byte-a-byte, it won't really work for all Unicode strings.

    TODO: add proper Unicode support
    """
    stream.seek(offset)

    string = ""

    while True:
        char = struct.unpack('c', stream.read(1))[0]

        if char == b'\x00':
            return string
        else:
            string += char.decode()
```
python
train
https://github.com/riccardocagnasso/useless/blob/5167aab82958f653148e3689c9a7e548d4fa2cba/src/useless/common/__init__.py#L29-L48
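A quick check of the parser against an in-memory stream; io.BytesIO stands in for the file the function normally reads, and the import path is inferred from the record's src/useless/common/__init__.py layout, so treat it as an assumption:

```python
# BytesIO stands in for any seekable binary stream. Offset 5 skips the
# 5-byte b'junk\x00' prefix; parsing stops at the next NUL byte.
import io
from useless.common import parse_cstring  # path assumed from the record

data = io.BytesIO(b'junk\x00hello, world\x00trailing')
print(parse_cstring(data, 5))  # -> 'hello, world'
```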
4,611
facelessuser/soupsieve
soupsieve/__init__.py
match
```python
def match(select, tag, namespaces=None, flags=0, **kwargs):
    """Match node."""

    return compile(select, namespaces, flags, **kwargs).match(tag)
```
python
train
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/__init__.py#L78-L81
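In practice `match` answers a per-tag yes/no question against a CSS selector; a small sketch pairing it with BeautifulSoup, soupsieve's usual companion:

```python
# soupsieve.match: does this particular tag satisfy the selector?
import soupsieve as sv
from bs4 import BeautifulSoup

soup = BeautifulSoup('<div><p class="a">hi</p><p>bye</p></div>', 'html.parser')
first, second = soup.find_all('p')
print(sv.match('p.a', first))   # True
print(sv.match('p.a', second))  # False
```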
4,612
ibis-project/ibis
ibis/expr/api.py
cross_join
```python
def cross_join(*tables, **kwargs):
    """
    Perform a cross join (cartesian product) amongst a list of tables, with
    optional set of prefixes to apply to overlapping column names

    Parameters
    ----------
    tables : ibis.expr.types.TableExpr

    Returns
    -------
    joined : TableExpr

    Examples
    --------
    >>> import ibis
    >>> schemas = [(name, 'int64') for name in 'abcde']
    >>> a, b, c, d, e = [
    ...     ibis.table([(name, type)], name=name) for name, type in schemas
    ... ]
    >>> joined1 = ibis.cross_join(a, b, c, d, e)
    >>> joined1  # doctest: +NORMALIZE_WHITESPACE
    ref_0
    UnboundTable[table]
      name: a
      schema:
        a : int64
    ref_1
    UnboundTable[table]
      name: b
      schema:
        b : int64
    ref_2
    UnboundTable[table]
      name: c
      schema:
        c : int64
    ref_3
    UnboundTable[table]
      name: d
      schema:
        d : int64
    ref_4
    UnboundTable[table]
      name: e
      schema:
        e : int64
    CrossJoin[table]
      left:
        Table: ref_0
      right:
        CrossJoin[table]
          left:
            CrossJoin[table]
              left:
                CrossJoin[table]
                  left:
                    Table: ref_1
                  right:
                    Table: ref_2
              right:
                Table: ref_3
          right:
            Table: ref_4
    """
    # TODO(phillipc): Implement prefix keyword argument
    op = ops.CrossJoin(*tables, **kwargs)
    return op.to_expr()
```
python
train
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L2981-L3048
4,613
jobovy/galpy
galpy/df/streamdf.py
streamdf._progenitor_setup
```python
def _progenitor_setup(self, progenitor, leading, useTMHessian):
    """The part of the setup relating to the progenitor's orbit"""
    # Progenitor orbit: Calculate actions, frequencies, and angles for the progenitor
    self._progenitor = progenitor()  # call to get new Orbit
    # Make sure we do not use physical coordinates
    self._progenitor.turn_physical_off()
    acfs = self._aA.actionsFreqsAngles(self._progenitor,
                                       _firstFlip=(not leading),
                                       use_physical=False)
    self._progenitor_jr = acfs[0][0]
    self._progenitor_lz = acfs[1][0]
    self._progenitor_jz = acfs[2][0]
    self._progenitor_Omegar = acfs[3]
    self._progenitor_Omegaphi = acfs[4]
    self._progenitor_Omegaz = acfs[5]
    self._progenitor_Omega = numpy.array([acfs[3], acfs[4], acfs[5]]).reshape(3)
    self._progenitor_angler = acfs[6]
    self._progenitor_anglephi = acfs[7]
    self._progenitor_anglez = acfs[8]
    self._progenitor_angle = numpy.array([acfs[6], acfs[7], acfs[8]]).reshape(3)
    # Calculate dO/dJ Jacobian at the progenitor
    if useTMHessian:
        h, fr, fp, fz, e = self._aAT.hessianFreqs(self._progenitor_jr,
                                                  self._progenitor_lz,
                                                  self._progenitor_jz)
        self._dOdJp = h
        # Replace frequencies with TM frequencies
        self._progenitor_Omegar = fr
        self._progenitor_Omegaphi = fp
        self._progenitor_Omegaz = fz
        self._progenitor_Omega = numpy.array([self._progenitor_Omegar,
                                              self._progenitor_Omegaphi,
                                              self._progenitor_Omegaz]).reshape(3)
    else:
        self._dOdJp = calcaAJac(self._progenitor._orb.vxvv,
                                self._aA, dxv=None, dOdJ=True,
                                _initacfs=acfs)
    self._dOdJpInv = numpy.linalg.inv(self._dOdJp)
    self._dOdJpEig = numpy.linalg.eig(self._dOdJp)
    return None
```
python
train
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/streamdf.py#L218-L257
4,614
paylogic/halogen
halogen/validators.py
Length.validate
def validate(self, value): """Validate the length of a list. :param value: List of values. :raises: :class:`halogen.exception.ValidationError` exception when length of the list is less than minimum or greater than maximum. """ try: length = len(value) except TypeError: length = 0 if self.min_length is not None: min_length = self.min_length() if callable(self.min_length) else self.min_length if length < min_length: raise exceptions.ValidationError(self.min_err.format(min_length)) if self.max_length is not None: max_length = self.max_length() if callable(self.max_length) else self.max_length if length > max_length: raise exceptions.ValidationError(self.max_err.format(max_length))
python
def validate(self, value): """Validate the length of a list. :param value: List of values. :raises: :class:`halogen.exception.ValidationError` exception when length of the list is less than minimum or greater than maximum. """ try: length = len(value) except TypeError: length = 0 if self.min_length is not None: min_length = self.min_length() if callable(self.min_length) else self.min_length if length < min_length: raise exceptions.ValidationError(self.min_err.format(min_length)) if self.max_length is not None: max_length = self.max_length() if callable(self.max_length) else self.max_length if length > max_length: raise exceptions.ValidationError(self.max_err.format(max_length))
['def', 'validate', '(', 'self', ',', 'value', ')', ':', 'try', ':', 'length', '=', 'len', '(', 'value', ')', 'except', 'TypeError', ':', 'length', '=', '0', 'if', 'self', '.', 'min_length', 'is', 'not', 'None', ':', 'min_length', '=', 'self', '.', 'min_length', '(', ')', 'if', 'callable', '(', 'self', '.', 'min_length', ')', 'else', 'self', '.', 'min_length', 'if', 'length', '<', 'min_length', ':', 'raise', 'exceptions', '.', 'ValidationError', '(', 'self', '.', 'min_err', '.', 'format', '(', 'min_length', ')', ')', 'if', 'self', '.', 'max_length', 'is', 'not', 'None', ':', 'max_length', '=', 'self', '.', 'max_length', '(', ')', 'if', 'callable', '(', 'self', '.', 'max_length', ')', 'else', 'self', '.', 'max_length', 'if', 'length', '>', 'max_length', ':', 'raise', 'exceptions', '.', 'ValidationError', '(', 'self', '.', 'max_err', '.', 'format', '(', 'max_length', ')', ')']
Validate the length of a list. :param value: List of values. :raises: :class:`halogen.exception.ValidationError` exception when length of the list is less than minimum or greater than maximum.
['Validate', 'the', 'length', 'of', 'a', 'list', '.']
train
https://github.com/paylogic/halogen/blob/2dec0a67c506d02d1f51915fa7163f59764a0bde/halogen/validators.py#L92-L113
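The record above shows halogen's callable-bounds idiom: min_length and max_length may be plain integers or zero-argument callables evaluated at validation time. A minimal standalone sketch of that idiom, using plain ValueError rather than halogen's ValidationError (the constructor here is an assumption for illustration, not halogen's actual signature):

    class Length:
        """Sketch of a length validator with static or callable bounds."""
        def __init__(self, min_length=None, max_length=None):
            self.min_length = min_length
            self.max_length = max_length

        def validate(self, value):
            try:
                length = len(value)
            except TypeError:
                length = 0  # non-sized values are treated as empty
            if self.min_length is not None:
                lo = self.min_length() if callable(self.min_length) else self.min_length
                if length < lo:
                    raise ValueError("Length is less than {0}".format(lo))
            if self.max_length is not None:
                hi = self.max_length() if callable(self.max_length) else self.max_length
                if length > hi:
                    raise ValueError("Length is greater than {0}".format(hi))

    Length(min_length=1, max_length=lambda: 3).validate([1, 2])  # passes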
4,615
pypa/pipenv
pipenv/vendor/vistir/path.py
is_valid_url
def is_valid_url(url): """Checks if a given string is an url""" from .misc import to_text if not url: return url pieces = urllib_parse.urlparse(to_text(url)) return all([pieces.scheme, pieces.netloc])
python
def is_valid_url(url): """Checks if a given string is an url""" from .misc import to_text if not url: return url pieces = urllib_parse.urlparse(to_text(url)) return all([pieces.scheme, pieces.netloc])
['def', 'is_valid_url', '(', 'url', ')', ':', 'from', '.', 'misc', 'import', 'to_text', 'if', 'not', 'url', ':', 'return', 'url', 'pieces', '=', 'urllib_parse', '.', 'urlparse', '(', 'to_text', '(', 'url', ')', ')', 'return', 'all', '(', '[', 'pieces', '.', 'scheme', ',', 'pieces', '.', 'netloc', ']', ')']
Checks if a given string is an url
['Checks', 'if', 'a', 'given', 'string', 'is', 'an', 'url']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/path.py#L188-L195
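vistir's check reduces to urllib.parse: a string counts as a URL only if both a scheme and a network location survive parsing. The same logic with the standard library alone:

    from urllib.parse import urlparse

    def is_valid_url(url):
        if not url:
            return url
        pieces = urlparse(url)
        return all([pieces.scheme, pieces.netloc])

    print(is_valid_url("https://example.com/path"))  # True
    print(is_valid_url("not-a-url"))                 # False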
4,616
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_help.py
HelpModule.idle_task
def idle_task(self): '''called on idle''' if self.module('console') is not None and not self.menu_added_console: self.menu_added_console = True self.module('console').add_menu(self.menu)
python
def idle_task(self): '''called on idle''' if self.module('console') is not None and not self.menu_added_console: self.menu_added_console = True self.module('console').add_menu(self.menu)
['def', 'idle_task', '(', 'self', ')', ':', 'if', 'self', '.', 'module', '(', "'console'", ')', 'is', 'not', 'None', 'and', 'not', 'self', '.', 'menu_added_console', ':', 'self', '.', 'menu_added_console', '=', 'True', 'self', '.', 'module', '(', "'console'", ')', '.', 'add_menu', '(', 'self', '.', 'menu', ')']
called on idle
['called', 'on', 'idle']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_help.py#L90-L94
4,617
lreis2415/PyGeoC
examples/ex07_handling_raster_with_numpy.py
main
def main(): """Read GeoTiff raster data and perform log transformation. """ input_tif = "../tests/data/Jamaica_dem.tif" output_tif = "../tests/data/tmp_results/log_dem.tif" rst = RasterUtilClass.read_raster(input_tif) # raster data (with noDataValue as numpy.nan) as numpy array rst_valid = rst.validValues output_data = np.log(rst_valid) # write output raster RasterUtilClass.write_gtiff_file(output_tif, rst.nRows, rst.nCols, output_data, rst.geotrans, rst.srs, rst.noDataValue, rst.dataType)
python
def main(): """Read GeoTiff raster data and perform log transformation. """ input_tif = "../tests/data/Jamaica_dem.tif" output_tif = "../tests/data/tmp_results/log_dem.tif" rst = RasterUtilClass.read_raster(input_tif) # raster data (with noDataValue as numpy.nan) as numpy array rst_valid = rst.validValues output_data = np.log(rst_valid) # write output raster RasterUtilClass.write_gtiff_file(output_tif, rst.nRows, rst.nCols, output_data, rst.geotrans, rst.srs, rst.noDataValue, rst.dataType)
['def', 'main', '(', ')', ':', 'input_tif', '=', '"../tests/data/Jamaica_dem.tif"', 'output_tif', '=', '"../tests/data/tmp_results/log_dem.tif"', 'rst', '=', 'RasterUtilClass', '.', 'read_raster', '(', 'input_tif', ')', '# raster data (with noDataValue as numpy.nan) as numpy array', 'rst_valid', '=', 'rst', '.', 'validValues', 'output_data', '=', 'np', '.', 'log', '(', 'rst_valid', ')', '# write output raster', 'RasterUtilClass', '.', 'write_gtiff_file', '(', 'output_tif', ',', 'rst', '.', 'nRows', ',', 'rst', '.', 'nCols', ',', 'output_data', ',', 'rst', '.', 'geotrans', ',', 'rst', '.', 'srs', ',', 'rst', '.', 'noDataValue', ',', 'rst', '.', 'dataType', ')']
Read GeoTiff raster data and perform log transformation.
['Read', 'GeoTiff', 'raster', 'data', 'and', 'perform', 'log', 'transformation', '.']
train
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/examples/ex07_handling_raster_with_numpy.py#L10-L21
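The example above depends on the raster's noDataValue being surfaced as numpy.nan, so np.log leaves nodata cells untouched while transforming valid cells. A GDAL-free sketch of just that numeric behavior:

    import numpy as np

    dem = np.array([[100.0, 250.0],
                    [np.nan, 400.0]])  # nan stands in for nodata cells
    log_dem = np.log(dem)              # nan propagates; valid cells are logged
    print(log_dem)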
4,618
Celeo/Pycord
pycord/__init__.py
Pycord._setup_logger
def _setup_logger(self, logging_level: int, log_to_console: bool): """Sets up the internal logger Args: logging_level: what logging level to use log_to_console: whether or not to log to the console """ self.logger = logging.getLogger('discord') self.logger.handlers = [] self.logger.setLevel(logging_level) formatter = logging.Formatter(style='{', fmt='{asctime} [{levelname}] {message}', datefmt='%Y-%m-%d %H:%M:%S') file_handler = logging.FileHandler('pycord.log') file_handler.setFormatter(formatter) file_handler.setLevel(logging_level) self.logger.addHandler(file_handler) if log_to_console: stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setFormatter(formatter) stream_handler.setLevel(logging_level) self.logger.addHandler(stream_handler)
python
def _setup_logger(self, logging_level: int, log_to_console: bool): """Sets up the internal logger Args: logging_level: what logging level to use log_to_console: whether or not to log to the console """ self.logger = logging.getLogger('discord') self.logger.handlers = [] self.logger.setLevel(logging_level) formatter = logging.Formatter(style='{', fmt='{asctime} [{levelname}] {message}', datefmt='%Y-%m-%d %H:%M:%S') file_handler = logging.FileHandler('pycord.log') file_handler.setFormatter(formatter) file_handler.setLevel(logging_level) self.logger.addHandler(file_handler) if log_to_console: stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setFormatter(formatter) stream_handler.setLevel(logging_level) self.logger.addHandler(stream_handler)
['def', '_setup_logger', '(', 'self', ',', 'logging_level', ':', 'int', ',', 'log_to_console', ':', 'bool', ')', ':', 'self', '.', 'logger', '=', 'logging', '.', 'getLogger', '(', "'discord'", ')', 'self', '.', 'logger', '.', 'handlers', '=', '[', ']', 'self', '.', 'logger', '.', 'setLevel', '(', 'logging_level', ')', 'formatter', '=', 'logging', '.', 'Formatter', '(', 'style', '=', "'{'", ',', 'fmt', '=', "'{asctime} [{levelname}] {message}'", ',', 'datefmt', '=', "'%Y-%m-%d %H:%M:%S'", ')', 'file_handler', '=', 'logging', '.', 'FileHandler', '(', "'pycord.log'", ')', 'file_handler', '.', 'setFormatter', '(', 'formatter', ')', 'file_handler', '.', 'setLevel', '(', 'logging_level', ')', 'self', '.', 'logger', '.', 'addHandler', '(', 'file_handler', ')', 'if', 'log_to_console', ':', 'stream_handler', '=', 'logging', '.', 'StreamHandler', '(', 'sys', '.', 'stdout', ')', 'stream_handler', '.', 'setFormatter', '(', 'formatter', ')', 'stream_handler', '.', 'setLevel', '(', 'logging_level', ')', 'self', '.', 'logger', '.', 'addHandler', '(', 'stream_handler', ')']
Sets up the internal logger Args: logging_level: what logging level to use log_to_console: whether or not to log to the console
['Sets', 'up', 'the', 'internal', 'logger']
train
https://github.com/Celeo/Pycord/blob/15c38e39b508c89c35f7f6d7009fe8e9f161a94e/pycord/__init__.py#L181-L200
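The handler setup shown is plain stdlib logging: clear any existing handlers, attach a '{'-style formatter, and fan out to a file and optionally stdout. A self-contained reduction (the logger name 'demo' is illustrative):

    import logging
    import sys

    logger = logging.getLogger('demo')
    logger.handlers = []  # drop handlers left from earlier configuration
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(style='{',
                                  fmt='{asctime} [{levelname}] {message}',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    logger.info('handlers configured')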
4,619
jaredLunde/redis_structures
redis_structures/__init__.py
RedisSortedSet.decr
def decr(self, member, by=1): """ Decrements @member by @by within the sorted set """ return self._client.zincrby( self.key_prefix, self._dumps(member), by * -1)
python
def decr(self, member, by=1): """ Decrements @member by @by within the sorted set """ return self._client.zincrby( self.key_prefix, self._dumps(member), by * -1)
['def', 'decr', '(', 'self', ',', 'member', ',', 'by', '=', '1', ')', ':', 'return', 'self', '.', '_client', '.', 'zincrby', '(', 'self', '.', 'key_prefix', ',', 'self', '.', '_dumps', '(', 'member', ')', ',', 'by', '*', '-', '1', ')']
Decrements @member by @by within the sorted set
['Decrements']
train
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/__init__.py#L2047-L2050
4,620
Azure/azure-event-hubs-python
azure/eventhub/sender.py
Sender.send
def send(self, event_data): """ Sends an event data and blocks until acknowledgement is received or operation times out. :param event_data: The event to be sent. :type event_data: ~azure.eventhub.common.EventData :raises: ~azure.eventhub.common.EventHubError if the message fails to send. :return: The outcome of the message send. :rtype: ~uamqp.constants.MessageSendResult """ if self.error: raise self.error if not self.running: raise ValueError("Unable to send until client has been started.") if event_data.partition_key and self.partition: raise ValueError("EventData partition key cannot be used with a partition sender.") event_data.message.on_send_complete = self._on_outcome try: self._handler.send_message(event_data.message) if self._outcome != constants.MessageSendResult.Ok: raise Sender._error(self._outcome, self._condition) except errors.MessageException as failed: error = EventHubError(str(failed), failed) self.close(exception=error) raise error except (errors.TokenExpired, errors.AuthenticationException): log.info("Sender disconnected due to token error. Attempting reconnect.") self.reconnect() except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: log.info("Sender detached. Attempting reconnect.") self.reconnect() else: log.info("Sender detached. Shutting down.") error = EventHubError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if self.auto_reconnect: log.info("Sender detached. Attempting reconnect.") self.reconnect() else: log.info("Sender detached. Shutting down.") error = EventHubError(str(shutdown), shutdown) self.close(exception=error) raise error except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) error = EventHubError("Send failed: {}".format(e)) self.close(exception=error) raise error else: return self._outcome
python
def send(self, event_data): """ Sends an event data and blocks until acknowledgement is received or operation times out. :param event_data: The event to be sent. :type event_data: ~azure.eventhub.common.EventData :raises: ~azure.eventhub.common.EventHubError if the message fails to send. :return: The outcome of the message send. :rtype: ~uamqp.constants.MessageSendResult """ if self.error: raise self.error if not self.running: raise ValueError("Unable to send until client has been started.") if event_data.partition_key and self.partition: raise ValueError("EventData partition key cannot be used with a partition sender.") event_data.message.on_send_complete = self._on_outcome try: self._handler.send_message(event_data.message) if self._outcome != constants.MessageSendResult.Ok: raise Sender._error(self._outcome, self._condition) except errors.MessageException as failed: error = EventHubError(str(failed), failed) self.close(exception=error) raise error except (errors.TokenExpired, errors.AuthenticationException): log.info("Sender disconnected due to token error. Attempting reconnect.") self.reconnect() except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: log.info("Sender detached. Attempting reconnect.") self.reconnect() else: log.info("Sender detached. Shutting down.") error = EventHubError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if self.auto_reconnect: log.info("Sender detached. Attempting reconnect.") self.reconnect() else: log.info("Sender detached. Shutting down.") error = EventHubError(str(shutdown), shutdown) self.close(exception=error) raise error except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) error = EventHubError("Send failed: {}".format(e)) self.close(exception=error) raise error else: return self._outcome
['def', 'send', '(', 'self', ',', 'event_data', ')', ':', 'if', 'self', '.', 'error', ':', 'raise', 'self', '.', 'error', 'if', 'not', 'self', '.', 'running', ':', 'raise', 'ValueError', '(', '"Unable to send until client has been started."', ')', 'if', 'event_data', '.', 'partition_key', 'and', 'self', '.', 'partition', ':', 'raise', 'ValueError', '(', '"EventData partition key cannot be used with a partition sender."', ')', 'event_data', '.', 'message', '.', 'on_send_complete', '=', 'self', '.', '_on_outcome', 'try', ':', 'self', '.', '_handler', '.', 'send_message', '(', 'event_data', '.', 'message', ')', 'if', 'self', '.', '_outcome', '!=', 'constants', '.', 'MessageSendResult', '.', 'Ok', ':', 'raise', 'Sender', '.', '_error', '(', 'self', '.', '_outcome', ',', 'self', '.', '_condition', ')', 'except', 'errors', '.', 'MessageException', 'as', 'failed', ':', 'error', '=', 'EventHubError', '(', 'str', '(', 'failed', ')', ',', 'failed', ')', 'self', '.', 'close', '(', 'exception', '=', 'error', ')', 'raise', 'error', 'except', '(', 'errors', '.', 'TokenExpired', ',', 'errors', '.', 'AuthenticationException', ')', ':', 'log', '.', 'info', '(', '"Sender disconnected due to token error. Attempting reconnect."', ')', 'self', '.', 'reconnect', '(', ')', 'except', '(', 'errors', '.', 'LinkDetach', ',', 'errors', '.', 'ConnectionClose', ')', 'as', 'shutdown', ':', 'if', 'shutdown', '.', 'action', '.', 'retry', 'and', 'self', '.', 'auto_reconnect', ':', 'log', '.', 'info', '(', '"Sender detached. Attempting reconnect."', ')', 'self', '.', 'reconnect', '(', ')', 'else', ':', 'log', '.', 'info', '(', '"Sender detached. Shutting down."', ')', 'error', '=', 'EventHubError', '(', 'str', '(', 'shutdown', ')', ',', 'shutdown', ')', 'self', '.', 'close', '(', 'exception', '=', 'error', ')', 'raise', 'error', 'except', 'errors', '.', 'MessageHandlerError', 'as', 'shutdown', ':', 'if', 'self', '.', 'auto_reconnect', ':', 'log', '.', 'info', '(', '"Sender detached. Attempting reconnect."', ')', 'self', '.', 'reconnect', '(', ')', 'else', ':', 'log', '.', 'info', '(', '"Sender detached. Shutting down."', ')', 'error', '=', 'EventHubError', '(', 'str', '(', 'shutdown', ')', ',', 'shutdown', ')', 'self', '.', 'close', '(', 'exception', '=', 'error', ')', 'raise', 'error', 'except', 'Exception', 'as', 'e', ':', 'log', '.', 'info', '(', '"Unexpected error occurred (%r). Shutting down."', ',', 'e', ')', 'error', '=', 'EventHubError', '(', '"Send failed: {}"', '.', 'format', '(', 'e', ')', ')', 'self', '.', 'close', '(', 'exception', '=', 'error', ')', 'raise', 'error', 'else', ':', 'return', 'self', '.', '_outcome']
Sends an event data and blocks until acknowledgement is received or operation times out. :param event_data: The event to be sent. :type event_data: ~azure.eventhub.common.EventData :raises: ~azure.eventhub.common.EventHubError if the message fails to send. :return: The outcome of the message send. :rtype: ~uamqp.constants.MessageSendResult
['Sends', 'an', 'event', 'data', 'and', 'blocks', 'until', 'acknowledgement', 'is', 'received', 'or', 'operation', 'times', 'out', '.']
train
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventhub/sender.py#L211-L265
4,621
PredixDev/predixpy
predix/admin/cf/spaces.py
Space.create_space
def create_space(self, space_name, add_users=True): """ Create a new space with the given name in the current target organization. """ body = { 'name': space_name, 'organization_guid': self.api.config.get_organization_guid() } # MAINT: may need to do this more generally later if add_users: space_users = [] org_users = self.org.get_users() for org_user in org_users['resources']: guid = org_user['metadata']['guid'] space_users.append(guid) body['manager_guids'] = space_users body['developer_guids'] = space_users return self.api.post('/v2/spaces', body)
python
def create_space(self, space_name, add_users=True): """ Create a new space with the given name in the current target organization. """ body = { 'name': space_name, 'organization_guid': self.api.config.get_organization_guid() } # MAINT: may need to do this more generally later if add_users: space_users = [] org_users = self.org.get_users() for org_user in org_users['resources']: guid = org_user['metadata']['guid'] space_users.append(guid) body['manager_guids'] = space_users body['developer_guids'] = space_users return self.api.post('/v2/spaces', body)
['def', 'create_space', '(', 'self', ',', 'space_name', ',', 'add_users', '=', 'True', ')', ':', 'body', '=', '{', "'name'", ':', 'space_name', ',', "'organization_guid'", ':', 'self', '.', 'api', '.', 'config', '.', 'get_organization_guid', '(', ')', '}', '# MAINT: may need to do this more generally later', 'if', 'add_users', ':', 'space_users', '=', '[', ']', 'org_users', '=', 'self', '.', 'org', '.', 'get_users', '(', ')', 'for', 'org_user', 'in', 'org_users', '[', "'resources'", ']', ':', 'guid', '=', 'org_user', '[', "'metadata'", ']', '[', "'guid'", ']', 'space_users', '.', 'append', '(', 'guid', ')', 'body', '[', "'manager_guids'", ']', '=', 'space_users', 'body', '[', "'developer_guids'", ']', '=', 'space_users', 'return', 'self', '.', 'api', '.', 'post', '(', "'/v2/spaces'", ',', 'body', ')']
Create a new space with the given name in the current target organization.
['Create', 'a', 'new', 'space', 'with', 'the', 'given', 'name', 'in', 'the', 'current', 'target', 'organization', '.']
train
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/cf/spaces.py#L83-L104
4,622
PrefPy/prefpy
prefpy/gmmra.py
GMMPLAggregator._pos
def _pos(self, k): """ Description: Position k breaking Parameters: k: position k is used for the breaking """ if k < 2: raise ValueError("k smaller than 2") G = np.zeros((self.m, self.m)) for i in range(self.m): for j in range(self.m): if i == j: continue if i < k or j < k: continue if i == k or j == k: G[i][j] = 1 return G
python
def _pos(self, k): """ Description: Position k breaking Parameters: k: position k is used for the breaking """ if k < 2: raise ValueError("k smaller than 2") G = np.zeros((self.m, self.m)) for i in range(self.m): for j in range(self.m): if i == j: continue if i < k or j < k: continue if i == k or j == k: G[i][j] = 1 return G
['def', '_pos', '(', 'self', ',', 'k', ')', ':', 'if', 'k', '<', '2', ':', 'raise', 'ValueError', '(', '"k smaller than 2"', ')', 'G', '=', 'np', '.', 'zeros', '(', '(', 'self', '.', 'm', ',', 'self', '.', 'm', ')', ')', 'for', 'i', 'in', 'range', '(', 'self', '.', 'm', ')', ':', 'for', 'j', 'in', 'range', '(', 'self', '.', 'm', ')', ':', 'if', 'i', '==', 'j', ':', 'continue', 'if', 'i', '<', 'k', 'or', 'j', '<', 'k', ':', 'continue', 'if', 'i', '==', 'k', 'or', 'j', '==', 'k', ':', 'G', '[', 'i', ']', '[', 'j', ']', '=', '1', 'return', 'G']
Description: Position k breaking Parameters: k: position k is used for the breaking
['Description', ':', 'Position', 'k', 'breaking', 'Parameters', ':', 'k', ':', 'position', 'k', 'is', 'used', 'for', 'the', 'breaking']
train
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/gmmra.py#L80-L98
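The breaking-matrix logic is easier to inspect outside the class. This sketch lifts _pos into a free function over m alternatives; the loop body is copied from the record:

    import numpy as np

    def pos_breaking(m, k):
        if k < 2:
            raise ValueError("k smaller than 2")
        G = np.zeros((m, m))
        for i in range(m):
            for j in range(m):
                if i == j or i < k or j < k:
                    continue  # skip the diagonal and positions before k
                if i == k or j == k:
                    G[i][j] = 1
        return G

    print(pos_breaking(5, 2))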
4,623
openstack/networking-cisco
networking_cisco/ml2_drivers/ucsm/deprecated_network_driver.py
CiscoUcsmDriver._create_port_profile
def _create_port_profile(self, handle, profile_name, vlan_id, vnic_type, ucsm_ip, trunk_vlans, qos_policy): """Creates a Port Profile on the UCS Manager. Significant parameters set in the port profile are: 1. Port profile name - Should match what was set in vif_details 2. High performance mode - For VM-FEX to be enabled/configured on the port using this port profile, this mode should be enabled. 3. Vlan id - Vlan id used by traffic to and from the port. """ port_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX + profile_name) vlan_name = self.make_vlan_name(vlan_id) vlan_associate_path = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX + profile_name + const.VLAN_PATH_PREFIX + vlan_name) cl_profile_name = const.CLIENT_PROFILE_NAME_PREFIX + str(vlan_id) cl_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX + profile_name + const.CLIENT_PROFILE_PATH_PREFIX + cl_profile_name) # Remove this Port Profile from the delete DB table if it was # addded there due to a previous delete. self.ucsm_db.remove_port_profile_to_delete(profile_name, ucsm_ip) # Check if direct or macvtap mode if vnic_type == bc.portbindings.VNIC_DIRECT: port_mode = const.HIGH_PERF else: port_mode = const.NONE try: handle.StartTransaction() port_profile = handle.GetManagedObject( None, self.ucsmsdk.VnicProfileSet.ClassId(), {self.ucsmsdk.VnicProfileSet.DN: const.PORT_PROFILESETDN}) if not port_profile: LOG.warning('UCS Manager network driver Port Profile ' 'path at %s missing', const.PORT_PROFILESETDN) return False # Create a port profile on the UCS Manager p_profile = handle.AddManagedObject( port_profile, self.ucsmsdk.VnicProfile.ClassId(), {self.ucsmsdk.VnicProfile.NAME: profile_name, self.ucsmsdk.VnicProfile.POLICY_OWNER: "local", self.ucsmsdk.VnicProfile.NW_CTRL_POLICY_NAME: "", self.ucsmsdk.VnicProfile.PIN_TO_GROUP_NAME: "", self.ucsmsdk.VnicProfile.DN: port_profile_dest, self.ucsmsdk.VnicProfile.DESCR: const.DESCR, self.ucsmsdk.VnicProfile.QOS_POLICY_NAME: qos_policy, self.ucsmsdk.VnicProfile.HOST_NW_IOPERF: port_mode, self.ucsmsdk.VnicProfile.MAX_PORTS: const.MAX_PORTS}) if not p_profile: LOG.warning('UCS Manager network driver could not ' 'create Port Profile %s.', profile_name) return False LOG.debug('UCS Manager network driver associating Vlan ' 'Profile with Port Profile at %s', vlan_associate_path) # Associate port profile with vlan profile mo = handle.AddManagedObject( p_profile, self.ucsmsdk.VnicEtherIf.ClassId(), {self.ucsmsdk.VnicEtherIf.DN: vlan_associate_path, self.ucsmsdk.VnicEtherIf.NAME: vlan_name, self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "yes"}, True) if not mo: LOG.warning('UCS Manager network driver cannot ' 'associate Vlan Profile to Port ' 'Profile %s', profile_name) return False LOG.debug('UCS Manager network driver created Port Profile %s ' 'at %s', profile_name, port_profile_dest) # For Multi VLAN trunk support if trunk_vlans: for vlan in trunk_vlans: vlan_name = self.make_vlan_name(vlan) vlan_associate_path = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX + profile_name + const.VLAN_PATH_PREFIX + vlan_name) # Associate port profile with vlan profile # for the trunk vlans mo = handle.AddManagedObject( p_profile, self.ucsmsdk.VnicEtherIf.ClassId(), {self.ucsmsdk.VnicEtherIf.DN: vlan_associate_path, self.ucsmsdk.VnicEtherIf.NAME: vlan_name, self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True) if not mo: LOG.warning('UCS Manager network driver cannot ' 'associate Vlan %(vlan)d to Port ' 'Profile %(profile)s', {'vlan': vlan, 'profile': profile_name}) cl_profile = handle.AddManagedObject( p_profile, self.ucsmsdk.VmVnicProfCl.ClassId(), {self.ucsmsdk.VmVnicProfCl.ORG_PATH: ".*", self.ucsmsdk.VmVnicProfCl.DN: cl_profile_dest, self.ucsmsdk.VmVnicProfCl.NAME: cl_profile_name, self.ucsmsdk.VmVnicProfCl.POLICY_OWNER: "local", self.ucsmsdk.VmVnicProfCl.SW_NAME: ".*", self.ucsmsdk.VmVnicProfCl.DC_NAME: ".*", self.ucsmsdk.VmVnicProfCl.DESCR: const.DESCR}) handle.CompleteTransaction() if not cl_profile: LOG.warning('UCS Manager network driver could not ' 'create Client Profile %s.', cl_profile_name) return False LOG.debug('UCS Manager network driver created Client Profile ' '%s at %s', cl_profile_name, cl_profile_dest) return True except Exception as e: return self._handle_ucsm_exception(e, 'Port Profile', profile_name, ucsm_ip)
python
def _create_port_profile(self, handle, profile_name, vlan_id, vnic_type, ucsm_ip, trunk_vlans, qos_policy): """Creates a Port Profile on the UCS Manager. Significant parameters set in the port profile are: 1. Port profile name - Should match what was set in vif_details 2. High performance mode - For VM-FEX to be enabled/configured on the port using this port profile, this mode should be enabled. 3. Vlan id - Vlan id used by traffic to and from the port. """ port_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX + profile_name) vlan_name = self.make_vlan_name(vlan_id) vlan_associate_path = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX + profile_name + const.VLAN_PATH_PREFIX + vlan_name) cl_profile_name = const.CLIENT_PROFILE_NAME_PREFIX + str(vlan_id) cl_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX + profile_name + const.CLIENT_PROFILE_PATH_PREFIX + cl_profile_name) # Remove this Port Profile from the delete DB table if it was # addded there due to a previous delete. self.ucsm_db.remove_port_profile_to_delete(profile_name, ucsm_ip) # Check if direct or macvtap mode if vnic_type == bc.portbindings.VNIC_DIRECT: port_mode = const.HIGH_PERF else: port_mode = const.NONE try: handle.StartTransaction() port_profile = handle.GetManagedObject( None, self.ucsmsdk.VnicProfileSet.ClassId(), {self.ucsmsdk.VnicProfileSet.DN: const.PORT_PROFILESETDN}) if not port_profile: LOG.warning('UCS Manager network driver Port Profile ' 'path at %s missing', const.PORT_PROFILESETDN) return False # Create a port profile on the UCS Manager p_profile = handle.AddManagedObject( port_profile, self.ucsmsdk.VnicProfile.ClassId(), {self.ucsmsdk.VnicProfile.NAME: profile_name, self.ucsmsdk.VnicProfile.POLICY_OWNER: "local", self.ucsmsdk.VnicProfile.NW_CTRL_POLICY_NAME: "", self.ucsmsdk.VnicProfile.PIN_TO_GROUP_NAME: "", self.ucsmsdk.VnicProfile.DN: port_profile_dest, self.ucsmsdk.VnicProfile.DESCR: const.DESCR, self.ucsmsdk.VnicProfile.QOS_POLICY_NAME: qos_policy, self.ucsmsdk.VnicProfile.HOST_NW_IOPERF: port_mode, self.ucsmsdk.VnicProfile.MAX_PORTS: const.MAX_PORTS}) if not p_profile: LOG.warning('UCS Manager network driver could not ' 'create Port Profile %s.', profile_name) return False LOG.debug('UCS Manager network driver associating Vlan ' 'Profile with Port Profile at %s', vlan_associate_path) # Associate port profile with vlan profile mo = handle.AddManagedObject( p_profile, self.ucsmsdk.VnicEtherIf.ClassId(), {self.ucsmsdk.VnicEtherIf.DN: vlan_associate_path, self.ucsmsdk.VnicEtherIf.NAME: vlan_name, self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "yes"}, True) if not mo: LOG.warning('UCS Manager network driver cannot ' 'associate Vlan Profile to Port ' 'Profile %s', profile_name) return False LOG.debug('UCS Manager network driver created Port Profile %s ' 'at %s', profile_name, port_profile_dest) # For Multi VLAN trunk support if trunk_vlans: for vlan in trunk_vlans: vlan_name = self.make_vlan_name(vlan) vlan_associate_path = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX + profile_name + const.VLAN_PATH_PREFIX + vlan_name) # Associate port profile with vlan profile # for the trunk vlans mo = handle.AddManagedObject( p_profile, self.ucsmsdk.VnicEtherIf.ClassId(), {self.ucsmsdk.VnicEtherIf.DN: vlan_associate_path, self.ucsmsdk.VnicEtherIf.NAME: vlan_name, self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True) if not mo: LOG.warning('UCS Manager network driver cannot ' 'associate Vlan %(vlan)d to Port ' 'Profile %(profile)s', {'vlan': vlan, 'profile': profile_name}) cl_profile = handle.AddManagedObject( p_profile, self.ucsmsdk.VmVnicProfCl.ClassId(), {self.ucsmsdk.VmVnicProfCl.ORG_PATH: ".*", self.ucsmsdk.VmVnicProfCl.DN: cl_profile_dest, self.ucsmsdk.VmVnicProfCl.NAME: cl_profile_name, self.ucsmsdk.VmVnicProfCl.POLICY_OWNER: "local", self.ucsmsdk.VmVnicProfCl.SW_NAME: ".*", self.ucsmsdk.VmVnicProfCl.DC_NAME: ".*", self.ucsmsdk.VmVnicProfCl.DESCR: const.DESCR}) handle.CompleteTransaction() if not cl_profile: LOG.warning('UCS Manager network driver could not ' 'create Client Profile %s.', cl_profile_name) return False LOG.debug('UCS Manager network driver created Client Profile ' '%s at %s', cl_profile_name, cl_profile_dest) return True except Exception as e: return self._handle_ucsm_exception(e, 'Port Profile', profile_name, ucsm_ip)
['def', '_create_port_profile', '(', 'self', ',', 'handle', ',', 'profile_name', ',', 'vlan_id', ',', 'vnic_type', ',', 'ucsm_ip', ',', 'trunk_vlans', ',', 'qos_policy', ')', ':', 'port_profile_dest', '=', '(', 'const', '.', 'PORT_PROFILESETDN', '+', 'const', '.', 'VNIC_PATH_PREFIX', '+', 'profile_name', ')', 'vlan_name', '=', 'self', '.', 'make_vlan_name', '(', 'vlan_id', ')', 'vlan_associate_path', '=', '(', 'const', '.', 'PORT_PROFILESETDN', '+', 'const', '.', 'VNIC_PATH_PREFIX', '+', 'profile_name', '+', 'const', '.', 'VLAN_PATH_PREFIX', '+', 'vlan_name', ')', 'cl_profile_name', '=', 'const', '.', 'CLIENT_PROFILE_NAME_PREFIX', '+', 'str', '(', 'vlan_id', ')', 'cl_profile_dest', '=', '(', 'const', '.', 'PORT_PROFILESETDN', '+', 'const', '.', 'VNIC_PATH_PREFIX', '+', 'profile_name', '+', 'const', '.', 'CLIENT_PROFILE_PATH_PREFIX', '+', 'cl_profile_name', ')', '# Remove this Port Profile from the delete DB table if it was', '# addded there due to a previous delete.', 'self', '.', 'ucsm_db', '.', 'remove_port_profile_to_delete', '(', 'profile_name', ',', 'ucsm_ip', ')', '# Check if direct or macvtap mode', 'if', 'vnic_type', '==', 'bc', '.', 'portbindings', '.', 'VNIC_DIRECT', ':', 'port_mode', '=', 'const', '.', 'HIGH_PERF', 'else', ':', 'port_mode', '=', 'const', '.', 'NONE', 'try', ':', 'handle', '.', 'StartTransaction', '(', ')', 'port_profile', '=', 'handle', '.', 'GetManagedObject', '(', 'None', ',', 'self', '.', 'ucsmsdk', '.', 'VnicProfileSet', '.', 'ClassId', '(', ')', ',', '{', 'self', '.', 'ucsmsdk', '.', 'VnicProfileSet', '.', 'DN', ':', 'const', '.', 'PORT_PROFILESETDN', '}', ')', 'if', 'not', 'port_profile', ':', 'LOG', '.', 'warning', '(', "'UCS Manager network driver Port Profile '", "'path at %s missing'", ',', 'const', '.', 'PORT_PROFILESETDN', ')', 'return', 'False', '# Create a port profile on the UCS Manager', 'p_profile', '=', 'handle', '.', 'AddManagedObject', '(', 'port_profile', ',', 'self', '.', 'ucsmsdk', '.', 'VnicProfile', '.', 'ClassId', '(', ')', ',', '{', 'self', '.', 'ucsmsdk', '.', 'VnicProfile', '.', 'NAME', ':', 'profile_name', ',', 'self', '.', 'ucsmsdk', '.', 'VnicProfile', '.', 'POLICY_OWNER', ':', '"local"', ',', 'self', '.', 'ucsmsdk', '.', 'VnicProfile', '.', 'NW_CTRL_POLICY_NAME', ':', '""', ',', 'self', '.', 'ucsmsdk', '.', 'VnicProfile', '.', 'PIN_TO_GROUP_NAME', ':', '""', ',', 'self', '.', 'ucsmsdk', '.', 'VnicProfile', '.', 'DN', ':', 'port_profile_dest', ',', 'self', '.', 'ucsmsdk', '.', 'VnicProfile', '.', 'DESCR', ':', 'const', '.', 'DESCR', ',', 'self', '.', 'ucsmsdk', '.', 'VnicProfile', '.', 'QOS_POLICY_NAME', ':', 'qos_policy', ',', 'self', '.', 'ucsmsdk', '.', 'VnicProfile', '.', 'HOST_NW_IOPERF', ':', 'port_mode', ',', 'self', '.', 'ucsmsdk', '.', 'VnicProfile', '.', 'MAX_PORTS', ':', 'const', '.', 'MAX_PORTS', '}', ')', 'if', 'not', 'p_profile', ':', 'LOG', '.', 'warning', '(', "'UCS Manager network driver could not '", "'create Port Profile %s.'", ',', 'profile_name', ')', 'return', 'False', 'LOG', '.', 'debug', '(', "'UCS Manager network driver associating Vlan '", "'Profile with Port Profile at %s'", ',', 'vlan_associate_path', ')', '# Associate port profile with vlan profile', 'mo', '=', 'handle', '.', 'AddManagedObject', '(', 'p_profile', ',', 'self', '.', 'ucsmsdk', '.', 'VnicEtherIf', '.', 'ClassId', '(', ')', ',', '{', 'self', '.', 'ucsmsdk', '.', 'VnicEtherIf', '.', 'DN', ':', 'vlan_associate_path', ',', 'self', '.', 'ucsmsdk', '.', 'VnicEtherIf', '.', 'NAME', ':', 'vlan_name', ',', 'self', '.', 'ucsmsdk', '.', 'VnicEtherIf', '.', 'DEFAULT_NET', ':', '"yes"', '}', ',', 'True', ')', 'if', 'not', 'mo', ':', 'LOG', '.', 'warning', '(', "'UCS Manager network driver cannot '", "'associate Vlan Profile to Port '", "'Profile %s'", ',', 'profile_name', ')', 'return', 'False', 'LOG', '.', 'debug', '(', "'UCS Manager network driver created Port Profile %s '", "'at %s'", ',', 'profile_name', ',', 'port_profile_dest', ')', '# For Multi VLAN trunk support', 'if', 'trunk_vlans', ':', 'for', 'vlan', 'in', 'trunk_vlans', ':', 'vlan_name', '=', 'self', '.', 'make_vlan_name', '(', 'vlan', ')', 'vlan_associate_path', '=', '(', 'const', '.', 'PORT_PROFILESETDN', '+', 'const', '.', 'VNIC_PATH_PREFIX', '+', 'profile_name', '+', 'const', '.', 'VLAN_PATH_PREFIX', '+', 'vlan_name', ')', '# Associate port profile with vlan profile', '# for the trunk vlans', 'mo', '=', 'handle', '.', 'AddManagedObject', '(', 'p_profile', ',', 'self', '.', 'ucsmsdk', '.', 'VnicEtherIf', '.', 'ClassId', '(', ')', ',', '{', 'self', '.', 'ucsmsdk', '.', 'VnicEtherIf', '.', 'DN', ':', 'vlan_associate_path', ',', 'self', '.', 'ucsmsdk', '.', 'VnicEtherIf', '.', 'NAME', ':', 'vlan_name', ',', 'self', '.', 'ucsmsdk', '.', 'VnicEtherIf', '.', 'DEFAULT_NET', ':', '"no"', '}', ',', 'True', ')', 'if', 'not', 'mo', ':', 'LOG', '.', 'warning', '(', "'UCS Manager network driver cannot '", "'associate Vlan %(vlan)d to Port '", "'Profile %(profile)s'", ',', '{', "'vlan'", ':', 'vlan', ',', "'profile'", ':', 'profile_name', '}', ')', 'cl_profile', '=', 'handle', '.', 'AddManagedObject', '(', 'p_profile', ',', 'self', '.', 'ucsmsdk', '.', 'VmVnicProfCl', '.', 'ClassId', '(', ')', ',', '{', 'self', '.', 'ucsmsdk', '.', 'VmVnicProfCl', '.', 'ORG_PATH', ':', '".*"', ',', 'self', '.', 'ucsmsdk', '.', 'VmVnicProfCl', '.', 'DN', ':', 'cl_profile_dest', ',', 'self', '.', 'ucsmsdk', '.', 'VmVnicProfCl', '.', 'NAME', ':', 'cl_profile_name', ',', 'self', '.', 'ucsmsdk', '.', 'VmVnicProfCl', '.', 'POLICY_OWNER', ':', '"local"', ',', 'self', '.', 'ucsmsdk', '.', 'VmVnicProfCl', '.', 'SW_NAME', ':', '".*"', ',', 'self', '.', 'ucsmsdk', '.', 'VmVnicProfCl', '.', 'DC_NAME', ':', '".*"', ',', 'self', '.', 'ucsmsdk', '.', 'VmVnicProfCl', '.', 'DESCR', ':', 'const', '.', 'DESCR', '}', ')', 'handle', '.', 'CompleteTransaction', '(', ')', 'if', 'not', 'cl_profile', ':', 'LOG', '.', 'warning', '(', "'UCS Manager network driver could not '", "'create Client Profile %s.'", ',', 'cl_profile_name', ')', 'return', 'False', 'LOG', '.', 'debug', '(', "'UCS Manager network driver created Client Profile '", "'%s at %s'", ',', 'cl_profile_name', ',', 'cl_profile_dest', ')', 'return', 'True', 'except', 'Exception', 'as', 'e', ':', 'return', 'self', '.', '_handle_ucsm_exception', '(', 'e', ',', "'Port Profile'", ',', 'profile_name', ',', 'ucsm_ip', ')']
Creates a Port Profile on the UCS Manager. Significant parameters set in the port profile are: 1. Port profile name - Should match what was set in vif_details 2. High performance mode - For VM-FEX to be enabled/configured on the port using this port profile, this mode should be enabled. 3. Vlan id - Vlan id used by traffic to and from the port.
['Creates', 'a', 'Port', 'Profile', 'on', 'the', 'UCS', 'Manager', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/ucsm/deprecated_network_driver.py#L306-L432
4,624
atmos-python/atmos
atmos/util.py
doc_paragraph
def doc_paragraph(s, indent=0): '''Takes in a string without wrapping corresponding to a paragraph, and returns a version of that string wrapped to be at most 80 characters in length on each line. If indent is given, ensures each line is indented to that number of spaces. ''' return '\n'.join([' '*indent + l for l in wrap(s, width=80-indent)])
python
def doc_paragraph(s, indent=0): '''Takes in a string without wrapping corresponding to a paragraph, and returns a version of that string wrapped to be at most 80 characters in length on each line. If indent is given, ensures each line is indented to that number of spaces. ''' return '\n'.join([' '*indent + l for l in wrap(s, width=80-indent)])
['def', 'doc_paragraph', '(', 's', ',', 'indent', '=', '0', ')', ':', 'return', "'\\n'", '.', 'join', '(', '[', "' '", '*', 'indent', '+', 'l', 'for', 'l', 'in', 'wrap', '(', 's', ',', 'width', '=', '80', '-', 'indent', ')', ']', ')']
Takes in a string without wrapping corresponding to a paragraph, and returns a version of that string wrapped to be at most 80 characters in length on each line. If indent is given, ensures each line is indented to that number of spaces.
['Takes', 'in', 'a', 'string', 'without', 'wrapping', 'corresponding', 'to', 'a', 'paragraph', 'and', 'returns', 'a', 'version', 'of', 'that', 'string', 'wrapped', 'to', 'be', 'at', 'most', '80', 'characters', 'in', 'length', 'on', 'each', 'line', '.', 'If', 'indent', 'is', 'given', 'ensures', 'each', 'line', 'is', 'indented', 'to', 'that', 'number', 'of', 'spaces', '.']
train
https://github.com/atmos-python/atmos/blob/f4af8eaca23cce881bde979599d15d322fc1935e/atmos/util.py#L133-L140
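doc_paragraph is a thin wrapper over textwrap.wrap: shrink the wrap width by the indent, then prefix every line with spaces. Verbatim stdlib equivalent:

    from textwrap import wrap

    def doc_paragraph(s, indent=0):
        return '\n'.join(' ' * indent + l for l in wrap(s, width=80 - indent))

    print(doc_paragraph('word ' * 40, indent=4))  # every line starts with 4 spaces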
4,625
honzajavorek/redis-collections
redis_collections/lists.py
Deque.appendleft
def appendleft(self, value): """Add *value* to the left side of the collection.""" def appendleft_trans(pipe): self._appendleft_helper(value, pipe) self._transaction(appendleft_trans)
python
def appendleft(self, value): """Add *value* to the left side of the collection.""" def appendleft_trans(pipe): self._appendleft_helper(value, pipe) self._transaction(appendleft_trans)
['def', 'appendleft', '(', 'self', ',', 'value', ')', ':', 'def', 'appendleft_trans', '(', 'pipe', ')', ':', 'self', '.', '_appendleft_helper', '(', 'value', ',', 'pipe', ')', 'self', '.', '_transaction', '(', 'appendleft_trans', ')']
Add *value* to the left side of the collection.
['Add', '*', 'value', '*', 'to', 'the', 'left', 'side', 'of', 'the', 'collection', '.']
train
https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/lists.py#L764-L769
4,626
xray7224/PyPump
pypump/models/__init__.py
PumpObject._verb
def _verb(self, verb): """ Posts minimal activity with verb and bare self object. :param verb: verb to be used. """ activity = { "verb": verb, "object": { "id": self.id, "objectType": self.object_type, } } self._post_activity(activity)
python
def _verb(self, verb): """ Posts minimal activity with verb and bare self object. :param verb: verb to be used. """ activity = { "verb": verb, "object": { "id": self.id, "objectType": self.object_type, } } self._post_activity(activity)
['def', '_verb', '(', 'self', ',', 'verb', ')', ':', 'activity', '=', '{', '"verb"', ':', 'verb', ',', '"object"', ':', '{', '"id"', ':', 'self', '.', 'id', ',', '"objectType"', ':', 'self', '.', 'object_type', ',', '}', '}', 'self', '.', '_post_activity', '(', 'activity', ')']
Posts minimal activity with verb and bare self object. :param verb: verb to be used.
['Posts', 'minimal', 'activity', 'with', 'verb', 'and', 'bare', 'self', 'object', '.', ':', 'param', 'verb', ':', 'verb', 'to', 'be', 'used', '.']
train
https://github.com/xray7224/PyPump/blob/f921f691c39fe021f4fd124b6bc91718c9e49b4a/pypump/models/__init__.py#L85-L98
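The activity built by _verb is the smallest payload the method posts: a verb plus a bare object carrying only id and objectType. The identifiers below are placeholders for illustration, not real pump.io accounts:

    activity = {
        "verb": "follow",
        "object": {
            "id": "acct:alice@example.org",  # hypothetical object id
            "objectType": "person",
        },
    }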
4,627
meejah/txtorcon
txtorcon/torconfig.py
EphemeralHiddenService.remove_from_tor
def remove_from_tor(self, protocol): ''' Returns a Deferred which fires with None ''' r = yield protocol.queue_command('DEL_ONION %s' % self.hostname[:-6]) if r.strip() != 'OK': raise RuntimeError('Failed to remove hidden service: "%s".' % r)
python
def remove_from_tor(self, protocol): ''' Returns a Deferred which fires with None ''' r = yield protocol.queue_command('DEL_ONION %s' % self.hostname[:-6]) if r.strip() != 'OK': raise RuntimeError('Failed to remove hidden service: "%s".' % r)
['def', 'remove_from_tor', '(', 'self', ',', 'protocol', ')', ':', 'r', '=', 'yield', 'protocol', '.', 'queue_command', '(', "'DEL_ONION %s'", '%', 'self', '.', 'hostname', '[', ':', '-', '6', ']', ')', 'if', 'r', '.', 'strip', '(', ')', '!=', "'OK'", ':', 'raise', 'RuntimeError', '(', '\'Failed to remove hidden service: "%s".\'', '%', 'r', ')']
Returns a Deferred which fires with None
['Returns', 'a', 'Deferred', 'which', 'fires', 'with', 'None']
train
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/torconfig.py#L488-L494
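The hostname[:-6] slice strips the literal '.onion' suffix (six characters) so DEL_ONION receives the bare service ID. A quick check of that string handling:

    hostname = 'abcdefghijklmnop.onion'
    service_id = hostname[:-6]          # drop the trailing '.onion'
    assert service_id == 'abcdefghijklmnop'
    print('DEL_ONION %s' % service_id)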
4,628
pysathq/pysat
pysat/solvers.py
Lingeling.get_model
def get_model(self): """ Get a model if the formula was previously satisfied. """ if self.lingeling and self.status == True: model = pysolvers.lingeling_model(self.lingeling) return model if model != None else []
python
def get_model(self): """ Get a model if the formula was previously satisfied. """ if self.lingeling and self.status == True: model = pysolvers.lingeling_model(self.lingeling) return model if model != None else []
['def', 'get_model', '(', 'self', ')', ':', 'if', 'self', '.', 'lingeling', 'and', 'self', '.', 'status', '==', 'True', ':', 'model', '=', 'pysolvers', '.', 'lingeling_model', '(', 'self', '.', 'lingeling', ')', 'return', 'model', 'if', 'model', '!=', 'None', 'else', '[', ']']
Get a model if the formula was previously satisfied.
['Get', 'a', 'model', 'if', 'the', 'formula', 'was', 'previously', 'satisfied', '.']
train
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L1651-L1658
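In normal use get_model() is called only after solve() reports satisfiability, which is exactly the status flag guarding the record above. A typical call sequence against PySAT's documented solver interface (assuming the lingeling backend is installed):

    from pysat.solvers import Lingeling

    with Lingeling(bootstrap_with=[[-1, 2], [1]]) as solver:
        if solver.solve():
            print(solver.get_model())  # e.g. [1, 2]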
4,629
astroswego/plotypus
src/plotypus/preprocessing.py
Fourier.transform
def transform(self, X, y=None, **params): """ Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). **Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`. """ data = numpy.dstack((numpy.array(X).T[0], range(len(X))))[0] phase, order = data[data[:,0].argsort()].T design_matrix = self.design_matrix(phase, self.degree) return design_matrix[order.argsort()]
python
def transform(self, X, y=None, **params): """ Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). **Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`. """ data = numpy.dstack((numpy.array(X).T[0], range(len(X))))[0] phase, order = data[data[:,0].argsort()].T design_matrix = self.design_matrix(phase, self.degree) return design_matrix[order.argsort()]
['def', 'transform', '(', 'self', ',', 'X', ',', 'y', '=', 'None', ',', '*', '*', 'params', ')', ':', 'data', '=', 'numpy', '.', 'dstack', '(', '(', 'numpy', '.', 'array', '(', 'X', ')', '.', 'T', '[', '0', ']', ',', 'range', '(', 'len', '(', 'X', ')', ')', ')', ')', '[', '0', ']', 'phase', ',', 'order', '=', 'data', '[', 'data', '[', ':', ',', '0', ']', '.', 'argsort', '(', ')', ']', '.', 'T', 'design_matrix', '=', 'self', '.', 'design_matrix', '(', 'phase', ',', 'self', '.', 'degree', ')', 'return', 'design_matrix', '[', 'order', '.', 'argsort', '(', ')', ']']
Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). **Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`.
['Transforms', '*', 'X', '*', 'from', 'phase', '-', 'space', 'to', 'Fourier', '-', 'space', 'returning', 'the', 'design', 'matrix', 'produced', 'by', ':', 'func', ':', 'Fourier', '.', 'design_matrix', 'for', 'input', 'to', 'a', 'regressor', '.']
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/preprocessing.py#L96-L117
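Fourier.transform sorts the phases before building the design matrix, then uses order.argsort() to restore input order. The sort-then-unsort idiom in isolation:

    import numpy as np

    phase = np.array([0.7, 0.1, 0.4])
    order = phase.argsort()                   # permutation that sorts the phases
    restored = phase[order][order.argsort()]  # argsort of the permutation undoes it
    assert (restored == phase).all()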
4,630
fastai/fastai
fastai/gen_doc/gen_notebooks.py
execute_nb
def execute_nb(fname, metadata=None, save=True, show_doc_only=False): "Execute notebook `fname` with `metadata` for preprocessing." # Any module used in the notebook that isn't inside must be in the same directory as this script with open(fname) as f: nb = nbformat.read(f, as_version=4) ep_class = ExecuteShowDocPreprocessor if show_doc_only else ExecutePreprocessor ep = ep_class(timeout=600, kernel_name='python3') metadata = metadata or {} ep.preprocess(nb, metadata) if save: with open(fname, 'wt') as f: nbformat.write(nb, f) NotebookNotary().sign(nb)
python
def execute_nb(fname, metadata=None, save=True, show_doc_only=False): "Execute notebook `fname` with `metadata` for preprocessing." # Any module used in the notebook that isn't inside must be in the same directory as this script with open(fname) as f: nb = nbformat.read(f, as_version=4) ep_class = ExecuteShowDocPreprocessor if show_doc_only else ExecutePreprocessor ep = ep_class(timeout=600, kernel_name='python3') metadata = metadata or {} ep.preprocess(nb, metadata) if save: with open(fname, 'wt') as f: nbformat.write(nb, f) NotebookNotary().sign(nb)
['def', 'execute_nb', '(', 'fname', ',', 'metadata', '=', 'None', ',', 'save', '=', 'True', ',', 'show_doc_only', '=', 'False', ')', ':', "# Any module used in the notebook that isn't inside must be in the same directory as this script", 'with', 'open', '(', 'fname', ')', 'as', 'f', ':', 'nb', '=', 'nbformat', '.', 'read', '(', 'f', ',', 'as_version', '=', '4', ')', 'ep_class', '=', 'ExecuteShowDocPreprocessor', 'if', 'show_doc_only', 'else', 'ExecutePreprocessor', 'ep', '=', 'ep_class', '(', 'timeout', '=', '600', ',', 'kernel_name', '=', "'python3'", ')', 'metadata', '=', 'metadata', 'or', '{', '}', 'ep', '.', 'preprocess', '(', 'nb', ',', 'metadata', ')', 'if', 'save', ':', 'with', 'open', '(', 'fname', ',', "'wt'", ')', 'as', 'f', ':', 'nbformat', '.', 'write', '(', 'nb', ',', 'f', ')', 'NotebookNotary', '(', ')', '.', 'sign', '(', 'nb', ')']
Execute notebook `fname` with `metadata` for preprocessing.
['Execute', 'notebook', 'fname', 'with', 'metadata', 'for', 'preprocessing', '.']
train
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/gen_doc/gen_notebooks.py#L79-L89
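Stripped of the show-doc preprocessor and notebook signing, execute_nb is the standard nbformat/nbconvert round trip. A bare sketch (requires a Jupyter kernel named 'python3'):

    import nbformat
    from nbconvert.preprocessors import ExecutePreprocessor

    def run_notebook(fname):
        with open(fname) as f:
            nb = nbformat.read(f, as_version=4)
        ep = ExecutePreprocessor(timeout=600, kernel_name='python3')
        ep.preprocess(nb, {})          # executes the cells in place
        with open(fname, 'wt') as f:
            nbformat.write(nb, f)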
4,631
flatangle/flatlib
flatlib/ephem/ephem.py
nextSolarReturn
def nextSolarReturn(date, lon): """ Returns the next date when sun is at longitude 'lon'. """ jd = eph.nextSolarReturn(date.jd, lon) return Datetime.fromJD(jd, date.utcoffset)
python
def nextSolarReturn(date, lon): """ Returns the next date when sun is at longitude 'lon'. """ jd = eph.nextSolarReturn(date.jd, lon) return Datetime.fromJD(jd, date.utcoffset)
['def', 'nextSolarReturn', '(', 'date', ',', 'lon', ')', ':', 'jd', '=', 'eph', '.', 'nextSolarReturn', '(', 'date', '.', 'jd', ',', 'lon', ')', 'return', 'Datetime', '.', 'fromJD', '(', 'jd', ',', 'date', '.', 'utcoffset', ')']
Returns the next date when sun is at longitude 'lon'.
['Returns', 'the', 'next', 'date', 'when', 'sun', 'is', 'at', 'longitude', 'lon', '.']
train
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/ephem/ephem.py#L77-L80
4,632
praw-dev/prawtools
prawtools/stats.py
SubredditStats.basic_stats
def basic_stats(self): """Return a markdown representation of simple statistics.""" comment_score = sum(comment.score for comment in self.comments) if self.comments: comment_duration = (self.comments[-1].created_utc - self.comments[0].created_utc) comment_rate = self._rate(len(self.comments), comment_duration) else: comment_rate = 0 submission_duration = self.max_date - self.min_date submission_rate = self._rate(len(self.submissions), submission_duration) submission_score = sum(sub.score for sub in self.submissions.values()) values = [('Total', len(self.submissions), len(self.comments)), ('Rate (per day)', '{:.2f}'.format(submission_rate), '{:.2f}'.format(comment_rate)), ('Unique Redditors', len(self.submitters), len(self.commenters)), ('Combined Score', submission_score, comment_score)] retval = 'Period: {:.2f} days\n\n'.format(submission_duration / 86400.) retval += '||Submissions|Comments|\n:-:|--:|--:\n' for quad in values: retval += '__{}__|{}|{}\n'.format(*quad) return retval + '\n'
python
def basic_stats(self): """Return a markdown representation of simple statistics.""" comment_score = sum(comment.score for comment in self.comments) if self.comments: comment_duration = (self.comments[-1].created_utc - self.comments[0].created_utc) comment_rate = self._rate(len(self.comments), comment_duration) else: comment_rate = 0 submission_duration = self.max_date - self.min_date submission_rate = self._rate(len(self.submissions), submission_duration) submission_score = sum(sub.score for sub in self.submissions.values()) values = [('Total', len(self.submissions), len(self.comments)), ('Rate (per day)', '{:.2f}'.format(submission_rate), '{:.2f}'.format(comment_rate)), ('Unique Redditors', len(self.submitters), len(self.commenters)), ('Combined Score', submission_score, comment_score)] retval = 'Period: {:.2f} days\n\n'.format(submission_duration / 86400.) retval += '||Submissions|Comments|\n:-:|--:|--:\n' for quad in values: retval += '__{}__|{}|{}\n'.format(*quad) return retval + '\n'
['def', 'basic_stats', '(', 'self', ')', ':', 'comment_score', '=', 'sum', '(', 'comment', '.', 'score', 'for', 'comment', 'in', 'self', '.', 'comments', ')', 'if', 'self', '.', 'comments', ':', 'comment_duration', '=', '(', 'self', '.', 'comments', '[', '-', '1', ']', '.', 'created_utc', '-', 'self', '.', 'comments', '[', '0', ']', '.', 'created_utc', ')', 'comment_rate', '=', 'self', '.', '_rate', '(', 'len', '(', 'self', '.', 'comments', ')', ',', 'comment_duration', ')', 'else', ':', 'comment_rate', '=', '0', 'submission_duration', '=', 'self', '.', 'max_date', '-', 'self', '.', 'min_date', 'submission_rate', '=', 'self', '.', '_rate', '(', 'len', '(', 'self', '.', 'submissions', ')', ',', 'submission_duration', ')', 'submission_score', '=', 'sum', '(', 'sub', '.', 'score', 'for', 'sub', 'in', 'self', '.', 'submissions', '.', 'values', '(', ')', ')', 'values', '=', '[', '(', "'Total'", ',', 'len', '(', 'self', '.', 'submissions', ')', ',', 'len', '(', 'self', '.', 'comments', ')', ')', ',', '(', "'Rate (per day)'", ',', "'{:.2f}'", '.', 'format', '(', 'submission_rate', ')', ',', "'{:.2f}'", '.', 'format', '(', 'comment_rate', ')', ')', ',', '(', "'Unique Redditors'", ',', 'len', '(', 'self', '.', 'submitters', ')', ',', 'len', '(', 'self', '.', 'commenters', ')', ')', ',', '(', "'Combined Score'", ',', 'submission_score', ',', 'comment_score', ')', ']', 'retval', '=', "'Period: {:.2f} days\\n\\n'", '.', 'format', '(', 'submission_duration', '/', '86400.', ')', 'retval', '+=', "'||Submissions|Comments|\\n:-:|--:|--:\\n'", 'for', 'quad', 'in', 'values', ':', 'retval', '+=', "'__{}__|{}|{}\\n'", '.', 'format', '(', '*', 'quad', ')', 'return', 'retval', '+', "'\\n'"]
Return a markdown representation of simple statistics.
['Return', 'a', 'markdown', 'representation', 'of', 'simple', 'statistics', '.']
train
https://github.com/praw-dev/prawtools/blob/571d5c28c2222f6f8dbbca8c815b8da0a776ab85/prawtools/stats.py#L112-L138
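'Rate (per day)' divides a count by the observation span converted from seconds to days, the same arithmetic _rate performs with the 86400. constant:

    count, duration_seconds = 42, 3 * 86400
    rate = count / (duration_seconds / 86400.)
    print('{:.2f}'.format(rate))  # 14.00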
4,633
pandas-dev/pandas
pandas/core/generic.py
NDFrame._get_label_or_level_values
def _get_label_or_level_values(self, key, axis=0): """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key: str Label or level name. axis: int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- values: np.ndarray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels FutureWarning if `key` is ambiguous. This will become an ambiguity error in a future version """ if self.ndim > 2: raise NotImplementedError( "_get_label_or_level_values is not implemented for {type}" .format(type=type(self))) axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance( self._get_axis(other_axes[0]), MultiIndex): multi_message = ('\n' 'For a multi-index, the label must be a ' 'tuple with elements corresponding to ' 'each level.') else: multi_message = '' label_axis_name = 'column' if axis == 0 else 'index' raise ValueError(("The {label_axis_name} label '{key}' " "is not unique.{multi_message}") .format(key=key, label_axis_name=label_axis_name, multi_message=multi_message)) return values
python
def _get_label_or_level_values(self, key, axis=0): """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key: str Label or level name. axis: int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- values: np.ndarray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels FutureWarning if `key` is ambiguous. This will become an ambiguity error in a future version """ if self.ndim > 2: raise NotImplementedError( "_get_label_or_level_values is not implemented for {type}" .format(type=type(self))) axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance( self._get_axis(other_axes[0]), MultiIndex): multi_message = ('\n' 'For a multi-index, the label must be a ' 'tuple with elements corresponding to ' 'each level.') else: multi_message = '' label_axis_name = 'column' if axis == 0 else 'index' raise ValueError(("The {label_axis_name} label '{key}' " "is not unique.{multi_message}") .format(key=key, label_axis_name=label_axis_name, multi_message=multi_message)) return values
['def', '_get_label_or_level_values', '(', 'self', ',', 'key', ',', 'axis', '=', '0', ')', ':', 'if', 'self', '.', 'ndim', '>', '2', ':', 'raise', 'NotImplementedError', '(', '"_get_label_or_level_values is not implemented for {type}"', '.', 'format', '(', 'type', '=', 'type', '(', 'self', ')', ')', ')', 'axis', '=', 'self', '.', '_get_axis_number', '(', 'axis', ')', 'other_axes', '=', '[', 'ax', 'for', 'ax', 'in', 'range', '(', 'self', '.', '_AXIS_LEN', ')', 'if', 'ax', '!=', 'axis', ']', 'if', 'self', '.', '_is_label_reference', '(', 'key', ',', 'axis', '=', 'axis', ')', ':', 'self', '.', '_check_label_or_level_ambiguity', '(', 'key', ',', 'axis', '=', 'axis', ')', 'values', '=', 'self', '.', 'xs', '(', 'key', ',', 'axis', '=', 'other_axes', '[', '0', ']', ')', '.', '_values', 'elif', 'self', '.', '_is_level_reference', '(', 'key', ',', 'axis', '=', 'axis', ')', ':', 'values', '=', 'self', '.', 'axes', '[', 'axis', ']', '.', 'get_level_values', '(', 'key', ')', '.', '_values', 'else', ':', 'raise', 'KeyError', '(', 'key', ')', '# Check for duplicates', 'if', 'values', '.', 'ndim', '>', '1', ':', 'if', 'other_axes', 'and', 'isinstance', '(', 'self', '.', '_get_axis', '(', 'other_axes', '[', '0', ']', ')', ',', 'MultiIndex', ')', ':', 'multi_message', '=', '(', "'\\n'", "'For a multi-index, the label must be a '", "'tuple with elements corresponding to '", "'each level.'", ')', 'else', ':', 'multi_message', '=', "''", 'label_axis_name', '=', "'column'", 'if', 'axis', '==', '0', 'else', "'index'", 'raise', 'ValueError', '(', '(', '"The {label_axis_name} label \'{key}\' "', '"is not unique.{multi_message}"', ')', '.', 'format', '(', 'key', '=', 'key', ',', 'label_axis_name', '=', 'label_axis_name', ',', 'multi_message', '=', 'multi_message', ')', ')', 'return', 'values']
Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key: str Label or level name. axis: int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- values: np.ndarray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels FutureWarning if `key` is ambiguous. This will become an ambiguity error in a future version
['Return', 'a', '1', '-', 'D', 'array', 'of', 'values', 'associated', 'with', 'key', 'a', 'label', 'or', 'level', 'from', 'the', 'given', 'axis', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L1688-L1757
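A minimal illustration of the retrieval logic documented above, using only public pandas API; the DataFrame, column name and level name here are invented for the demo:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]},
                  index=pd.Index(['x', 'y', 'z'], name='lvl'))

# `key` matching a column label (axis=0): column values are returned.
print(df['a'].values)                           # [1 2 3]

# `key` matching an index level (axis=0): level values are returned.
print(df.index.get_level_values('lvl').values)  # ['x' 'y' 'z']

# A key that names both a column and an index level is ambiguous; the
# private method above routes that case through
# _check_label_or_level_ambiguity before returning anything.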
4,634
log2timeline/plaso
plaso/parsers/presets.py
ParserPresetsManager.GetPresetsByOperatingSystem
def GetPresetsByOperatingSystem(self, operating_system):
    """Retrieves preset definitions for a specific operating system.

    Args:
      operating_system (OperatingSystemArtifact): an operating system artifact
          attribute container.

    Returns:
      list[PresetDefinition]: preset definitions that correspond with the
          operating system.
    """
    preset_definitions = []
    for preset_definition in self._definitions.values():
      for preset_operating_system in preset_definition.operating_systems:
        if preset_operating_system.IsEquivalent(operating_system):
          preset_definitions.append(preset_definition)

    return preset_definitions
python
def GetPresetsByOperatingSystem(self, operating_system):
    """Retrieves preset definitions for a specific operating system.

    Args:
      operating_system (OperatingSystemArtifact): an operating system artifact
          attribute container.

    Returns:
      list[PresetDefinition]: preset definitions that correspond with the
          operating system.
    """
    preset_definitions = []
    for preset_definition in self._definitions.values():
      for preset_operating_system in preset_definition.operating_systems:
        if preset_operating_system.IsEquivalent(operating_system):
          preset_definitions.append(preset_definition)

    return preset_definitions
['def', 'GetPresetsByOperatingSystem', '(', 'self', ',', 'operating_system', ')', ':', 'preset_definitions', '=', '[', ']', 'for', 'preset_definition', 'in', 'self', '.', '_definitions', '.', 'values', '(', ')', ':', 'for', 'preset_operating_system', 'in', 'preset_definition', '.', 'operating_systems', ':', 'if', 'preset_operating_system', '.', 'IsEquivalent', '(', 'operating_system', ')', ':', 'preset_definitions', '.', 'append', '(', 'preset_definition', ')', 'return', 'preset_definitions']
Retrieves preset definitions for a specific operating system.

Args:
  operating_system (OperatingSystemArtifact): an operating system artifact
    attribute container.

Returns:
  list[PresetDefinition]: preset definitions that correspond with the
    operating system.
['Retrieves', 'preset', 'definitions', 'for', 'a', 'specific', 'operating', 'system', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/presets.py#L159-L176
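A self-contained sketch of the same filter pattern; OS and Preset below are hypothetical stand-ins for OperatingSystemArtifact and PresetDefinition:

from dataclasses import dataclass, field
from typing import List

@dataclass
class OS:
    """Stand-in for OperatingSystemArtifact."""
    family: str

    def IsEquivalent(self, other):
        return self.family == other.family

@dataclass
class Preset:
    """Stand-in for PresetDefinition."""
    name: str
    operating_systems: List[OS] = field(default_factory=list)

definitions = {
    'win7': Preset('win7', [OS('Windows')]),
    'macos': Preset('macos', [OS('MacOS')]),
}
target = OS('Windows')
matches = [p for p in definitions.values()
           if any(o.IsEquivalent(target) for o in p.operating_systems)]
assert [p.name for p in matches] == ['win7']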
4,635
gem/oq-engine
openquake/engine/engine.py
poll_queue
def poll_queue(job_id, pid, poll_time): """ Check the queue of executing/submitted jobs and exit when there is a free slot. """ if config.distribution.serialize_jobs: first_time = True while True: jobs = logs.dbcmd(GET_JOBS) failed = [job.id for job in jobs if not psutil.pid_exists(job.pid)] if failed: logs.dbcmd("UPDATE job SET status='failed', is_running=0 " "WHERE id in (?X)", failed) elif any(job.id < job_id for job in jobs): if first_time: logs.LOG.warn('Waiting for jobs %s', [j.id for j in jobs]) logs.dbcmd('update_job', job_id, {'status': 'submitted', 'pid': pid}) first_time = False time.sleep(poll_time) else: break logs.dbcmd('update_job', job_id, {'status': 'executing', 'pid': _PID})
python
def poll_queue(job_id, pid, poll_time): """ Check the queue of executing/submitted jobs and exit when there is a free slot. """ if config.distribution.serialize_jobs: first_time = True while True: jobs = logs.dbcmd(GET_JOBS) failed = [job.id for job in jobs if not psutil.pid_exists(job.pid)] if failed: logs.dbcmd("UPDATE job SET status='failed', is_running=0 " "WHERE id in (?X)", failed) elif any(job.id < job_id for job in jobs): if first_time: logs.LOG.warn('Waiting for jobs %s', [j.id for j in jobs]) logs.dbcmd('update_job', job_id, {'status': 'submitted', 'pid': pid}) first_time = False time.sleep(poll_time) else: break logs.dbcmd('update_job', job_id, {'status': 'executing', 'pid': _PID})
['def', 'poll_queue', '(', 'job_id', ',', 'pid', ',', 'poll_time', ')', ':', 'if', 'config', '.', 'distribution', '.', 'serialize_jobs', ':', 'first_time', '=', 'True', 'while', 'True', ':', 'jobs', '=', 'logs', '.', 'dbcmd', '(', 'GET_JOBS', ')', 'failed', '=', '[', 'job', '.', 'id', 'for', 'job', 'in', 'jobs', 'if', 'not', 'psutil', '.', 'pid_exists', '(', 'job', '.', 'pid', ')', ']', 'if', 'failed', ':', 'logs', '.', 'dbcmd', '(', '"UPDATE job SET status=\'failed\', is_running=0 "', '"WHERE id in (?X)"', ',', 'failed', ')', 'elif', 'any', '(', 'job', '.', 'id', '<', 'job_id', 'for', 'job', 'in', 'jobs', ')', ':', 'if', 'first_time', ':', 'logs', '.', 'LOG', '.', 'warn', '(', "'Waiting for jobs %s'", ',', '[', 'j', '.', 'id', 'for', 'j', 'in', 'jobs', ']', ')', 'logs', '.', 'dbcmd', '(', "'update_job'", ',', 'job_id', ',', '{', "'status'", ':', "'submitted'", ',', "'pid'", ':', 'pid', '}', ')', 'first_time', '=', 'False', 'time', '.', 'sleep', '(', 'poll_time', ')', 'else', ':', 'break', 'logs', '.', 'dbcmd', '(', "'update_job'", ',', 'job_id', ',', '{', "'status'", ':', "'executing'", ',', "'pid'", ':', '_PID', '}', ')']
Check the queue of executing/submitted jobs and exit when there is a free slot.
['Check', 'the', 'queue', 'of', 'executing', '/', 'submitted', 'jobs', 'and', 'exit', 'when', 'there', 'is', 'a', 'free', 'slot', '.']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/engine/engine.py#L269-L291
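The core of the polling pattern, reduced to a hedged standalone sketch; jobs_fn is a hypothetical callable standing in for logs.dbcmd(GET_JOBS), while psutil.pid_exists is the real API the function above relies on:

import time
import psutil

def wait_for_slot(job_id, jobs_fn, poll_time=5.0):
    """Block until no live job with a smaller id is ahead of us."""
    while True:
        # Drop queue entries whose owning process has died.
        live = [(jid, pid) for jid, pid in jobs_fn()
                if psutil.pid_exists(pid)]
        if not any(jid < job_id for jid, pid in live):
            return
        time.sleep(poll_time)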
4,636
helixyte/everest
everest/utils.py
generative
def generative(func): """ Marks an instance method as generative. """ def wrap(inst, *args, **kw): clone = type(inst).__new__(type(inst)) clone.__dict__ = inst.__dict__.copy() return func(clone, *args, **kw) return update_wrapper(wrap, func)
python
def generative(func): """ Marks an instance method as generative. """ def wrap(inst, *args, **kw): clone = type(inst).__new__(type(inst)) clone.__dict__ = inst.__dict__.copy() return func(clone, *args, **kw) return update_wrapper(wrap, func)
['def', 'generative', '(', 'func', ')', ':', 'def', 'wrap', '(', 'inst', ',', '*', 'args', ',', '*', '*', 'kw', ')', ':', 'clone', '=', 'type', '(', 'inst', ')', '.', '__new__', '(', 'type', '(', 'inst', ')', ')', 'clone', '.', '__dict__', '=', 'inst', '.', '__dict__', '.', 'copy', '(', ')', 'return', 'func', '(', 'clone', ',', '*', 'args', ',', '*', '*', 'kw', ')', 'return', 'update_wrapper', '(', 'wrap', ',', 'func', ')']
Marks an instance method as generative.
['Marks', 'an', 'instance', 'method', 'as', 'generative', '.']
train
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/utils.py#L506-L514
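The decorator is self-contained, so its behavior is easy to demonstrate: each call on a decorated method operates on a shallow clone, leaving the receiver untouched. The Query class is invented for the demo:

from functools import update_wrapper

def generative(func):
    def wrap(inst, *args, **kw):
        clone = type(inst).__new__(type(inst))
        clone.__dict__ = inst.__dict__.copy()
        return func(clone, *args, **kw)
    return update_wrapper(wrap, func)

class Query:
    def __init__(self):
        self.filters = []

    @generative
    def where(self, clause):
        # Rebind rather than mutate: the clone's __dict__ is a shallow copy,
        # so in-place mutation would leak back into the original instance.
        self.filters = self.filters + [clause]
        return self

q = Query()
q2 = q.where('x > 1')
assert q.filters == [] and q2.filters == ['x > 1']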
4,637
titusjan/argos
argos/inspector/qtplugins/table.py
TableInspectorModel.setFont
def setFont(self, font): """ Sets the font that will be returned when data() is called with the Qt.FontRole. Can be a QFont or None if no font is set. """ check_class(font, QtGui.QFont, allow_none=True) self._font = font
python
def setFont(self, font): """ Sets the font that will be returned when data() is called with the Qt.FontRole. Can be a QFont or None if no font is set. """ check_class(font, QtGui.QFont, allow_none=True) self._font = font
['def', 'setFont', '(', 'self', ',', 'font', ')', ':', 'check_class', '(', 'font', ',', 'QtGui', '.', 'QFont', ',', 'allow_none', '=', 'True', ')', 'self', '.', '_font', '=', 'font']
Sets the font that will be returned when data() is called with the Qt.FontRole. Can be a QFont or None if no font is set.
['Sets', 'the', 'font', 'that', 'will', 'be', 'returned', 'when', 'data', '()', 'is', 'called', 'with', 'the', 'Qt', '.', 'FontRole', '.', 'Can', 'be', 'a', 'QFont', 'or', 'None', 'if', 'no', 'font', 'is', 'set', '.']
train
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/inspector/qtplugins/table.py#L601-L606
4,638
crocs-muni/roca
roca/detect.py
RocaFingerprinter.has_fingerprint_moduli
def has_fingerprint_moduli(self, modulus): """ Returns true if the fingerprint was detected in the key :param modulus: :return: """ if not self.is_acceptable_modulus(modulus): return False self.tested += 1 for i in range(0, len(self.primes)): if (1 << (modulus % self.primes[i])) & self.prints[i] == 0: return False self.found += 1 return True
python
def has_fingerprint_moduli(self, modulus): """ Returns true if the fingerprint was detected in the key :param modulus: :return: """ if not self.is_acceptable_modulus(modulus): return False self.tested += 1 for i in range(0, len(self.primes)): if (1 << (modulus % self.primes[i])) & self.prints[i] == 0: return False self.found += 1 return True
['def', 'has_fingerprint_moduli', '(', 'self', ',', 'modulus', ')', ':', 'if', 'not', 'self', '.', 'is_acceptable_modulus', '(', 'modulus', ')', ':', 'return', 'False', 'self', '.', 'tested', '+=', '1', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'self', '.', 'primes', ')', ')', ':', 'if', '(', '1', '<<', '(', 'modulus', '%', 'self', '.', 'primes', '[', 'i', ']', ')', ')', '&', 'self', '.', 'prints', '[', 'i', ']', '==', '0', ':', 'return', 'False', 'self', '.', 'found', '+=', '1', 'return', 'True']
Returns true if the fingerprint was detected in the key :param modulus: :return:
['Returns', 'true', 'if', 'the', 'fingerprint', 'was', 'detected', 'in', 'the', 'key', ':', 'param', 'modulus', ':', ':', 'return', ':']
train
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L937-L952
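The mechanics of the residue-bitmask test can be shown with toy data; the primes and residue sets below are invented for illustration and are not the real ROCA tables:

primes = [3, 5, 7]
allowed = [{1, 2}, {1, 4}, {1, 2, 4}]  # toy residue sets, one per prime
prints = [sum(1 << r for r in residues) for residues in allowed]

def matches_fingerprint(modulus):
    # Reject as soon as one residue falls outside its allowed set.
    return all((1 << (modulus % p)) & mask
               for p, mask in zip(primes, prints))

# 11 % 3 == 2, 11 % 5 == 1, 11 % 7 == 4 -> every residue is allowed.
assert matches_fingerprint(11)
# 3 % 3 == 0 is not in {1, 2} -> rejected on the first prime.
assert not matches_fingerprint(3)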
4,639
yyuu/botornado
boto/storage_uri.py
FileStorageUri.names_singleton
def names_singleton(self):
    """Returns True if this URI names a file or if the URI represents an
    input/output stream.
    """
    if self.stream:
        return True
    else:
        return os.path.isfile(self.object_name)
python
def names_singleton(self):
    """Returns True if this URI names a file or if the URI represents an
    input/output stream.
    """
    if self.stream:
        return True
    else:
        return os.path.isfile(self.object_name)
['def', 'names_singleton', '(', 'self', ')', ':', 'if', 'self', '.', 'stream', ':', 'return', 'True', 'else', ':', 'return', 'os', '.', 'path', '.', 'isfile', '(', 'self', '.', 'object_name', ')']
Returns True if this URI names a file or if the URI represents an input/output stream.
['Returns', 'True', 'if', 'this', 'URI', 'names', 'a', 'file', 'or', 'if', 'the', 'URI', 'represents', 'an', 'input', '/', 'output', 'stream', '.']
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/storage_uri.py#L493-L500
4,640
StackStorm/pybind
pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction/__init__.py
interface_refresh_reduction._set_bundle_message
def _set_bundle_message(self, v, load=False): """ Setter method for bundle_message, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction/bundle_message (container) If this variable is read-only (config: false) in the source YANG file, then _set_bundle_message is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_bundle_message() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=bundle_message.bundle_message, is_container='container', presence=True, yang_name="bundle-message", rest_name="bundle-message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Refresh Reduction bundle messaging feature', u'alt-name': u'bundle-message'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """bundle_message must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=bundle_message.bundle_message, is_container='container', presence=True, yang_name="bundle-message", rest_name="bundle-message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Refresh Reduction bundle messaging feature', u'alt-name': u'bundle-message'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""", }) self.__bundle_message = t if hasattr(self, '_set'): self._set()
python
def _set_bundle_message(self, v, load=False): """ Setter method for bundle_message, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction/bundle_message (container) If this variable is read-only (config: false) in the source YANG file, then _set_bundle_message is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_bundle_message() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=bundle_message.bundle_message, is_container='container', presence=True, yang_name="bundle-message", rest_name="bundle-message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Refresh Reduction bundle messaging feature', u'alt-name': u'bundle-message'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """bundle_message must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=bundle_message.bundle_message, is_container='container', presence=True, yang_name="bundle-message", rest_name="bundle-message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Refresh Reduction bundle messaging feature', u'alt-name': u'bundle-message'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""", }) self.__bundle_message = t if hasattr(self, '_set'): self._set()
['def', '_set_bundle_message', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'bundle_message', '.', 'bundle_message', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'True', ',', 'yang_name', '=', '"bundle-message"', ',', 'rest_name', '=', '"bundle-message"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Refresh Reduction bundle messaging feature'", ',', "u'alt-name'", ':', "u'bundle-message'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-mpls'", ',', 'defining_module', '=', "'brocade-mpls'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""bundle_message must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=bundle_message.bundle_message, is_container=\'container\', presence=True, yang_name="bundle-message", rest_name="bundle-message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Refresh Reduction bundle messaging feature\', u\'alt-name\': u\'bundle-message\'}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__bundle_message', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for bundle_message, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction/bundle_message (container) If this variable is read-only (config: false) in the source YANG file, then _set_bundle_message is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_bundle_message() directly.
['Setter', 'method', 'for', 'bundle_message', 'mapped', 'from', 'YANG', 'variable', '/', 'mpls_config', '/', 'router', '/', 'mpls', '/', 'mpls_cmds_holder', '/', 'mpls_interface', '/', 'rsvp', '/', 'interface_refresh_reduction', '/', 'bundle_message', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_bundle_message', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_bundle_message', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_refresh_reduction/__init__.py#L161-L182
4,641
tanghaibao/goatools
goatools/gosubdag/rpt/write_hierarchy.py
WrHierGO._get_namespace2go2term
def _get_namespace2go2term(go2terms): """Group GO IDs by namespace.""" namespace2go2term = cx.defaultdict(dict) for goid, goterm in go2terms.items(): namespace2go2term[goterm.namespace][goid] = goterm return namespace2go2term
python
def _get_namespace2go2term(go2terms): """Group GO IDs by namespace.""" namespace2go2term = cx.defaultdict(dict) for goid, goterm in go2terms.items(): namespace2go2term[goterm.namespace][goid] = goterm return namespace2go2term
['def', '_get_namespace2go2term', '(', 'go2terms', ')', ':', 'namespace2go2term', '=', 'cx', '.', 'defaultdict', '(', 'dict', ')', 'for', 'goid', ',', 'goterm', 'in', 'go2terms', '.', 'items', '(', ')', ':', 'namespace2go2term', '[', 'goterm', '.', 'namespace', ']', '[', 'goid', ']', '=', 'goterm', 'return', 'namespace2go2term']
Group GO IDs by namespace.
['Group', 'GO', 'IDs', 'by', 'namespace', '.']
train
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/rpt/write_hierarchy.py#L71-L76
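The grouping idiom in isolation; the GO IDs and namespaces below are made up, and Term is a hypothetical stand-in for a GO term record:

import collections as cx

class Term:
    """Stand-in for a GO term record with a namespace attribute."""
    def __init__(self, namespace):
        self.namespace = namespace

go2terms = {'GO:0000001': Term('biological_process'),
            'GO:0000002': Term('molecular_function')}

ns2go2term = cx.defaultdict(dict)
for goid, term in go2terms.items():
    ns2go2term[term.namespace][goid] = term

assert set(ns2go2term) == {'biological_process', 'molecular_function'}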
4,642
aws/aws-iot-device-sdk-python
AWSIoTPythonSDK/MQTTLib.py
AWSIoTMQTTClient.configureEndpoint
def configureEndpoint(self, hostName, portNumber): """ **Description** Used to configure the host name and port number the client tries to connect to. Should be called before connect. **Syntax** .. code:: python myAWSIoTMQTTClient.configureEndpoint("random.iot.region.amazonaws.com", 8883) **Parameters** *hostName* - String that denotes the host name of the user-specific AWS IoT endpoint. *portNumber* - Integer that denotes the port number to connect to. Could be :code:`8883` for TLSv1.2 Mutual Authentication or :code:`443` for Websocket SigV4 and TLSv1.2 Mutual Authentication with ALPN extension. **Returns** None """ endpoint_provider = EndpointProvider() endpoint_provider.set_host(hostName) endpoint_provider.set_port(portNumber) self._mqtt_core.configure_endpoint(endpoint_provider) if portNumber == 443 and not self._mqtt_core.use_wss(): self._mqtt_core.configure_alpn_protocols()
python
def configureEndpoint(self, hostName, portNumber): """ **Description** Used to configure the host name and port number the client tries to connect to. Should be called before connect. **Syntax** .. code:: python myAWSIoTMQTTClient.configureEndpoint("random.iot.region.amazonaws.com", 8883) **Parameters** *hostName* - String that denotes the host name of the user-specific AWS IoT endpoint. *portNumber* - Integer that denotes the port number to connect to. Could be :code:`8883` for TLSv1.2 Mutual Authentication or :code:`443` for Websocket SigV4 and TLSv1.2 Mutual Authentication with ALPN extension. **Returns** None """ endpoint_provider = EndpointProvider() endpoint_provider.set_host(hostName) endpoint_provider.set_port(portNumber) self._mqtt_core.configure_endpoint(endpoint_provider) if portNumber == 443 and not self._mqtt_core.use_wss(): self._mqtt_core.configure_alpn_protocols()
['def', 'configureEndpoint', '(', 'self', ',', 'hostName', ',', 'portNumber', ')', ':', 'endpoint_provider', '=', 'EndpointProvider', '(', ')', 'endpoint_provider', '.', 'set_host', '(', 'hostName', ')', 'endpoint_provider', '.', 'set_port', '(', 'portNumber', ')', 'self', '.', '_mqtt_core', '.', 'configure_endpoint', '(', 'endpoint_provider', ')', 'if', 'portNumber', '==', '443', 'and', 'not', 'self', '.', '_mqtt_core', '.', 'use_wss', '(', ')', ':', 'self', '.', '_mqtt_core', '.', 'configure_alpn_protocols', '(', ')']
**Description** Used to configure the host name and port number the client tries to connect to. Should be called before connect. **Syntax** .. code:: python myAWSIoTMQTTClient.configureEndpoint("random.iot.region.amazonaws.com", 8883) **Parameters** *hostName* - String that denotes the host name of the user-specific AWS IoT endpoint. *portNumber* - Integer that denotes the port number to connect to. Could be :code:`8883` for TLSv1.2 Mutual Authentication or :code:`443` for Websocket SigV4 and TLSv1.2 Mutual Authentication with ALPN extension. **Returns** None
['**', 'Description', '**']
train
https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/MQTTLib.py#L140-L171
4,643
aio-libs/aiohttp
aiohttp/multipart.py
BodyPartReader.get_charset
def get_charset(self, default: str) -> str: """Returns charset parameter from Content-Type header or default.""" ctype = self.headers.get(CONTENT_TYPE, '') mimetype = parse_mimetype(ctype) return mimetype.parameters.get('charset', default)
python
def get_charset(self, default: str) -> str: """Returns charset parameter from Content-Type header or default.""" ctype = self.headers.get(CONTENT_TYPE, '') mimetype = parse_mimetype(ctype) return mimetype.parameters.get('charset', default)
['def', 'get_charset', '(', 'self', ',', 'default', ':', 'str', ')', '->', 'str', ':', 'ctype', '=', 'self', '.', 'headers', '.', 'get', '(', 'CONTENT_TYPE', ',', "''", ')', 'mimetype', '=', 'parse_mimetype', '(', 'ctype', ')', 'return', 'mimetype', '.', 'parameters', '.', 'get', '(', "'charset'", ',', 'default', ')']
Returns charset parameter from Content-Type header or default.
['Returns', 'charset', 'parameter', 'from', 'Content', '-', 'Type', 'header', 'or', 'default', '.']
train
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/multipart.py#L470-L474
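The same parse can be reproduced with the standard library; this sketch uses email.message.Message rather than aiohttp's internal parse_mimetype helper:

from email.message import Message

def charset_from_content_type(ctype, default='utf-8'):
    # Message.get_param handles quoting and parameter splitting for us.
    msg = Message()
    msg['Content-Type'] = ctype
    return msg.get_param('charset', default)

assert charset_from_content_type('text/html; charset=ISO-8859-1') == 'ISO-8859-1'
assert charset_from_content_type('application/json') == 'utf-8'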
4,644
juju/python-libjuju
juju/client/connector.py
Connector.connect_controller
async def connect_controller(self, controller_name=None):
    """Connect to a controller by name. If the name is empty, it
    connects to the current controller.
    """
    if not controller_name:
        controller_name = self.jujudata.current_controller()
    if not controller_name:
        raise JujuConnectionError('No current controller')

    controller = self.jujudata.controllers()[controller_name]
    # TODO change Connection so we can pass all the endpoints
    # instead of just the first.
    endpoint = controller['api-endpoints'][0]
    accounts = self.jujudata.accounts().get(controller_name, {})

    await self.connect(
        endpoint=endpoint,
        uuid=None,
        username=accounts.get('user'),
        password=accounts.get('password'),
        cacert=controller.get('ca-cert'),
        bakery_client=self.bakery_client_for_controller(controller_name),
    )
    self.controller_name = controller_name
python
async def connect_controller(self, controller_name=None):
    """Connect to a controller by name. If the name is empty, it
    connects to the current controller.
    """
    if not controller_name:
        controller_name = self.jujudata.current_controller()
    if not controller_name:
        raise JujuConnectionError('No current controller')

    controller = self.jujudata.controllers()[controller_name]
    # TODO change Connection so we can pass all the endpoints
    # instead of just the first.
    endpoint = controller['api-endpoints'][0]
    accounts = self.jujudata.accounts().get(controller_name, {})

    await self.connect(
        endpoint=endpoint,
        uuid=None,
        username=accounts.get('user'),
        password=accounts.get('password'),
        cacert=controller.get('ca-cert'),
        bakery_client=self.bakery_client_for_controller(controller_name),
    )
    self.controller_name = controller_name
['async', 'def', 'connect_controller', '(', 'self', ',', 'controller_name', '=', 'None', ')', ':', 'if', 'not', 'controller_name', ':', 'controller_name', '=', 'self', '.', 'jujudata', '.', 'current_controller', '(', ')', 'if', 'not', 'controller_name', ':', 'raise', 'JujuConnectionError', '(', "'No current controller'", ')', 'controller', '=', 'self', '.', 'jujudata', '.', 'controllers', '(', ')', '[', 'controller_name', ']', '# TODO change Connection so we can pass all the endpoints', '# instead of just the first.', 'endpoint', '=', 'controller', '[', "'api-endpoints'", ']', '[', '0', ']', 'accounts', '=', 'self', '.', 'jujudata', '.', 'accounts', '(', ')', '.', 'get', '(', 'controller_name', ',', '{', '}', ')', 'await', 'self', '.', 'connect', '(', 'endpoint', '=', 'endpoint', ',', 'uuid', '=', 'None', ',', 'username', '=', 'accounts', '.', 'get', '(', "'user'", ')', ',', 'password', '=', 'accounts', '.', 'get', '(', "'password'", ')', ',', 'cacert', '=', 'controller', '.', 'get', '(', "'ca-cert'", ')', ',', 'bakery_client', '=', 'self', '.', 'bakery_client_for_controller', '(', 'controller_name', ')', ',', ')', 'self', '.', 'controller_name', '=', 'controller_name']
Connect to a controller by name. If the name is empty, it connects to the current controller.
['Connect', 'to', 'a', 'controller', 'by', 'name', '.', 'If', 'the', 'name', 'is', 'empty', 'it', 'connects', 'to', 'the', 'current', 'controller', '.']
train
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/connector.py#L78-L101
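In practice callers rarely touch Connector directly; a hedged sketch of the higher-level entry point, with API names taken from the python-libjuju README:

import asyncio
from juju.controller import Controller

async def main():
    controller = Controller()
    # With no arguments this resolves the current controller from the
    # local Juju client data, just as connect_controller does above.
    await controller.connect()
    try:
        models = await controller.list_models()
        print(models)
    finally:
        await controller.disconnect()

asyncio.run(main())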
4,645
noahbenson/pimms
pimms/util.py
LazyPMap.is_normal
def is_normal(self, k): ''' lmap.is_normal(k) yields True if k is a key in the given lazy map lmap that is neither lazy nor a formerly-lazy memoized key. ''' v = ps.PMap.__getitem__(self, k) if not isinstance(v, (types.FunctionType, partial)) or [] != getargspec_py27like(v)[0]: return True else: return False
python
def is_normal(self, k): ''' lmap.is_normal(k) yields True if k is a key in the given lazy map lmap that is neither lazy nor a formerly-lazy memoized key. ''' v = ps.PMap.__getitem__(self, k) if not isinstance(v, (types.FunctionType, partial)) or [] != getargspec_py27like(v)[0]: return True else: return False
['def', 'is_normal', '(', 'self', ',', 'k', ')', ':', 'v', '=', 'ps', '.', 'PMap', '.', '__getitem__', '(', 'self', ',', 'k', ')', 'if', 'not', 'isinstance', '(', 'v', ',', '(', 'types', '.', 'FunctionType', ',', 'partial', ')', ')', 'or', '[', ']', '!=', 'getargspec_py27like', '(', 'v', ')', '[', '0', ']', ':', 'return', 'True', 'else', ':', 'return', 'False']
lmap.is_normal(k) yields True if k is a key in the given lazy map lmap that is neither lazy nor a formerly-lazy memoized key.
['lmap', '.', 'is_normal', '(', 'k', ')', 'yields', 'True', 'if', 'k', 'is', 'a', 'key', 'in', 'the', 'given', 'lazy', 'map', 'lmap', 'that', 'is', 'neither', 'lazy', 'nor', 'a', 'formerly', '-', 'lazy', 'memoized', 'key', '.']
train
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/util.py#L674-L683
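The check boils down to "is this stored value a zero-argument thunk?"; a standalone approximation using inspect.signature in place of pimms' getargspec_py27like helper (is_normal is essentially the negation of this test on the raw stored value):

import types
import inspect
from functools import partial

def is_lazy_value(v):
    """True if v looks like a lazy thunk: a function/partial taking no args."""
    return (isinstance(v, (types.FunctionType, partial))
            and len(inspect.signature(v).parameters) == 0)

assert is_lazy_value(lambda: 42)
assert not is_lazy_value(42)
assert not is_lazy_value(lambda x: x)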
4,646
pdkit/pdkit
pdkit/utils.py
plot_segmentation
def plot_segmentation(data, peaks, segment_indexes, figsize=(10, 5)):
    """ Will plot the data and segmentation based on the peaks and segment indexes.

        :param 1d-array data: The original axis of the data that was segmented into sections.
        :param 1d-array peaks: Peaks of the data.
        :param 1d-array segment_indexes: These are the different classes, corresponding to each peak.

        Will not return anything; instead, it will plot the data and peaks with different colors for each class.
    """
    fig, ax = plt.subplots(figsize=figsize)

    plt.plot(data)

    for segment in np.unique(segment_indexes):
        plt.plot(peaks[np.where(segment_indexes == segment)[0]],
                 data[peaks][np.where(segment_indexes == segment)[0]],
                 'o')
    plt.show()
python
def plot_segmentation(data, peaks, segment_indexes, figsize=(10, 5)):
    """ Will plot the data and segmentation based on the peaks and segment indexes.

        :param 1d-array data: The original axis of the data that was segmented into sections.
        :param 1d-array peaks: Peaks of the data.
        :param 1d-array segment_indexes: These are the different classes, corresponding to each peak.

        Will not return anything; instead, it will plot the data and peaks with different colors for each class.
    """
    fig, ax = plt.subplots(figsize=figsize)

    plt.plot(data)

    for segment in np.unique(segment_indexes):
        plt.plot(peaks[np.where(segment_indexes == segment)[0]],
                 data[peaks][np.where(segment_indexes == segment)[0]],
                 'o')
    plt.show()
['def', 'plot_segmentation', '(', 'data', ',', 'peaks', ',', 'segment_indexes', ',', 'figsize', '=', '(', '10', ',', '5', ')', ')', ':', 'fig', ',', 'ax', '=', 'plt', '.', 'subplots', '(', 'figsize', '=', 'figsize', ')', 'plt', '.', 'plot', '(', 'data', ')', 'for', 'segment', 'in', 'np', '.', 'unique', '(', 'segment_indexes', ')', ':', 'plt', '.', 'plot', '(', 'peaks', '[', 'np', '.', 'where', '(', 'segment_indexes', '==', 'segment', ')', '[', '0', ']', ']', ',', 'data', '[', 'peaks', ']', '[', 'np', '.', 'where', '(', 'segment_indexes', '==', 'segment', ')', '[', '0', ']', ']', ',', "'o'", ')', 'plt', '.', 'show', '(', ')']
Will plot the data and segmentation based on the peaks and segment indexes.

:param 1d-array data: The original axis of the data that was segmented into sections.
:param 1d-array peaks: Peaks of the data.
:param 1d-array segment_indexes: These are the different classes, corresponding to each peak.

Will not return anything; instead, it will plot the data and peaks with different colors for each class.
['Will', 'plot', 'the', 'data', 'and', 'segmentation', 'based', 'on', 'the', 'peaks', 'and', 'segment', 'indexes', '.', ':', 'param', '1d', '-', 'array', 'data', ':', 'The', 'original', 'axis', 'of', 'the', 'data', 'that', 'was', 'segmented', 'into', 'sections', '.', ':', 'param', '1d', '-', 'array', 'peaks', ':', 'Peaks', 'of', 'the', 'data', '.', ':', 'param', '1d', '-', 'array', 'segment_indexes', ':', 'These', 'are', 'the', 'different', 'classes', 'corresponding', 'to', 'each', 'peak', '.', 'Will', 'not', 'return', 'anything', 'instead', 'it', 'will', 'plot', 'the', 'data', 'and', 'peaks', 'with', 'different', 'colors', 'for', 'each', 'class', '.']
train
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/utils.py#L717-L733
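A hedged usage sketch with synthetic data; scipy.signal.find_peaks supplies the peaks, and the class labels are arbitrary for the demo:

import numpy as np
from scipy.signal import find_peaks
from pdkit.utils import plot_segmentation

t = np.linspace(0, 10, 1000)
data = np.sin(2 * np.pi * t)

peaks, _ = find_peaks(data, height=0.5)
segment_indexes = np.arange(len(peaks)) % 2  # toy: alternate two classes

plot_segmentation(data, peaks, segment_indexes)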
4,647
ilevkivskyi/typing_inspect
typing_inspect.py
is_callable_type
def is_callable_type(tp): """Test if the type is a generic callable type, including subclasses excluding non-generic types and callables. Examples:: is_callable_type(int) == False is_callable_type(type) == False is_callable_type(Callable) == True is_callable_type(Callable[..., int]) == True is_callable_type(Callable[[int, int], Iterable[str]]) == True class MyClass(Callable[[int], int]): ... is_callable_type(MyClass) == True For more general tests use callable(), for more precise test (excluding subclasses) use:: get_origin(tp) is collections.abc.Callable # Callable prior to Python 3.7 """ if NEW_TYPING: return (tp is Callable or isinstance(tp, _GenericAlias) and tp.__origin__ is collections.abc.Callable or isinstance(tp, type) and issubclass(tp, Generic) and issubclass(tp, collections.abc.Callable)) return type(tp) is CallableMeta
python
def is_callable_type(tp): """Test if the type is a generic callable type, including subclasses excluding non-generic types and callables. Examples:: is_callable_type(int) == False is_callable_type(type) == False is_callable_type(Callable) == True is_callable_type(Callable[..., int]) == True is_callable_type(Callable[[int, int], Iterable[str]]) == True class MyClass(Callable[[int], int]): ... is_callable_type(MyClass) == True For more general tests use callable(), for more precise test (excluding subclasses) use:: get_origin(tp) is collections.abc.Callable # Callable prior to Python 3.7 """ if NEW_TYPING: return (tp is Callable or isinstance(tp, _GenericAlias) and tp.__origin__ is collections.abc.Callable or isinstance(tp, type) and issubclass(tp, Generic) and issubclass(tp, collections.abc.Callable)) return type(tp) is CallableMeta
['def', 'is_callable_type', '(', 'tp', ')', ':', 'if', 'NEW_TYPING', ':', 'return', '(', 'tp', 'is', 'Callable', 'or', 'isinstance', '(', 'tp', ',', '_GenericAlias', ')', 'and', 'tp', '.', '__origin__', 'is', 'collections', '.', 'abc', '.', 'Callable', 'or', 'isinstance', '(', 'tp', ',', 'type', ')', 'and', 'issubclass', '(', 'tp', ',', 'Generic', ')', 'and', 'issubclass', '(', 'tp', ',', 'collections', '.', 'abc', '.', 'Callable', ')', ')', 'return', 'type', '(', 'tp', ')', 'is', 'CallableMeta']
Test if the type is a generic callable type, including subclasses excluding non-generic types and callables. Examples:: is_callable_type(int) == False is_callable_type(type) == False is_callable_type(Callable) == True is_callable_type(Callable[..., int]) == True is_callable_type(Callable[[int, int], Iterable[str]]) == True class MyClass(Callable[[int], int]): ... is_callable_type(MyClass) == True For more general tests use callable(), for more precise test (excluding subclasses) use:: get_origin(tp) is collections.abc.Callable # Callable prior to Python 3.7
['Test', 'if', 'the', 'type', 'is', 'a', 'generic', 'callable', 'type', 'including', 'subclasses', 'excluding', 'non', '-', 'generic', 'types', 'and', 'callables', '.', 'Examples', '::']
train
https://github.com/ilevkivskyi/typing_inspect/blob/fd81278cc440b6003f8298bcb22d5bc0f82ee3cd/typing_inspect.py#L66-L90
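Usage mirrors the docstring's own examples, assuming the typing_inspect package is installed:

from typing import Callable, Iterable
from typing_inspect import is_callable_type

assert not is_callable_type(int)
assert is_callable_type(Callable)
assert is_callable_type(Callable[..., int])
assert is_callable_type(Callable[[int, int], Iterable[str]])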
4,648
inasafe/inasafe
safe/impact_function/impact_function.py
ImpactFunction.intersect_exposure_and_aggregate_hazard
def intersect_exposure_and_aggregate_hazard(self):
    """This function intersects the exposure with the aggregate hazard.

    If the exposure is a continuous raster exposure, this function will set
    the aggregate hazard layer. Otherwise, this function will set the impact
    layer.
    """
    LOGGER.info('ANALYSIS : Intersect Exposure and Aggregate Hazard')

    if is_raster_layer(self.exposure):
        self.set_state_process(
            'impact function',
            'Zonal stats between exposure and aggregate hazard')

        # Be careful, our own zonal stats will take care of different
        # projections between the two layers. We don't want to reproject
        # rasters.
        # noinspection PyTypeChecker
        self._aggregate_hazard_impacted = zonal_stats(
            self.exposure, self._aggregate_hazard_impacted)
        self.debug_layer(self._aggregate_hazard_impacted)

        self.set_state_process('impact function', 'Add default values')
        self._aggregate_hazard_impacted = add_default_values(
            self._aggregate_hazard_impacted)
        self.debug_layer(self._aggregate_hazard_impacted)

        # I know it's redundant, it's just to be sure that we don't have
        # any impact layer for that IF.
        self._exposure_summary = None

    else:
        indivisible_keys = [f['key'] for f in indivisible_exposure]
        geometry = self.exposure.geometryType()
        exposure = self.exposure.keywords.get('exposure')
        is_divisible = exposure not in indivisible_keys

        if geometry in [
                QgsWkbTypes.LineGeometry,
                QgsWkbTypes.PolygonGeometry] and is_divisible:

            self.set_state_process(
                'exposure',
                'Make exposure layer valid')
            self._exposure = clean_layer(self.exposure)
            self.debug_layer(self.exposure)

            self.set_state_process(
                'impact function',
                'Make aggregate hazard layer valid')
            self._aggregate_hazard_impacted = clean_layer(
                self._aggregate_hazard_impacted)
            self.debug_layer(self._aggregate_hazard_impacted)

            self.set_state_process(
                'impact function',
                'Intersect divisible features with the aggregate hazard')
            self._exposure_summary = intersection(
                self._exposure, self._aggregate_hazard_impacted)
            self.debug_layer(self._exposure_summary)

            # If the layer has the size field, it means we need to
            # recompute counts based on the old and new size.
            fields = self._exposure_summary.keywords['inasafe_fields']
            if size_field['key'] in fields:
                self.set_state_process(
                    'impact function',
                    'Recompute counts')
                LOGGER.info(
                    'InaSAFE will not use these counts, as we have ratios '
                    'since the exposure preparation step.')
                self._exposure_summary = recompute_counts(
                    self._exposure_summary)
                self.debug_layer(self._exposure_summary)

        else:
            self.set_state_process(
                'impact function',
                'Highest class of hazard is assigned to the exposure')
            self._exposure_summary = assign_highest_value(
                self._exposure, self._aggregate_hazard_impacted)
            self.debug_layer(self._exposure_summary)

        # set title using definition
        # the title will be overwritten anyway by standard title
        # set this as fallback.
        self._exposure_summary.keywords['title'] = (
            layer_purpose_exposure_summary['name'])
        if qgis_version() >= 21800:
            self._exposure_summary.setName(
                self._exposure_summary.keywords['title'])
        else:
            self._exposure_summary.setLayerName(
                self._exposure_summary.keywords['title'])
python
def intersect_exposure_and_aggregate_hazard(self):
    """This function intersects the exposure with the aggregate hazard.

    If the exposure is a continuous raster exposure, this function will set
    the aggregate hazard layer. Otherwise, this function will set the impact
    layer.
    """
    LOGGER.info('ANALYSIS : Intersect Exposure and Aggregate Hazard')

    if is_raster_layer(self.exposure):
        self.set_state_process(
            'impact function',
            'Zonal stats between exposure and aggregate hazard')

        # Be careful, our own zonal stats will take care of different
        # projections between the two layers. We don't want to reproject
        # rasters.
        # noinspection PyTypeChecker
        self._aggregate_hazard_impacted = zonal_stats(
            self.exposure, self._aggregate_hazard_impacted)
        self.debug_layer(self._aggregate_hazard_impacted)

        self.set_state_process('impact function', 'Add default values')
        self._aggregate_hazard_impacted = add_default_values(
            self._aggregate_hazard_impacted)
        self.debug_layer(self._aggregate_hazard_impacted)

        # I know it's redundant, it's just to be sure that we don't have
        # any impact layer for that IF.
        self._exposure_summary = None

    else:
        indivisible_keys = [f['key'] for f in indivisible_exposure]
        geometry = self.exposure.geometryType()
        exposure = self.exposure.keywords.get('exposure')
        is_divisible = exposure not in indivisible_keys

        if geometry in [
                QgsWkbTypes.LineGeometry,
                QgsWkbTypes.PolygonGeometry] and is_divisible:

            self.set_state_process(
                'exposure',
                'Make exposure layer valid')
            self._exposure = clean_layer(self.exposure)
            self.debug_layer(self.exposure)

            self.set_state_process(
                'impact function',
                'Make aggregate hazard layer valid')
            self._aggregate_hazard_impacted = clean_layer(
                self._aggregate_hazard_impacted)
            self.debug_layer(self._aggregate_hazard_impacted)

            self.set_state_process(
                'impact function',
                'Intersect divisible features with the aggregate hazard')
            self._exposure_summary = intersection(
                self._exposure, self._aggregate_hazard_impacted)
            self.debug_layer(self._exposure_summary)

            # If the layer has the size field, it means we need to
            # recompute counts based on the old and new size.
            fields = self._exposure_summary.keywords['inasafe_fields']
            if size_field['key'] in fields:
                self.set_state_process(
                    'impact function',
                    'Recompute counts')
                LOGGER.info(
                    'InaSAFE will not use these counts, as we have ratios '
                    'since the exposure preparation step.')
                self._exposure_summary = recompute_counts(
                    self._exposure_summary)
                self.debug_layer(self._exposure_summary)

        else:
            self.set_state_process(
                'impact function',
                'Highest class of hazard is assigned to the exposure')
            self._exposure_summary = assign_highest_value(
                self._exposure, self._aggregate_hazard_impacted)
            self.debug_layer(self._exposure_summary)

        # set title using definition
        # the title will be overwritten anyway by standard title
        # set this as fallback.
        self._exposure_summary.keywords['title'] = (
            layer_purpose_exposure_summary['name'])
        if qgis_version() >= 21800:
            self._exposure_summary.setName(
                self._exposure_summary.keywords['title'])
        else:
            self._exposure_summary.setLayerName(
                self._exposure_summary.keywords['title'])
['def', 'intersect_exposure_and_aggregate_hazard', '(', 'self', ')', ':', 'LOGGER', '.', 'info', '(', "'ANALYSIS : Intersect Exposure and Aggregate Hazard'", ')', 'if', 'is_raster_layer', '(', 'self', '.', 'exposure', ')', ':', 'self', '.', 'set_state_process', '(', "'impact function'", ',', "'Zonal stats between exposure and aggregate hazard'", ')', '# Be careful, our own zonal stats will take care of different', "# projections between the two layers. We don't want to reproject", '# rasters.', '# noinspection PyTypeChecker', 'self', '.', '_aggregate_hazard_impacted', '=', 'zonal_stats', '(', 'self', '.', 'exposure', ',', 'self', '.', '_aggregate_hazard_impacted', ')', 'self', '.', 'debug_layer', '(', 'self', '.', '_aggregate_hazard_impacted', ')', 'self', '.', 'set_state_process', '(', "'impact function'", ',', "'Add default values'", ')', 'self', '.', '_aggregate_hazard_impacted', '=', 'add_default_values', '(', 'self', '.', '_aggregate_hazard_impacted', ')', 'self', '.', 'debug_layer', '(', 'self', '.', '_aggregate_hazard_impacted', ')', "# I know it's redundant, it's just to be sure that we don't have", '# any impact layer for that IF.', 'self', '.', '_exposure_summary', '=', 'None', 'else', ':', 'indivisible_keys', '=', '[', 'f', '[', "'key'", ']', 'for', 'f', 'in', 'indivisible_exposure', ']', 'geometry', '=', 'self', '.', 'exposure', '.', 'geometryType', '(', ')', 'exposure', '=', 'self', '.', 'exposure', '.', 'keywords', '.', 'get', '(', "'exposure'", ')', 'is_divisible', '=', 'exposure', 'not', 'in', 'indivisible_keys', 'if', 'geometry', 'in', '[', 'QgsWkbTypes', '.', 'LineGeometry', ',', 'QgsWkbTypes', '.', 'PolygonGeometry', ']', 'and', 'is_divisible', ':', 'self', '.', 'set_state_process', '(', "'exposure'", ',', "'Make exposure layer valid'", ')', 'self', '.', '_exposure', '=', 'clean_layer', '(', 'self', '.', 'exposure', ')', 'self', '.', 'debug_layer', '(', 'self', '.', 'exposure', ')', 'self', '.', 'set_state_process', '(', "'impact function'", ',', "'Make aggregate hazard layer valid'", ')', 'self', '.', '_aggregate_hazard_impacted', '=', 'clean_layer', '(', 'self', '.', '_aggregate_hazard_impacted', ')', 'self', '.', 'debug_layer', '(', 'self', '.', '_aggregate_hazard_impacted', ')', 'self', '.', 'set_state_process', '(', "'impact function'", ',', "'Intersect divisible features with the aggregate hazard'", ')', 'self', '.', '_exposure_summary', '=', 'intersection', '(', 'self', '.', '_exposure', ',', 'self', '.', '_aggregate_hazard_impacted', ')', 'self', '.', 'debug_layer', '(', 'self', '.', '_exposure_summary', ')', '# If the layer has the size field, it means we need to', '# recompute counts based on the old and new size.', 'fields', '=', 'self', '.', '_exposure_summary', '.', 'keywords', '[', "'inasafe_fields'", ']', 'if', 'size_field', '[', "'key'", ']', 'in', 'fields', ':', 'self', '.', 'set_state_process', '(', "'impact function'", ',', "'Recompute counts'", ')', 'LOGGER', '.', 'info', '(', "'InaSAFE will not use these counts, as we have ratios '", "'since the exposure preparation step.'", ')', 'self', '.', '_exposure_summary', '=', 'recompute_counts', '(', 'self', '.', '_exposure_summary', ')', 'self', '.', 'debug_layer', '(', 'self', '.', '_exposure_summary', ')', 'else', ':', 'self', '.', 'set_state_process', '(', "'impact function'", ',', "'Highest class of hazard is assigned to the exposure'", ')', 'self', '.', '_exposure_summary', '=', 'assign_highest_value', '(', 'self', '.', '_exposure', ',', 'self', '.', '_aggregate_hazard_impacted', ')', 'self', '.', 'debug_layer', '(', 'self', '.', '_exposure_summary', ')', '# set title using definition', '# the title will be overwritten anyway by standard title', '# set this as fallback.', 'self', '.', '_exposure_summary', '.', 'keywords', '[', "'title'", ']', '=', '(', 'layer_purpose_exposure_summary', '[', "'name'", ']', ')', 'if', 'qgis_version', '(', ')', '>=', '21800', ':', 'self', '.', '_exposure_summary', '.', 'setName', '(', 'self', '.', '_exposure_summary', '.', 'keywords', '[', "'title'", ']', ')', 'else', ':', 'self', '.', '_exposure_summary', '.', 'setLayerName', '(', 'self', '.', '_exposure_summary', '.', 'keywords', '[', "'title'", ']', ')']
This function intersects the exposure with the aggregate hazard.

If the exposure is a continuous raster exposure, this function will set the aggregate hazard layer. Otherwise, this function will set the impact layer.
['This', 'function', 'intersects', 'the', 'exposure', 'with', 'the', 'aggregate', 'hazard', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/impact_function/impact_function.py#L2310-L2400
4,649
wiheto/teneto
teneto/timeseries/postprocess.py
postpro_fisher
def postpro_fisher(data, report=None): """ Performs fisher transform on everything in data. If report variable is passed, this is added to the report. """ if not report: report = {} # Due to rounding errors data[data < -0.99999999999999] = -1 data[data > 0.99999999999999] = 1 fisher_data = 0.5 * np.log((1 + data) / (1 - data)) report['fisher'] = {} report['fisher']['performed'] = 'yes' #report['fisher']['diagonal'] = 'zeroed' return fisher_data, report
python
def postpro_fisher(data, report=None): """ Performs fisher transform on everything in data. If report variable is passed, this is added to the report. """ if not report: report = {} # Due to rounding errors data[data < -0.99999999999999] = -1 data[data > 0.99999999999999] = 1 fisher_data = 0.5 * np.log((1 + data) / (1 - data)) report['fisher'] = {} report['fisher']['performed'] = 'yes' #report['fisher']['diagonal'] = 'zeroed' return fisher_data, report
['def', 'postpro_fisher', '(', 'data', ',', 'report', '=', 'None', ')', ':', 'if', 'not', 'report', ':', 'report', '=', '{', '}', '# Due to rounding errors', 'data', '[', 'data', '<', '-', '0.99999999999999', ']', '=', '-', '1', 'data', '[', 'data', '>', '0.99999999999999', ']', '=', '1', 'fisher_data', '=', '0.5', '*', 'np', '.', 'log', '(', '(', '1', '+', 'data', ')', '/', '(', '1', '-', 'data', ')', ')', 'report', '[', "'fisher'", ']', '=', '{', '}', 'report', '[', "'fisher'", ']', '[', "'performed'", ']', '=', "'yes'", "#report['fisher']['diagonal'] = 'zeroed'", 'return', 'fisher_data', ',', 'report']
Performs fisher transform on everything in data. If report variable is passed, this is added to the report.
['Performs', 'fisher', 'transform', 'on', 'everything', 'in', 'data', '.']
train
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/postprocess.py#L10-L25
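The transform itself is just the inverse hyperbolic tangent, which makes it easy to sanity-check in isolation:

import numpy as np

r = np.array([-0.8, -0.3, 0.0, 0.3, 0.8])
z = 0.5 * np.log((1 + r) / (1 - r))
# The closed form above is exactly arctanh on the open interval (-1, 1).
assert np.allclose(z, np.arctanh(r))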
4,650
nerdvegas/rez
src/rez/package_serialise.py
dump_package_data
def dump_package_data(data, buf, format_=FileFormat.py, skip_attributes=None): """Write package data to `buf`. Args: data (dict): Data source - must conform to `package_serialise_schema`. buf (file-like object): Destination stream. format_ (`FileFormat`): Format to dump data in. skip_attributes (list of str): List of attributes to not print. """ if format_ == FileFormat.txt: raise ValueError("'txt' format not supported for packages.") data_ = dict((k, v) for k, v in data.iteritems() if v is not None) data_ = package_serialise_schema.validate(data_) skip = set(skip_attributes or []) items = [] for key in package_key_order: if key not in skip: value = data_.pop(key, None) if value is not None: items.append((key, value)) # remaining are arbitrary keys for key, value in data_.iteritems(): if key not in skip: items.append((key, value)) dump_func = dump_functions[format_] dump_func(items, buf)
python
def dump_package_data(data, buf, format_=FileFormat.py, skip_attributes=None): """Write package data to `buf`. Args: data (dict): Data source - must conform to `package_serialise_schema`. buf (file-like object): Destination stream. format_ (`FileFormat`): Format to dump data in. skip_attributes (list of str): List of attributes to not print. """ if format_ == FileFormat.txt: raise ValueError("'txt' format not supported for packages.") data_ = dict((k, v) for k, v in data.iteritems() if v is not None) data_ = package_serialise_schema.validate(data_) skip = set(skip_attributes or []) items = [] for key in package_key_order: if key not in skip: value = data_.pop(key, None) if value is not None: items.append((key, value)) # remaining are arbitrary keys for key, value in data_.iteritems(): if key not in skip: items.append((key, value)) dump_func = dump_functions[format_] dump_func(items, buf)
['def', 'dump_package_data', '(', 'data', ',', 'buf', ',', 'format_', '=', 'FileFormat', '.', 'py', ',', 'skip_attributes', '=', 'None', ')', ':', 'if', 'format_', '==', 'FileFormat', '.', 'txt', ':', 'raise', 'ValueError', '(', '"\'txt\' format not supported for packages."', ')', 'data_', '=', 'dict', '(', '(', 'k', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'data', '.', 'iteritems', '(', ')', 'if', 'v', 'is', 'not', 'None', ')', 'data_', '=', 'package_serialise_schema', '.', 'validate', '(', 'data_', ')', 'skip', '=', 'set', '(', 'skip_attributes', 'or', '[', ']', ')', 'items', '=', '[', ']', 'for', 'key', 'in', 'package_key_order', ':', 'if', 'key', 'not', 'in', 'skip', ':', 'value', '=', 'data_', '.', 'pop', '(', 'key', ',', 'None', ')', 'if', 'value', 'is', 'not', 'None', ':', 'items', '.', 'append', '(', '(', 'key', ',', 'value', ')', ')', '# remaining are arbitrary keys', 'for', 'key', ',', 'value', 'in', 'data_', '.', 'iteritems', '(', ')', ':', 'if', 'key', 'not', 'in', 'skip', ':', 'items', '.', 'append', '(', '(', 'key', ',', 'value', ')', ')', 'dump_func', '=', 'dump_functions', '[', 'format_', ']', 'dump_func', '(', 'items', ',', 'buf', ')']
Write package data to `buf`. Args: data (dict): Data source - must conform to `package_serialise_schema`. buf (file-like object): Destination stream. format_ (`FileFormat`): Format to dump data in. skip_attributes (list of str): List of attributes to not print.
['Write', 'package', 'data', 'to', 'buf', '.']
train
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_serialise.py#L97-L126
4,651
The-Politico/politico-civic-election
election/models/election_type.py
ElectionType.save
def save(self, *args, **kwargs): """ **uid**: :code:`electiontype:{name}` """ self.uid = 'electiontype:{}'.format(self.slug) super(ElectionType, self).save(*args, **kwargs)
python
def save(self, *args, **kwargs): """ **uid**: :code:`electiontype:{name}` """ self.uid = 'electiontype:{}'.format(self.slug) super(ElectionType, self).save(*args, **kwargs)
['def', 'save', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'uid', '=', "'electiontype:{}'", '.', 'format', '(', 'self', '.', 'slug', ')', 'super', '(', 'ElectionType', ',', 'self', ')', '.', 'save', '(', '*', 'args', ',', '*', '*', 'kwargs', ')']
**uid**: :code:`electiontype:{name}`
['**', 'uid', '**', ':', ':', 'code', ':', 'electiontype', ':', '{', 'name', '}']
train
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election_type.py#L42-L47
4,652
biocore/burrito-fillings
bfillings/uclust.py
uclust_cluster_from_sorted_fasta_filepath
def uclust_cluster_from_sorted_fasta_filepath( fasta_filepath, uc_save_filepath=None, percent_ID=0.97, max_accepts=1, max_rejects=8, stepwords=8, word_length=8, optimal=False, exact=False, suppress_sort=False, enable_rev_strand_matching=False, subject_fasta_filepath=None, suppress_new_clusters=False, stable_sort=False, tmp_dir=gettempdir(), HALT_EXEC=False): """ Returns clustered uclust file from sorted fasta""" output_filepath = uc_save_filepath if not output_filepath: _, output_filepath = mkstemp(dir=tmp_dir, prefix='uclust_clusters', suffix='.uc') params = {'--id': percent_ID, '--maxaccepts': max_accepts, '--maxrejects': max_rejects, '--stepwords': stepwords, '--w': word_length, '--tmpdir': tmp_dir} app = Uclust(params, TmpDir=tmp_dir, HALT_EXEC=HALT_EXEC) # Set any additional parameters specified by the user if enable_rev_strand_matching: app.Parameters['--rev'].on() if optimal: app.Parameters['--optimal'].on() if exact: app.Parameters['--exact'].on() if suppress_sort: app.Parameters['--usersort'].on() if subject_fasta_filepath: app.Parameters['--lib'].on(subject_fasta_filepath) if suppress_new_clusters: app.Parameters['--libonly'].on() if stable_sort: app.Parameters['--stable_sort'].on() app_result = app({'--input': fasta_filepath, '--uc': output_filepath}) return app_result
python
def uclust_cluster_from_sorted_fasta_filepath( fasta_filepath, uc_save_filepath=None, percent_ID=0.97, max_accepts=1, max_rejects=8, stepwords=8, word_length=8, optimal=False, exact=False, suppress_sort=False, enable_rev_strand_matching=False, subject_fasta_filepath=None, suppress_new_clusters=False, stable_sort=False, tmp_dir=gettempdir(), HALT_EXEC=False): """ Returns clustered uclust file from sorted fasta""" output_filepath = uc_save_filepath if not output_filepath: _, output_filepath = mkstemp(dir=tmp_dir, prefix='uclust_clusters', suffix='.uc') params = {'--id': percent_ID, '--maxaccepts': max_accepts, '--maxrejects': max_rejects, '--stepwords': stepwords, '--w': word_length, '--tmpdir': tmp_dir} app = Uclust(params, TmpDir=tmp_dir, HALT_EXEC=HALT_EXEC) # Set any additional parameters specified by the user if enable_rev_strand_matching: app.Parameters['--rev'].on() if optimal: app.Parameters['--optimal'].on() if exact: app.Parameters['--exact'].on() if suppress_sort: app.Parameters['--usersort'].on() if subject_fasta_filepath: app.Parameters['--lib'].on(subject_fasta_filepath) if suppress_new_clusters: app.Parameters['--libonly'].on() if stable_sort: app.Parameters['--stable_sort'].on() app_result = app({'--input': fasta_filepath, '--uc': output_filepath}) return app_result
['def', 'uclust_cluster_from_sorted_fasta_filepath', '(', 'fasta_filepath', ',', 'uc_save_filepath', '=', 'None', ',', 'percent_ID', '=', '0.97', ',', 'max_accepts', '=', '1', ',', 'max_rejects', '=', '8', ',', 'stepwords', '=', '8', ',', 'word_length', '=', '8', ',', 'optimal', '=', 'False', ',', 'exact', '=', 'False', ',', 'suppress_sort', '=', 'False', ',', 'enable_rev_strand_matching', '=', 'False', ',', 'subject_fasta_filepath', '=', 'None', ',', 'suppress_new_clusters', '=', 'False', ',', 'stable_sort', '=', 'False', ',', 'tmp_dir', '=', 'gettempdir', '(', ')', ',', 'HALT_EXEC', '=', 'False', ')', ':', 'output_filepath', '=', 'uc_save_filepath', 'if', 'not', 'output_filepath', ':', '_', ',', 'output_filepath', '=', 'mkstemp', '(', 'dir', '=', 'tmp_dir', ',', 'prefix', '=', "'uclust_clusters'", ',', 'suffix', '=', "'.uc'", ')', 'params', '=', '{', "'--id'", ':', 'percent_ID', ',', "'--maxaccepts'", ':', 'max_accepts', ',', "'--maxrejects'", ':', 'max_rejects', ',', "'--stepwords'", ':', 'stepwords', ',', "'--w'", ':', 'word_length', ',', "'--tmpdir'", ':', 'tmp_dir', '}', 'app', '=', 'Uclust', '(', 'params', ',', 'TmpDir', '=', 'tmp_dir', ',', 'HALT_EXEC', '=', 'HALT_EXEC', ')', '# Set any additional parameters specified by the user', 'if', 'enable_rev_strand_matching', ':', 'app', '.', 'Parameters', '[', "'--rev'", ']', '.', 'on', '(', ')', 'if', 'optimal', ':', 'app', '.', 'Parameters', '[', "'--optimal'", ']', '.', 'on', '(', ')', 'if', 'exact', ':', 'app', '.', 'Parameters', '[', "'--exact'", ']', '.', 'on', '(', ')', 'if', 'suppress_sort', ':', 'app', '.', 'Parameters', '[', "'--usersort'", ']', '.', 'on', '(', ')', 'if', 'subject_fasta_filepath', ':', 'app', '.', 'Parameters', '[', "'--lib'", ']', '.', 'on', '(', 'subject_fasta_filepath', ')', 'if', 'suppress_new_clusters', ':', 'app', '.', 'Parameters', '[', "'--libonly'", ']', '.', 'on', '(', ')', 'if', 'stable_sort', ':', 'app', '.', 'Parameters', '[', "'--stable_sort'", ']', '.', 'on', '(', ')', 'app_result', '=', 'app', '(', '{', "'--input'", ':', 'fasta_filepath', ',', "'--uc'", ':', 'output_filepath', '}', ')', 'return', 'app_result']
Returns clustered uclust file from sorted fasta
['Returns', 'clustered', 'uclust', 'file', 'from', 'sorted', 'fasta']
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/uclust.py#L433-L482
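A hypothetical usage sketch for the entry above; the file paths and keyword values are illustrative, and it assumes bfillings plus a working uclust binary are installed:

# Cluster a length-sorted FASTA at 97% identity (paths are placeholders).
from bfillings.uclust import uclust_cluster_from_sorted_fasta_filepath

app_result = uclust_cluster_from_sorted_fasta_filepath(
    'seqs_sorted.fasta',
    uc_save_filepath='seqs_clusters.uc',
    percent_ID=0.97,
    enable_rev_strand_matching=True,
)
# app_result is the wrapped command-line result; the clustering output
# itself is written to 'seqs_clusters.uc'.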
4,653
facelessuser/wcmatch
wcmatch/_wcparse.py
translate
def translate(patterns, flags):
    """Translate patterns."""

    positive = []
    negative = []
    if isinstance(patterns, (str, bytes)):
        patterns = [patterns]
    flags |= _TRANSLATE
    for pattern in patterns:
        for expanded in expand_braces(pattern, flags):
            (negative if is_negative(expanded, flags) else positive).append(
                WcParse(expanded, flags & FLAG_MASK).parse()
            )

    if patterns and flags & REALPATH and negative and not positive:
        positive.append(_compile(b'**' if isinstance(patterns[0], bytes) else '**', flags))

    return positive, negative
python
['def', 'translate', '(', 'patterns', ',', 'flags', ')', ':', 'positive', '=', '[', ']', 'negative', '=', '[', ']', 'if', 'isinstance', '(', 'patterns', ',', '(', 'str', ',', 'bytes', ')', ')', ':', 'patterns', '=', '[', 'patterns', ']', 'flags', '|=', '_TRANSLATE', 'for', 'pattern', 'in', 'patterns', ':', 'for', 'expanded', 'in', 'expand_braces', '(', 'pattern', ',', 'flags', ')', ':', '(', 'negative', 'if', 'is_negative', '(', 'expanded', ',', 'flags', ')', 'else', 'positive', ')', '.', 'append', '(', 'WcParse', '(', 'expanded', ',', 'flags', '&', 'FLAG_MASK', ')', '.', 'parse', '(', ')', ')', 'if', 'patterns', 'and', 'flags', '&', 'REALPATH', 'and', 'negative', 'and', 'not', 'positive', ':', 'positive', '.', 'append', '(', '_compile', '(', "b'**'", 'if', 'isinstance', '(', 'patterns', '[', '0', ']', ',', 'bytes', ')', 'else', "'**'", ',', 'flags', ')', ')', 'return', 'positive', ',', 'negative']
Translate patterns.
['Translate', 'patterns', '.']
train
https://github.com/facelessuser/wcmatch/blob/d153e7007cc73b994ae1ba553dc4584039f5c212/wcmatch/_wcparse.py#L203-L222
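A minimal sketch of calling the function above directly; it assumes wcmatch is installed (note that _wcparse is an internal module, so the stable route in real code is the public wcmatch.fnmatch.translate front end):

from wcmatch import _wcparse

# With flags=0 every pattern is treated as positive; '!'-negated patterns
# only land in the second list when negation-style flags are enabled.
positive, negative = _wcparse.translate(['*.py', 'src/**/*.txt'], flags=0)
print(positive)  # translated regex pattern strings for inclusion
print(negative)  # empty here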
4,654
ensime/ensime-vim
ensime_shared/editor.py
Editor.split_window
def split_window(self, fpath, vertical=False, size=None, bufopts=None):
    """Open file in a new split window.

    Args:
        fpath (str): Path of the file to open. If ``None``, a new empty
            split is created.
        vertical (bool): Whether to open a vertical split.
        size (Optional[int]): The height (or width) to set for the new
            window.
        bufopts (Optional[dict]): Buffer-local options to set in the split
            window. See :func:`.set_buffer_options`.
    """
    command = 'split {}'.format(fpath) if fpath else 'new'
    if vertical:
        command = 'v' + command
    if size:
        command = str(size) + command

    self._vim.command(command)

    if bufopts:
        self.set_buffer_options(bufopts)
python
['def', 'split_window', '(', 'self', ',', 'fpath', ',', 'vertical', '=', 'False', ',', 'size', '=', 'None', ',', 'bufopts', '=', 'None', ')', ':', 'command', '=', "'split {}'", '.', 'format', '(', 'fpath', ')', 'if', 'fpath', 'else', "'new'", 'if', 'vertical', ':', 'command', '=', "'v'", '+', 'command', 'if', 'size', ':', 'command', '=', 'str', '(', 'size', ')', '+', 'command', 'self', '.', '_vim', '.', 'command', '(', 'command', ')', 'if', 'bufopts', ':', 'self', '.', 'set_buffer_options', '(', 'bufopts', ')']
Open file in a new split window.

Args:
    fpath (str): Path of the file to open. If ``None``, a new empty
        split is created.
    vertical (bool): Whether to open a vertical split.
    size (Optional[int]): The height (or width) to set for the new
        window.
    bufopts (Optional[dict]): Buffer-local options to set in the split
        window. See :func:`.set_buffer_options`.
['Open', 'file', 'in', 'a', 'new', 'split', 'window', '.']
train
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/editor.py#L165-L185
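An illustrative call, assuming an Editor instance already wired to a running (Neo)vim session; the path and buffer options are placeholders:

# Open a file in an 80-column vertical split with a buffer-local option.
editor.split_window('notes.md', vertical=True, size=80,
                    bufopts={'filetype': 'markdown'})
editor.split_window(None)  # just a new empty horizontal split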
4,655
mlperf/training
translation/tensorflow/transformer/utils/tokenizer.py
Subtokenizer.decode
def decode(self, subtokens):
    """Converts list of int subtokens ids into a string."""
    if isinstance(subtokens, np.ndarray):
        # Note that list(subtokens) converts subtokens to a python list, but the
        # items remain as np.int32. This converts both the array and its items.
        subtokens = subtokens.tolist()

    if not subtokens:
        return ""

    assert isinstance(subtokens, list) and isinstance(subtokens[0], int), (
        "Subtokens argument passed into decode() must be a list of integers.")

    return _unicode_to_native(
        _join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens)))
python
['def', 'decode', '(', 'self', ',', 'subtokens', ')', ':', 'if', 'isinstance', '(', 'subtokens', ',', 'np', '.', 'ndarray', ')', ':', '# Note that list(subtokens) converts subtokens to a python list, but the', '# items remain as np.int32. This converts both the array and its items.', 'subtokens', '=', 'subtokens', '.', 'tolist', '(', ')', 'if', 'not', 'subtokens', ':', 'return', '""', 'assert', 'isinstance', '(', 'subtokens', ',', 'list', ')', 'and', 'isinstance', '(', 'subtokens', '[', '0', ']', ',', 'int', ')', ',', '(', '"Subtokens argument passed into decode() must be a list of integers."', ')', 'return', '_unicode_to_native', '(', '_join_tokens_to_string', '(', 'self', '.', '_subtoken_ids_to_tokens', '(', 'subtokens', ')', ')', ')']
Converts list of int subtokens ids into a string.
['Converts', 'list', 'of', 'int', 'subtokens', 'ids', 'into', 'a', 'string', '.']
train
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/tokenizer.py#L153-L167
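A hedged round-trip sketch; it assumes a Subtokenizer built from an existing vocabulary file (the filename is a placeholder):

import numpy as np

subtokenizer = Subtokenizer('vocab.ende.32768')
ids = subtokenizer.encode('hello world')
print(subtokenizer.decode(ids))            # from a plain list of ints
print(subtokenizer.decode(np.array(ids)))  # ndarrays are converted first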
4,656
diging/tethne
tethne/classes/corpus.py
Corpus.top_features
def top_features(self, featureset_name, topn=20, by='counts',
                 perslice=False, slice_kwargs={}):
    """
    Retrieves the top ``topn`` most numerous features in the corpus.

    Parameters
    ----------
    featureset_name : str
        Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
    topn : int (default: ``20``)
        Number of features to return.
    by : str (default: ``'counts'``)
        If ``'counts'``, uses the sum of feature count values to rank
        features. If ``'documentCounts'``, uses the number of papers in
        which features occur.
    perslice : bool (default: False)
        If True, retrieves the top ``topn`` features in each slice.
    slice_kwargs : kwargs
        If ``perslice=True``, these keyword arguments are passed to
        :meth:`.Corpus.slice`\.
    """
    if perslice:
        return [(k, subcorpus.features[featureset_name].top(topn, by=by))
                for k, subcorpus in self.slice(**slice_kwargs)]
    return self.features[featureset_name].top(topn, by=by)
python
['def', 'top_features', '(', 'self', ',', 'featureset_name', ',', 'topn', '=', '20', ',', 'by', '=', "'counts'", ',', 'perslice', '=', 'False', ',', 'slice_kwargs', '=', '{', '}', ')', ':', 'if', 'perslice', ':', 'return', '[', '(', 'k', ',', 'subcorpus', '.', 'features', '[', 'featureset_name', ']', '.', 'top', '(', 'topn', ',', 'by', '=', 'by', ')', ')', 'for', 'k', ',', 'subcorpus', 'in', 'self', '.', 'slice', '(', '*', '*', 'slice_kwargs', ')', ']', 'return', 'self', '.', 'features', '[', 'featureset_name', ']', '.', 'top', '(', 'topn', ',', 'by', '=', 'by', ')']
Retrieves the top ``topn`` most numerous features in the corpus.

Parameters
----------
featureset_name : str
    Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
topn : int (default: ``20``)
    Number of features to return.
by : str (default: ``'counts'``)
    If ``'counts'``, uses the sum of feature count values to rank
    features. If ``'documentCounts'``, uses the number of papers in which
    features occur.
perslice : bool (default: False)
    If True, retrieves the top ``topn`` features in each slice.
slice_kwargs : kwargs
    If ``perslice=True``, these keyword arguments are passed to
    :meth:`.Corpus.slice`\.
['Retrieves', 'the', 'top', 'topn', 'most', 'numerous', 'features', 'in', 'the', 'corpus', '.']
train
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/corpus.py#L687-L713
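A minimal sketch, assuming a tethne Corpus already populated with a 'citations' featureset; the featureset name and the slice kwargs are illustrative, not taken from the source:

# Corpus-wide top features, ranked by how many papers they occur in.
top = corpus.top_features('citations', topn=10, by='documentCounts')

# Per-slice top features; slice_kwargs are forwarded to Corpus.slice().
per_slice = corpus.top_features('citations', topn=5, perslice=True,
                                slice_kwargs={'window_size': 1})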
4,657
ianmiell/shutit
shutit_pexpect.py
ShutItPexpectSession.change_text
def change_text(self,
                text,
                fname,
                pattern=None,
                before=False,
                force=False,
                delete=False,
                note=None,
                replace=False,
                line_oriented=True,
                create=True,
                loglevel=logging.DEBUG):
    """Change text in a file.

    Returns None if there was no match for the regexp, True if it was
    matched and replaced, and False if the file did not exist or there was
    some other problem.

    @param text:          Text to insert.
    @param fname:         Filename to insert text to
    @param pattern:       Regexp for a line to match and insert
                          after/before/replace. If none, put at end of file.
    @param before:        Whether to place the text before or after the
                          matched text.
    @param force:         Force the insertion even if the text is in the file.
    @param delete:        Delete text from file rather than insert
    @param replace:       Replace matched text with passed-in text. If
                          nothing matches, then append.
    @param note:          See send()
    @param line_oriented: Consider the pattern on a per-line basis
                          (default True). Can match any continuous section
                          of the line, eg 'b.*d' will match the line:
                          'abcde'. If not line_oriented, the regexp is
                          considered with the flags re.DOTALL and
                          re.MULTILINE enabled.
    """
    shutit = self.shutit
    shutit.handle_note(note)
    fexists = self.file_exists(fname)
    if not fexists:
        if create:
            self.send(ShutItSendSpec(self,
                                     send=' command touch ' + fname,
                                     echo=False,
                                     loglevel=loglevel,
                                     ignore_background=True))
        else:
            shutit.fail(fname + ' does not exist and create=False')  # pragma: no cover
    if replace:
        # If replace and no pattern FAIL
        if not pattern:
            shutit.fail('replace=True requires a pattern to be passed in')  # pragma: no cover
        # If replace and delete FAIL
        if delete:
            shutit.fail('cannot pass replace=True and delete=True to insert_text')  # pragma: no cover
    # ftext is the original file's text. If base64 is available, use it to
    # encode the text
    if self.command_available('base64'):
        if shutit_global.shutit_global_object.ispy3:
            ftext = bytes(self.send_and_get_output(' command base64 ' + fname,
                                                   echo=False,
                                                   loglevel=loglevel),
                          shutit_global.shutit_global_object.default_encoding)
        else:
            ftext = self.send_and_get_output(' command base64 ' + fname,
                                             echo=False,
                                             loglevel=loglevel)
        ftext = base64.b64decode(ftext)
    else:
        # Replace the file text's ^M-newlines with simple newlines
        if shutit_global.shutit_global_object.ispy3:
            ftext = bytes(self.send_and_get_output(' command cat ' + fname,
                                                   echo=False,
                                                   loglevel=loglevel),
                          shutit_global.shutit_global_object.default_encoding)
            ftext = ftext.replace(bytes('\r\n', shutit_global.shutit_global_object.default_encoding),
                                  bytes('\n', shutit_global.shutit_global_object.default_encoding))
        else:
            ftext = self.send_and_get_output(' command cat ' + fname,
                                             echo=False,
                                             loglevel=loglevel)
            ftext = ftext.replace('\r\n', '\n')
    # Delete the text
    if delete:
        if shutit_global.shutit_global_object.ispy3:
            loc = ftext.find(bytes(text, shutit_global.shutit_global_object.default_encoding))
        else:
            loc = ftext.find(text)
        if loc == -1:
            # No output - no match
            return None
        else:
            new_text = ftext[:loc] + ftext[loc + len(text) + 1:]
    else:
        if pattern != None:
            if not line_oriented:
                if not shutit_util.check_regexp(pattern):
                    shutit.fail('Illegal regexp found in change_text call: ' + pattern)  # pragma: no cover
                # cf: http://stackoverflow.com/questions/9411041/matching-ranges-of-lines-in-python-like-sed-ranges
                if shutit_global.shutit_global_object.ispy3:
                    sre_match = re.search(bytes(pattern, shutit_global.shutit_global_object.default_encoding),
                                          ftext,
                                          re.DOTALL | re.MULTILINE)
                else:
                    sre_match = re.search(pattern, ftext, re.DOTALL | re.MULTILINE)
                if replace:
                    if sre_match is None:
                        cut_point = len(ftext)
                        newtext1 = ftext[:cut_point]
                        newtext2 = ftext[cut_point:]
                    else:
                        cut_point = sre_match.start()
                        cut_point_after = sre_match.end()
                        newtext1 = ftext[:cut_point]
                        newtext2 = ftext[cut_point_after:]
                else:
                    if sre_match is None:
                        # No output - no match
                        return None
                    elif before:
                        cut_point = sre_match.start()
                        # If the text is already there and we're not forcing it, return None.
                        if shutit_global.shutit_global_object.ispy3:
                            if not force and ftext[cut_point - len(text):].find(bytes(text, shutit_global.shutit_global_object.default_encoding)) > 0:
                                return None
                        else:
                            if not force and ftext[cut_point - len(text):].find(text) > 0:
                                return None
                    else:
                        cut_point = sre_match.end()
                        # If the text is already there and we're not forcing it, return None.
                        if shutit_global.shutit_global_object.ispy3:
                            if not force and ftext[cut_point:].find(bytes(text, shutit_global.shutit_global_object.default_encoding)) > 0:
                                return None
                        else:
                            if not force and ftext[cut_point:].find(text) > 0:
                                return None
                    newtext1 = ftext[:cut_point]
                    newtext2 = ftext[cut_point:]
            else:
                if shutit_global.shutit_global_object.ispy3:
                    lines = ftext.split(bytes('\n', shutit_global.shutit_global_object.default_encoding))
                else:
                    lines = ftext.split('\n')
                cut_point = 0
                line_length = 0
                matched = False
                if not shutit_util.check_regexp(pattern):
                    shutit.fail('Illegal regexp found in change_text call: ' + pattern)  # pragma: no cover
                for line in lines:
                    # Help the user out to make this properly line-oriented
                    pattern_before = ''
                    pattern_after = ''
                    if not pattern or pattern[0] != '^':
                        pattern_before = '^.*'
                    if not pattern or pattern[-1] != '$':
                        pattern_after = '.*$'
                    new_pattern = pattern_before + pattern + pattern_after
                    if shutit_global.shutit_global_object.ispy3:
                        match = re.search(bytes(new_pattern, shutit_global.shutit_global_object.default_encoding), line)
                    else:
                        match = re.search(new_pattern, line)
                    line_length = len(line)
                    if match != None:
                        matched = True
                        break
                    # Update cut point to next line, including newline in original text
                    cut_point += line_length + 1
                if not replace and not matched:
                    # No match, return none
                    return None
                if replace and not matched:
                    cut_point = len(ftext)
                elif not replace and not before:
                    cut_point += line_length
                # newtext1 is everything up to the cutpoint
                newtext1 = ftext[:cut_point]
                # newtext2 is everything after the cutpoint
                newtext2 = ftext[cut_point:]
                # if replacing and we matched the output in a line, then set
                # newtext2 to be everything from cutpoint's line end
                if replace and matched:
                    newtext2 = ftext[cut_point + line_length:]
                elif not force:
                    # If the text is already there and we're not forcing it, return None.
                    if shutit_global.shutit_global_object.ispy3:
                        if before and ftext[cut_point - len(text):].find(bytes(text, shutit_global.shutit_global_object.default_encoding)) > 0:
                            return None
                        if not before and ftext[cut_point:].find(bytes(text, shutit_global.shutit_global_object.default_encoding)) > 0:
                            return None
                    else:
                        if before and ftext[cut_point - len(text):].find(text) > 0:
                            return None
                        if not before and ftext[cut_point:].find(text) > 0:
                            return None
                # Add a newline to newtext1 if it is not already there
                if shutit_global.shutit_global_object.ispy3:
                    if newtext1 and bytes(newtext1.decode(shutit_global.shutit_global_object.default_encoding)[-1], shutit_global.shutit_global_object.default_encoding) != bytes('\n', shutit_global.shutit_global_object.default_encoding):
                        newtext1 += bytes('\n', shutit_global.shutit_global_object.default_encoding)
                else:
                    if newtext1 and newtext1[-1] != '\n':
                        newtext1 += '\n'
                # Add a newline to newtext2 if it is not already there
                if shutit_global.shutit_global_object.ispy3:
                    if newtext2 and bytes(newtext2.decode(shutit_global.shutit_global_object.default_encoding)[0], shutit_global.shutit_global_object.default_encoding) != bytes('\n', shutit_global.shutit_global_object.default_encoding):
                        newtext2 = bytes('\n', shutit_global.shutit_global_object.default_encoding) + newtext2
                else:
                    if newtext2 and newtext2[0] != '\n':
                        newtext2 = '\n' + newtext2
        else:
            # Append to file absent a pattern.
            cut_point = len(ftext)
            newtext1 = ftext[:cut_point]
            newtext2 = ftext[cut_point:]
        # If adding or replacing at the end of the file, then ensure we have a newline at the end
        if shutit_global.shutit_global_object.ispy3:
            if newtext2 == b'' and text and bytes(text[-1], shutit_global.shutit_global_object.default_encoding) != bytes('\n', shutit_global.shutit_global_object.default_encoding):
                newtext2 = bytes('\n', shutit_global.shutit_global_object.default_encoding)
        else:
            if newtext2 == '' and text and text[-1] != '\n':
                newtext2 = '\n'
        if shutit_global.shutit_global_object.ispy3:
            new_text = newtext1 + bytes(text, shutit_global.shutit_global_object.default_encoding) + newtext2
        else:
            new_text = newtext1 + text + newtext2
    self.send_file(fname,
                   new_text,
                   truncate=True,
                   loglevel=loglevel)
    shutit.handle_note_after(note=note)
    return True
python
['def', 'change_text', '(', 'self', ',', 'text', ',', 'fname', ',', 'pattern', '=', 'None', ',', 'before', '=', 'False', ',', 'force', '=', 'False', ',', 'delete', '=', 'False', ',', 'note', '=', 'None', ',', 'replace', '=', 'False', ',', 'line_oriented', '=', 'True', ',', 'create', '=', 'True', ',', 'loglevel', '=', 'logging', '.', 'DEBUG', ')', ':', 'shutit', '=', 'self', '.', 'shutit', 'shutit', '.', 'handle_note', '(', 'note', ')', 'fexists', '=', 'self', '.', 'file_exists', '(', 'fname', ')', 'if', 'not', 'fexists', ':', 'if', 'create', ':', 'self', '.', 'send', '(', 'ShutItSendSpec', '(', 'self', ',', 'send', '=', "' command touch '", '+', 'fname', ',', 'echo', '=', 'False', ',', 'loglevel', '=', 'loglevel', ',', 'ignore_background', '=', 'True', ')', ')', 'else', ':', 'shutit', '.', 'fail', '(', 'fname', '+', "' does not exist and create=False'", ')', '# pragma: no cover', 'if', 'replace', ':', '# If replace and no pattern FAIL', 'if', 'not', 'pattern', ':', 'shutit', '.', 'fail', '(', "'replace=True requires a pattern to be passed in'", ')', '# pragma: no cover', '# If replace and delete FAIL', 'if', 'delete', ':', 'shutit', '.', 'fail', '(', "'cannot pass replace=True and delete=True to insert_text'", ')', '# pragma: no cover', "# ftext is the original file's text. If base64 is available, use it to", '# encode the text', 'if', 'self', '.', 'command_available', '(', "'base64'", ')', ':', 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'ftext', '=', 'bytes', '(', 'self', '.', 'send_and_get_output', '(', "' command base64 '", '+', 'fname', ',', 'echo', '=', 'False', ',', 'loglevel', '=', 'loglevel', ')', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', 'else', ':', 'ftext', '=', 'self', '.', 'send_and_get_output', '(', "' command base64 '", '+', 'fname', ',', 'echo', '=', 'False', ',', 'loglevel', '=', 'loglevel', ')', 'ftext', '=', 'base64', '.', 'b64decode', '(', 'ftext', ')', 'else', ':', "# Replace the file text's ^M-newlines with simple newlines", 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'ftext', '=', 'bytes', '(', 'self', '.', 'send_and_get_output', '(', "' command cat '", '+', 'fname', ',', 'echo', '=', 'False', ',', 'loglevel', '=', 'loglevel', ')', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', 'ftext', '=', 'ftext', '.', 'replace', '(', 'bytes', '(', "'\\r\\n'", ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ',', 'bytes', '(', "'\\n'", ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ')', 'else', ':', 'ftext', '=', 'self', '.', 'send_and_get_output', '(', "' command cat '", '+', 'fname', ',', 'echo', '=', 'False', ',', 'loglevel', '=', 'loglevel', ')', 'ftext', '=', 'ftext', '.', 'replace', '(', "'\\r\\n'", ',', "'\\n'", ')', '# Delete the text', 'if', 'delete', ':', 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'loc', '=', 'ftext', '.', 'find', '(', 'bytes', '(', 'text', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ')', 'else', ':', 'loc', '=', 'ftext', '.', 'find', '(', 'text', ')', 'if', 'loc', '==', '-', '1', ':', '# No output - no match', 'return', 'None', 'else', ':', 'new_text', '=', 'ftext', '[', ':', 'loc', ']', '+', 'ftext', '[', 'loc', '+', 'len', '(', 'text', ')', '+', '1', ':', ']', 'else', ':', 'if', 'pattern', '!=', 'None', ':', 'if', 'not', 'line_oriented', ':', 'if', 'not', 'shutit_util', '.', 'check_regexp', '(', 
'pattern', ')', ':', 'shutit', '.', 'fail', '(', "'Illegal regexp found in change_text call: '", '+', 'pattern', ')', '# pragma: no cover', '# cf: http://stackoverflow.com/questions/9411041/matching-ranges-of-lines-in-python-like-sed-ranges', 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'sre_match', '=', 're', '.', 'search', '(', 'bytes', '(', 'pattern', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ',', 'ftext', ',', 're', '.', 'DOTALL', '|', 're', '.', 'MULTILINE', ')', 'else', ':', 'sre_match', '=', 're', '.', 'search', '(', 'pattern', ',', 'ftext', ',', 're', '.', 'DOTALL', '|', 're', '.', 'MULTILINE', ')', 'if', 'replace', ':', 'if', 'sre_match', 'is', 'None', ':', 'cut_point', '=', 'len', '(', 'ftext', ')', 'newtext1', '=', 'ftext', '[', ':', 'cut_point', ']', 'newtext2', '=', 'ftext', '[', 'cut_point', ':', ']', 'else', ':', 'cut_point', '=', 'sre_match', '.', 'start', '(', ')', 'cut_point_after', '=', 'sre_match', '.', 'end', '(', ')', 'newtext1', '=', 'ftext', '[', ':', 'cut_point', ']', 'newtext2', '=', 'ftext', '[', 'cut_point_after', ':', ']', 'else', ':', 'if', 'sre_match', 'is', 'None', ':', '# No output - no match', 'return', 'None', 'elif', 'before', ':', 'cut_point', '=', 'sre_match', '.', 'start', '(', ')', "# If the text is already there and we're not forcing it, return None.", 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'if', 'not', 'force', 'and', 'ftext', '[', 'cut_point', '-', 'len', '(', 'text', ')', ':', ']', '.', 'find', '(', 'bytes', '(', 'text', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ')', '>', '0', ':', 'return', 'None', 'else', ':', 'if', 'not', 'force', 'and', 'ftext', '[', 'cut_point', '-', 'len', '(', 'text', ')', ':', ']', '.', 'find', '(', 'text', ')', '>', '0', ':', 'return', 'None', 'else', ':', 'cut_point', '=', 'sre_match', '.', 'end', '(', ')', "# If the text is already there and we're not forcing it, return None.", 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'if', 'not', 'force', 'and', 'ftext', '[', 'cut_point', ':', ']', '.', 'find', '(', 'bytes', '(', 'text', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ')', '>', '0', ':', 'return', 'None', 'else', ':', 'if', 'not', 'force', 'and', 'ftext', '[', 'cut_point', ':', ']', '.', 'find', '(', 'text', ')', '>', '0', ':', 'return', 'None', 'newtext1', '=', 'ftext', '[', ':', 'cut_point', ']', 'newtext2', '=', 'ftext', '[', 'cut_point', ':', ']', 'else', ':', 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'lines', '=', 'ftext', '.', 'split', '(', 'bytes', '(', "'\\n'", ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ')', 'else', ':', 'lines', '=', 'ftext', '.', 'split', '(', "'\\n'", ')', 'cut_point', '=', '0', 'line_length', '=', '0', 'matched', '=', 'False', 'if', 'not', 'shutit_util', '.', 'check_regexp', '(', 'pattern', ')', ':', 'shutit', '.', 'fail', '(', "'Illegal regexp found in change_text call: '", '+', 'pattern', ')', '# pragma: no cover', 'for', 'line', 'in', 'lines', ':', '#Help the user out to make this properly line-oriented', 'pattern_before', '=', "''", 'pattern_after', '=', "''", 'if', 'not', 'pattern', 'or', 'pattern', '[', '0', ']', '!=', "'^'", ':', 'pattern_before', '=', "'^.*'", 'if', 'not', 'pattern', 'or', 'pattern', '[', '-', '1', ']', '!=', "'$'", ':', 'pattern_after', '=', "'.*$'", 'new_pattern', '=', 'pattern_before', '+', 'pattern', 
'+', 'pattern_after', 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'match', '=', 're', '.', 'search', '(', 'bytes', '(', 'new_pattern', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ',', 'line', ')', 'else', ':', 'match', '=', 're', '.', 'search', '(', 'new_pattern', ',', 'line', ')', 'line_length', '=', 'len', '(', 'line', ')', 'if', 'match', '!=', 'None', ':', 'matched', '=', 'True', 'break', '# Update cut point to next line, including newline in original text', 'cut_point', '+=', 'line_length', '+', '1', 'if', 'not', 'replace', 'and', 'not', 'matched', ':', '# No match, return none', 'return', 'None', 'if', 'replace', 'and', 'not', 'matched', ':', 'cut_point', '=', 'len', '(', 'ftext', ')', 'elif', 'not', 'replace', 'and', 'not', 'before', ':', 'cut_point', '+=', 'line_length', '# newtext1 is everything up to the cutpoint', 'newtext1', '=', 'ftext', '[', ':', 'cut_point', ']', '# newtext2 is everything after the cutpoint', 'newtext2', '=', 'ftext', '[', 'cut_point', ':', ']', "# if replacing and we matched the output in a line, then set newtext2 to be everything from cutpoint's line end", 'if', 'replace', 'and', 'matched', ':', 'newtext2', '=', 'ftext', '[', 'cut_point', '+', 'line_length', ':', ']', 'elif', 'not', 'force', ':', "# If the text is already there and we're not forcing it, return None.", 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'if', 'before', 'and', 'ftext', '[', 'cut_point', '-', 'len', '(', 'text', ')', ':', ']', '.', 'find', '(', 'bytes', '(', 'text', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ')', '>', '0', ':', 'return', 'None', 'if', 'not', 'before', 'and', 'ftext', '[', 'cut_point', ':', ']', '.', 'find', '(', 'bytes', '(', 'text', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ')', '>', '0', ':', 'return', 'None', 'else', ':', 'if', 'before', 'and', 'ftext', '[', 'cut_point', '-', 'len', '(', 'text', ')', ':', ']', '.', 'find', '(', 'text', ')', '>', '0', ':', 'return', 'None', 'if', 'not', 'before', 'and', 'ftext', '[', 'cut_point', ':', ']', '.', 'find', '(', 'text', ')', '>', '0', ':', 'return', 'None', '# Add a newline to newtext1 if it is not already there', 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'if', 'newtext1', 'and', 'bytes', '(', 'newtext1', '.', 'decode', '(', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', '[', '-', '1', ']', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', '!=', 'bytes', '(', "'\\n'", ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ':', 'newtext1', '+=', 'bytes', '(', "'\\n'", ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', 'else', ':', 'if', 'newtext1', 'and', 'newtext1', '[', '-', '1', ']', '!=', "'\\n'", ':', 'newtext1', '+=', "'\\n'", '# Add a newline to newtext2 if it is not already there', 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'if', 'newtext2', 'and', 'bytes', '(', 'newtext2', '.', 'decode', '(', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', '[', '0', ']', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', '!=', 'bytes', '(', "'\\n'", ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ':', 'newtext2', '=', 'bytes', '(', "'\\n'", ',', 'shutit_global', '.', 'shutit_global_object', '.', 
'default_encoding', ')', '+', 'newtext2', 'else', ':', 'if', 'newtext2', 'and', 'newtext2', '[', '0', ']', '!=', "'\\n'", ':', 'newtext2', '=', "'\\n'", '+', 'newtext2', 'else', ':', '# Append to file absent a pattern.', 'cut_point', '=', 'len', '(', 'ftext', ')', 'newtext1', '=', 'ftext', '[', ':', 'cut_point', ']', 'newtext2', '=', 'ftext', '[', 'cut_point', ':', ']', '# If adding or replacing at the end of the file, then ensure we have a newline at the end', 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'if', 'newtext2', '==', "b''", 'and', 'text', 'and', 'bytes', '(', 'text', '[', '-', '1', ']', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', '!=', 'bytes', '(', "'\\n'", ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', ':', 'newtext2', '=', 'bytes', '(', "'\\n'", ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', 'else', ':', 'if', 'newtext2', '==', "''", 'and', 'text', 'and', 'text', '[', '-', '1', ']', '!=', "'\\n'", ':', 'newtext2', '=', "'\\n'", 'if', 'shutit_global', '.', 'shutit_global_object', '.', 'ispy3', ':', 'new_text', '=', 'newtext1', '+', 'bytes', '(', 'text', ',', 'shutit_global', '.', 'shutit_global_object', '.', 'default_encoding', ')', '+', 'newtext2', 'else', ':', 'new_text', '=', 'newtext1', '+', 'text', '+', 'newtext2', 'self', '.', 'send_file', '(', 'fname', ',', 'new_text', ',', 'truncate', '=', 'True', ',', 'loglevel', '=', 'loglevel', ')', 'shutit', '.', 'handle_note_after', '(', 'note', '=', 'note', ')', 'return', 'True']
Change text in a file.

Returns None if there was no match for the regexp, True if it was matched
and replaced, and False if the file did not exist or there was some other
problem.

@param text:          Text to insert.
@param fname:         Filename to insert text to
@param pattern:       Regexp for a line to match and insert
                      after/before/replace. If none, put at end of file.
@param before:        Whether to place the text before or after the
                      matched text.
@param force:         Force the insertion even if the text is in the file.
@param delete:        Delete text from file rather than insert
@param replace:       Replace matched text with passed-in text. If nothing
                      matches, then append.
@param note:          See send()
@param line_oriented: Consider the pattern on a per-line basis
                      (default True). Can match any continuous section of
                      the line, eg 'b.*d' will match the line: 'abcde'.
                      If not line_oriented, the regexp is considered with
                      the flags re.DOTALL and re.MULTILINE enabled.
['Change', 'text', 'in', 'a', 'file', '.']
train
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_pexpect.py#L2340-L2563
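An illustrative pair of calls, assuming an active ShutItPexpectSession named 'session'; the file paths, patterns, and inserted text are placeholders:

# Insert a line after the first line matching the pattern.
session.change_text('10.0.0.5 build-host', '/etc/hosts',
                    pattern='^127\\.0\\.0\\.1')

# Replace the matching line outright (appends if nothing matches).
session.change_text('PermitRootLogin no', '/etc/ssh/sshd_config',
                    pattern='^PermitRootLogin', replace=True)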
4,658
StevenMaude/bbc-radio-tracklisting-downloader
bbc_tracklist.py
save_tag_to_audio_file
def save_tag_to_audio_file(audio_file, tracklisting):
    """
    Saves tag to audio file.
    """
    print("Trying to tag {}".format(audio_file))
    f = mediafile.MediaFile(audio_file)
    if not f.lyrics:
        print("No tracklisting present. Creating lyrics tag.")
        f.lyrics = 'Tracklisting' + '\n' + tracklisting
    elif tracklisting not in f.lyrics:
        print("Appending tracklisting to existing lyrics tag.")
        f.lyrics = f.lyrics + '\n\n' + 'Tracklisting' + '\n' + tracklisting
    else:
        print("Tracklisting already present. Not modifying file.")
        raise TagNotNeededError
    f.save()
    print("Saved tag to file:", audio_file)
python
['def', 'save_tag_to_audio_file', '(', 'audio_file', ',', 'tracklisting', ')', ':', 'print', '(', '"Trying to tag {}"', '.', 'format', '(', 'audio_file', ')', ')', 'f', '=', 'mediafile', '.', 'MediaFile', '(', 'audio_file', ')', 'if', 'not', 'f', '.', 'lyrics', ':', 'print', '(', '"No tracklisting present. Creating lyrics tag."', ')', 'f', '.', 'lyrics', '=', "'Tracklisting'", '+', "'\\n'", '+', 'tracklisting', 'elif', 'tracklisting', 'not', 'in', 'f', '.', 'lyrics', ':', 'print', '(', '"Appending tracklisting to existing lyrics tag."', ')', 'f', '.', 'lyrics', '=', 'f', '.', 'lyrics', '+', "'\\n\\n'", '+', "'Tracklisting'", '+', "'\\n'", '+', 'tracklisting', 'else', ':', 'print', '(', '"Tracklisting already present. Not modifying file."', ')', 'raise', 'TagNotNeededError', 'f', '.', 'save', '(', ')', 'print', '(', '"Saved tag to file:"', ',', 'audio_file', ')']
Saves tag to audio file.
['Saves', 'tag', 'to', 'audio', 'file', '.']
train
https://github.com/StevenMaude/bbc-radio-tracklisting-downloader/blob/9fe9096b4d889888f65756444e4fd71352b92458/bbc_tracklist.py#L157-L175
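A hedged usage sketch; the audio file name is a placeholder and the mediafile package must be installed:

tracklisting = '1. Some Artist - Some Track\n2. Another Artist - Another Track'
try:
    save_tag_to_audio_file('episode.m4a', tracklisting)
except TagNotNeededError:
    pass  # the lyrics tag already contains this tracklisting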
4,659
YosaiProject/yosai
yosai/core/realm/realm.py
AccountStoreRealm.get_authentication_info
def get_authentication_info(self, identifier):
    """
    The default authentication caching policy is to cache an account's
    credentials that are queried from an account store, for a specific
    user, so to facilitate any subsequent authentication attempts for
    that user. Naturally, in order to cache one must have a CacheHandler.
    If a user were to fail to authenticate, perhaps due to an incorrectly
    entered password, during the next authentication attempt (of that
    user id) the cached account will be readily available from cache and
    used to match credentials, boosting performance.

    :returns: an Account object
    """
    account_info = None
    ch = self.cache_handler

    def query_authc_info(self):
        msg = ("Could not obtain cached credentials for [{0}]. "
               "Will try to acquire credentials from account store."
               .format(identifier))
        logger.debug(msg)

        # account_info is a dict
        account_info = self.account_store.get_authc_info(identifier)

        if account_info is None:
            msg = "Could not get stored credentials for {0}".format(identifier)
            raise ValueError(msg)

        return account_info

    try:
        msg2 = ("Attempting to get cached credentials for [{0}]"
                .format(identifier))
        logger.debug(msg2)

        # account_info is a dict
        account_info = ch.get_or_create(domain='authentication:' + self.name,
                                        identifier=identifier,
                                        creator_func=query_authc_info,
                                        creator=self)
    except AttributeError:
        # this means the cache_handler isn't configured
        account_info = query_authc_info(self)
    except ValueError:
        msg3 = ("No account credentials found for identifiers [{0}]. "
                "Returning None.".format(identifier))
        logger.warning(msg3)

    if account_info:
        account_info['account_id'] = SimpleIdentifierCollection(source_name=self.name,
                                                                identifier=identifier)
    return account_info
python
['def', 'get_authentication_info', '(', 'self', ',', 'identifier', ')', ':', 'account_info', '=', 'None', 'ch', '=', 'self', '.', 'cache_handler', 'def', 'query_authc_info', '(', 'self', ')', ':', 'msg', '=', '(', '"Could not obtain cached credentials for [{0}]. "', '"Will try to acquire credentials from account store."', '.', 'format', '(', 'identifier', ')', ')', 'logger', '.', 'debug', '(', 'msg', ')', '# account_info is a dict', 'account_info', '=', 'self', '.', 'account_store', '.', 'get_authc_info', '(', 'identifier', ')', 'if', 'account_info', 'is', 'None', ':', 'msg', '=', '"Could not get stored credentials for {0}"', '.', 'format', '(', 'identifier', ')', 'raise', 'ValueError', '(', 'msg', ')', 'return', 'account_info', 'try', ':', 'msg2', '=', '(', '"Attempting to get cached credentials for [{0}]"', '.', 'format', '(', 'identifier', ')', ')', 'logger', '.', 'debug', '(', 'msg2', ')', '# account_info is a dict', 'account_info', '=', 'ch', '.', 'get_or_create', '(', 'domain', '=', "'authentication:'", '+', 'self', '.', 'name', ',', 'identifier', '=', 'identifier', ',', 'creator_func', '=', 'query_authc_info', ',', 'creator', '=', 'self', ')', 'except', 'AttributeError', ':', "# this means the cache_handler isn't configured", 'account_info', '=', 'query_authc_info', '(', 'self', ')', 'except', 'ValueError', ':', 'msg3', '=', '(', '"No account credentials found for identifiers [{0}]. "', '"Returning None."', '.', 'format', '(', 'identifier', ')', ')', 'logger', '.', 'warning', '(', 'msg3', ')', 'if', 'account_info', ':', 'account_info', '[', "'account_id'", ']', '=', 'SimpleIdentifierCollection', '(', 'source_name', '=', 'self', '.', 'name', ',', 'identifier', '=', 'identifier', ')', 'return', 'account_info']
The default authentication caching policy is to cache an account's credentials that are queried from an account store, for a specific user, so to facilitate any subsequent authentication attempts for that user. Naturally, in order to cache one must have a CacheHandler. If a user were to fail to authenticate, perhaps due to an incorrectly entered password, during the next authentication attempt (of that user id) the cached account will be readily available from cache and used to match credentials, boosting performance. :returns: an Account object
['The', 'default', 'authentication', 'caching', 'policy', 'is', 'to', 'cache', 'an', 'account', 's', 'credentials', 'that', 'are', 'queried', 'from', 'an', 'account', 'store', 'for', 'a', 'specific', 'user', 'so', 'to', 'facilitate', 'any', 'subsequent', 'authentication', 'attempts', 'for', 'that', 'user', '.', 'Naturally', 'in', 'order', 'to', 'cache', 'one', 'must', 'have', 'a', 'CacheHandler', '.', 'If', 'a', 'user', 'were', 'to', 'fail', 'to', 'authenticate', 'perhaps', 'due', 'to', 'an', 'incorrectly', 'entered', 'password', 'during', 'the', 'the', 'next', 'authentication', 'attempt', '(', 'of', 'that', 'user', 'id', ')', 'the', 'cached', 'account', 'will', 'be', 'readily', 'available', 'from', 'cache', 'and', 'used', 'to', 'match', 'credentials', 'boosting', 'performance', '.']
train
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/realm/realm.py#L145-L199
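A minimal sketch, assuming a configured AccountStoreRealm instance (the identifier is a placeholder); the cache is consulted only if a cache_handler has been attached:

authc_info = realm.get_authentication_info('thedude')
if authc_info:
    # 'account_id' was attached above as a SimpleIdentifierCollection
    print(authc_info['account_id'])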
4,660
rackerlabs/txkazoo
txkazoo/recipe/watchers.py
watch_children
def watch_children(kzclient,
                   path, func,
                   allow_session_lost=True,
                   send_event=False,
                   ChildrenWatch=ChildrenWatch):
    """
    Install a Kazoo :obj:`ChildrenWatch` on the given path.

    The given `func` will be called in the reactor thread when any children
    are created or deleted, or if the node itself is deleted.

    Returns a Deferred which usually has no result, but may fail with an
    exception if e.g. the path does not exist.
    """
    def wrapped_func(*args, **kwargs):
        return blockingCallFromThread(kzclient.reactor, func, *args, **kwargs)

    return deferToThreadPool(
        kzclient.reactor, kzclient.pool,
        lambda: ChildrenWatch(
            kzclient.kazoo_client,
            path,
            func=wrapped_func,
            allow_session_lost=allow_session_lost,
            send_event=send_event))
python
['def', 'watch_children', '(', 'kzclient', ',', 'path', ',', 'func', ',', 'allow_session_lost', '=', 'True', ',', 'send_event', '=', 'False', ',', 'ChildrenWatch', '=', 'ChildrenWatch', ')', ':', 'def', 'wrapped_func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'blockingCallFromThread', '(', 'kzclient', '.', 'reactor', ',', 'func', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'deferToThreadPool', '(', 'kzclient', '.', 'reactor', ',', 'kzclient', '.', 'pool', ',', 'lambda', ':', 'ChildrenWatch', '(', 'kzclient', '.', 'kazoo_client', ',', 'path', ',', 'func', '=', 'wrapped_func', ',', 'allow_session_lost', '=', 'allow_session_lost', ',', 'send_event', '=', 'send_event', ')', ')']
Install a Kazoo :obj:`ChildrenWatch` on the given path. The given `func` will be called in the reactor thread when any children are created or deleted, or if the node itself is deleted. Returns a Deferred which usually has no result, but may fail with an exception if e.g. the path does not exist.
['Install', 'a', 'Kazoo', ':', 'obj', ':', 'ChildrenWatch', 'on', 'the', 'given', 'path', '.']
train
https://github.com/rackerlabs/txkazoo/blob/a0989138cc08df7acd1d410f7e48708553839f46/txkazoo/recipe/watchers.py#L22-L45
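Illustrative usage, assuming 'kzclient' is the txkazoo client wrapper carrying the .reactor, .pool and .kazoo_client attributes used above; the ZooKeeper path is a placeholder:

def on_children(children):
    # Runs in the reactor thread with the current list of child-node names.
    print('children now:', children)

d = watch_children(kzclient, '/services/workers', on_children)
d.addErrback(lambda failure: print('watch failed:', failure))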
4,661
alefnula/tea
tea/console/format.py
table
def table(text):
    """Format the text as a table.

    Text in format:

    first | second
    row 2 col 1 | 4

    Will be formatted as::

        +-------------+--------+
        | first       | second |
        +-------------+--------+
        | row 2 col 1 | 4      |
        +-------------+--------+

    Args:
        text (str): Text that needs to be formatted.

    Returns:
        str: Formatted string.
    """

    def table_bar(col_lengths):
        return "+-%s-+%s" % (
            "-+-".join(["-" * length for length in col_lengths]),
            os.linesep,
        )

    rows = []
    for line in text.splitlines():
        rows.append([part.strip() for part in line.split("|")])
    max_cols = max(map(len, rows))
    col_lengths = [0] * max_cols
    for row in rows:
        cols = len(row)
        if cols < max_cols:
            row.extend([""] * (max_cols - cols))
        for i, col in enumerate(row):
            col_length = len(col)
            if col_length > col_lengths[i]:
                col_lengths[i] = col_length
    text = table_bar(col_lengths)
    for i, row in enumerate(rows):
        cols = []
        for i, col in enumerate(row):
            cols.append(col.ljust(col_lengths[i]))
        text += "| %s |%s" % (" | ".join(cols), os.linesep)
        text += table_bar(col_lengths)
    return text
python
['def', 'table', '(', 'text', ')', ':', 'def', 'table_bar', '(', 'col_lengths', ')', ':', 'return', '"+-%s-+%s"', '%', '(', '"-+-"', '.', 'join', '(', '[', '"-"', '*', 'length', 'for', 'length', 'in', 'col_lengths', ']', ')', ',', 'os', '.', 'linesep', ',', ')', 'rows', '=', '[', ']', 'for', 'line', 'in', 'text', '.', 'splitlines', '(', ')', ':', 'rows', '.', 'append', '(', '[', 'part', '.', 'strip', '(', ')', 'for', 'part', 'in', 'line', '.', 'split', '(', '"|"', ')', ']', ')', 'max_cols', '=', 'max', '(', 'map', '(', 'len', ',', 'rows', ')', ')', 'col_lengths', '=', '[', '0', ']', '*', 'max_cols', 'for', 'row', 'in', 'rows', ':', 'cols', '=', 'len', '(', 'row', ')', 'if', 'cols', '<', 'max_cols', ':', 'row', '.', 'extend', '(', '[', '""', ']', '*', '(', 'max_cols', '-', 'cols', ')', ')', 'for', 'i', ',', 'col', 'in', 'enumerate', '(', 'row', ')', ':', 'col_length', '=', 'len', '(', 'col', ')', 'if', 'col_length', '>', 'col_lengths', '[', 'i', ']', ':', 'col_lengths', '[', 'i', ']', '=', 'col_length', 'text', '=', 'table_bar', '(', 'col_lengths', ')', 'for', 'i', ',', 'row', 'in', 'enumerate', '(', 'rows', ')', ':', 'cols', '=', '[', ']', 'for', 'i', ',', 'col', 'in', 'enumerate', '(', 'row', ')', ':', 'cols', '.', 'append', '(', 'col', '.', 'ljust', '(', 'col_lengths', '[', 'i', ']', ')', ')', 'text', '+=', '"| %s |%s"', '%', '(', '" | "', '.', 'join', '(', 'cols', ')', ',', 'os', '.', 'linesep', ')', 'text', '+=', 'table_bar', '(', 'col_lengths', ')', 'return', 'text']
Format the text as a table.

Text in format:

first | second
row 2 col 1 | 4

Will be formatted as::

    +-------------+--------+
    | first       | second |
    +-------------+--------+
    | row 2 col 1 | 4      |
    +-------------+--------+

Args:
    text (str): Text that needs to be formatted.

Returns:
    str: Formatted string.
['Format', 'the', 'text', 'as', 'a', 'table', '.', 'Text', 'in', 'format', ':', 'first', '|', 'second', 'row', '2', 'col', '1', '|', '4', 'Will', 'be', 'formatted', 'as', '::', '+', '-------------', '+', '--------', '+', '|', 'first', '|', 'second', '|', '+', '-------------', '+', '--------', '+', '|', 'row', '2', 'col', '1', '|', '4', '|', '+', '-------------', '+', '--------', '+', 'Args', ':', 'text', '(', 'str', ')', ':', 'Text', 'that', 'needs', 'to', 'be', 'formatted', '.', 'Returns', ':', 'str', ':', 'Formatted', 'string', '.']
train
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/console/format.py#L33-L82
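The docstring's own example, run end to end:

print(table("first | second\nrow 2 col 1 | 4"))
# +-------------+--------+
# | first       | second |
# +-------------+--------+
# | row 2 col 1 | 4      |
# +-------------+--------+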
4,662
sloria/doitlive
doitlive/keyboard.py
magicrun
def magicrun(
    text,
    shell,
    prompt_template="default",
    aliases=None,
    envvars=None,
    extra_commands=None,
    speed=1,
    test_mode=False,
    commentecho=False,
):
    """Echo out each character in ``text`` as keyboard characters are pressed,
    wait for a RETURN keypress, then run the ``text`` in a shell context.
    """
    goto_regulartype = magictype(text, prompt_template, speed)
    if goto_regulartype:
        return goto_regulartype
    run_command(
        text,
        shell,
        aliases=aliases,
        envvars=envvars,
        extra_commands=extra_commands,
        test_mode=test_mode,
    )
    return goto_regulartype
python
def magicrun(
    text,
    shell,
    prompt_template="default",
    aliases=None,
    envvars=None,
    extra_commands=None,
    speed=1,
    test_mode=False,
    commentecho=False,
):
    """Echo out each character in ``text`` as keyboard characters are pressed,
    wait for a RETURN keypress, then run the ``text`` in a shell context.
    """
    goto_regulartype = magictype(text, prompt_template, speed)
    if goto_regulartype:
        return goto_regulartype
    run_command(
        text,
        shell,
        aliases=aliases,
        envvars=envvars,
        extra_commands=extra_commands,
        test_mode=test_mode,
    )
    return goto_regulartype
['def', 'magicrun', '(', 'text', ',', 'shell', ',', 'prompt_template', '=', '"default"', ',', 'aliases', '=', 'None', ',', 'envvars', '=', 'None', ',', 'extra_commands', '=', 'None', ',', 'speed', '=', '1', ',', 'test_mode', '=', 'False', ',', 'commentecho', '=', 'False', ',', ')', ':', 'goto_regulartype', '=', 'magictype', '(', 'text', ',', 'prompt_template', ',', 'speed', ')', 'if', 'goto_regulartype', ':', 'return', 'goto_regulartype', 'run_command', '(', 'text', ',', 'shell', ',', 'aliases', '=', 'aliases', ',', 'envvars', '=', 'envvars', ',', 'extra_commands', '=', 'extra_commands', ',', 'test_mode', '=', 'test_mode', ',', ')', 'return', 'goto_regulartype']
Echo out each character in ``text`` as keyboard characters are pressed, wait for a RETURN keypress, then run the ``text`` in a shell context.
['Echo', 'out', 'each', 'character', 'in', 'text', 'as', 'keyboard', 'characters', 'are', 'pressed', 'wait', 'for', 'a', 'RETURN', 'keypress', 'then', 'run', 'the', 'text', 'in', 'a', 'shell', 'context', '.']
train
https://github.com/sloria/doitlive/blob/baf43f8ad3f2e4593fe21f6af42aedd34ef1efee/doitlive/keyboard.py#L202-L227
4,663
gmr/rejected
rejected/mcp.py
MasterControlProgram.check_process_counts
def check_process_counts(self):
        """Check for the minimum consumer process levels and start up new
        processes needed.

        """
        LOGGER.debug('Checking minimum consumer process levels')
        for name in self.consumers:
            processes_needed = self.process_spawn_qty(name)
            if processes_needed:
                LOGGER.info('Need to spawn %i processes for %s',
                            processes_needed, name)
                self.start_processes(name, processes_needed)
python
def check_process_counts(self):
        """Check for the minimum consumer process levels and start up new
        processes needed.

        """
        LOGGER.debug('Checking minimum consumer process levels')
        for name in self.consumers:
            processes_needed = self.process_spawn_qty(name)
            if processes_needed:
                LOGGER.info('Need to spawn %i processes for %s',
                            processes_needed, name)
                self.start_processes(name, processes_needed)
['def', 'check_process_counts', '(', 'self', ')', ':', 'LOGGER', '.', 'debug', '(', "'Checking minimum consumer process levels'", ')', 'for', 'name', 'in', 'self', '.', 'consumers', ':', 'processes_needed', '=', 'self', '.', 'process_spawn_qty', '(', 'name', ')', 'if', 'processes_needed', ':', 'LOGGER', '.', 'info', '(', "'Need to spawn %i processes for %s'", ',', 'processes_needed', ',', 'name', ')', 'self', '.', 'start_processes', '(', 'name', ',', 'processes_needed', ')']
Check for the minimum consumer process levels and start up new processes needed.
['Check', 'for', 'the', 'minimum', 'consumer', 'process', 'levels', 'and', 'start', 'up', 'new', 'processes', 'needed', '.']
train
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/mcp.py#L183-L194
4,664
volfpeter/graphscraper
src/graphscraper/base.py
NodeList.get_node
def get_node(self, index: int) -> Optional[Node]:
        """
        Returns the node with the given index if such a node currently exists in the node list.

        Arguments:
            index (int): The index of the queried node.

        Returns:
            The node with the given index if such a node currently exists in the node list,
            `None` otherwise.
        """
        return self._nodes.get(index)
python
def get_node(self, index: int) -> Optional[Node]:
        """
        Returns the node with the given index if such a node currently exists in the node list.

        Arguments:
            index (int): The index of the queried node.

        Returns:
            The node with the given index if such a node currently exists in the node list,
            `None` otherwise.
        """
        return self._nodes.get(index)
['def', 'get_node', '(', 'self', ',', 'index', ':', 'int', ')', '->', 'Optional', '[', 'Node', ']', ':', 'return', 'self', '.', '_nodes', '.', 'get', '(', 'index', ')']
Returns the node with the given index if such a node currently exists in the node list.

        Arguments:
            index (int): The index of the queried node.

        Returns:
            The node with the given index if such a node currently exists in the node list,
            `None` otherwise.
['Returns', 'the', 'node', 'with', 'the', 'given', 'index', 'if', 'such', 'a', 'node', 'currently', 'exists', 'in', 'the', 'node', 'list', '.', 'Arguments', ':', 'index', '(', 'int', ')', ':', 'The', 'index', 'of', 'the', 'queried', 'node', '.', 'Returns', ':', 'The', 'node', 'with', 'the', 'given', 'index', 'if', 'such', 'a', 'node', 'currently', 'exists', 'in', 'the', 'node', 'list', 'None', 'otherwise', '.']
train
https://github.com/volfpeter/graphscraper/blob/11d407509956a282ee25190ed6491a162fc0fe7f/src/graphscraper/base.py#L317-L328
4,665
KelSolaar/Oncilla
oncilla/reStructuredText_to_html.py
main
def main():
    """
    Starts the Application.

    :return: Definition success.
    :rtype: bool
    """

    args = get_command_line_arguments()
    args.css_file = args.css_file if foundations.common.path_exists(args.css_file) else CSS_FILE
    return reStructuredText_to_html(args.input,
                                    args.output,
                                    args.css_file)
python
def main():
    """
    Starts the Application.

    :return: Definition success.
    :rtype: bool
    """

    args = get_command_line_arguments()
    args.css_file = args.css_file if foundations.common.path_exists(args.css_file) else CSS_FILE
    return reStructuredText_to_html(args.input,
                                    args.output,
                                    args.css_file)
['def', 'main', '(', ')', ':', 'args', '=', 'get_command_line_arguments', '(', ')', 'args', '.', 'css_file', '=', 'args', '.', 'css_file', 'if', 'foundations', '.', 'common', '.', 'path_exists', '(', 'args', '.', 'css_file', ')', 'else', 'CSS_FILE', 'return', 'reStructuredText_to_html', '(', 'args', '.', 'input', ',', 'args', '.', 'output', ',', 'args', '.', 'css_file', ')']
Starts the Application.

    :return: Definition success.
    :rtype: bool
['Starts', 'the', 'Application', '.']
train
https://github.com/KelSolaar/Oncilla/blob/2b4db3704cf2c22a09a207681cb041fff555a994/oncilla/reStructuredText_to_html.py#L130-L142
4,666
buildbot/buildbot
master/buildbot/www/hooks/github.py
GitHubEventHandler._get_commit_msg
def _get_commit_msg(self, repo, sha):
        '''
        :param repo: the repo full name, ``{owner}/{project}``.
                     e.g. ``buildbot/buildbot``
        '''
        headers = {
            'User-Agent': 'Buildbot'
        }
        if self._token:
            headers['Authorization'] = 'token ' + self._token

        url = '/repos/{}/commits/{}'.format(repo, sha)
        http = yield httpclientservice.HTTPClientService.getService(
            self.master, self.github_api_endpoint, headers=headers,
            debug=self.debug, verify=self.verify)
        res = yield http.get(url)
        data = yield res.json()
        msg = data.get('commit', {'message': 'No message field'})['message']
        return msg
python
def _get_commit_msg(self, repo, sha):
        '''
        :param repo: the repo full name, ``{owner}/{project}``.
                     e.g. ``buildbot/buildbot``
        '''
        headers = {
            'User-Agent': 'Buildbot'
        }
        if self._token:
            headers['Authorization'] = 'token ' + self._token

        url = '/repos/{}/commits/{}'.format(repo, sha)
        http = yield httpclientservice.HTTPClientService.getService(
            self.master, self.github_api_endpoint, headers=headers,
            debug=self.debug, verify=self.verify)
        res = yield http.get(url)
        data = yield res.json()
        msg = data.get('commit', {'message': 'No message field'})['message']
        return msg
['def', '_get_commit_msg', '(', 'self', ',', 'repo', ',', 'sha', ')', ':', 'headers', '=', '{', "'User-Agent'", ':', "'Buildbot'", '}', 'if', 'self', '.', '_token', ':', 'headers', '[', "'Authorization'", ']', '=', "'token '", '+', 'self', '.', '_token', 'url', '=', "'/repos/{}/commits/{}'", '.', 'format', '(', 'repo', ',', 'sha', ')', 'http', '=', 'yield', 'httpclientservice', '.', 'HTTPClientService', '.', 'getService', '(', 'self', '.', 'master', ',', 'self', '.', 'github_api_endpoint', ',', 'headers', '=', 'headers', ',', 'debug', '=', 'self', '.', 'debug', ',', 'verify', '=', 'self', '.', 'verify', ')', 'res', '=', 'yield', 'http', '.', 'get', '(', 'url', ')', 'data', '=', 'yield', 'res', '.', 'json', '(', ')', 'msg', '=', 'data', '.', 'get', '(', "'commit'", ',', '{', "'message'", ':', "'No message field'", '}', ')', '[', "'message'", ']', 'return', 'msg']
:param repo: the repo full name, ``{owner}/{project}``. e.g. ``buildbot/buildbot``
[':', 'param', 'repo', ':', 'the', 'repo', 'full', 'name', '{', 'owner', '}', '/', '{', 'project', '}', '.', 'e', '.', 'g', '.', 'buildbot', '/', 'buildbot']
train
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/www/hooks/github.py#L225-L243
4,667
flatangle/flatlib
flatlib/dignities/accidental.py
AccidentalDignity.isAuxilied
def isAuxilied(self):
        """ Returns if the object is separating and applying
        to a benefic considering good aspects.

        """
        benefics = [const.VENUS, const.JUPITER]
        return self.__sepApp(benefics, aspList=[0, 60, 120])
python
def isAuxilied(self):
        """ Returns if the object is separating and applying
        to a benefic considering good aspects.

        """
        benefics = [const.VENUS, const.JUPITER]
        return self.__sepApp(benefics, aspList=[0, 60, 120])
['def', 'isAuxilied', '(', 'self', ')', ':', 'benefics', '=', '[', 'const', '.', 'VENUS', ',', 'const', '.', 'JUPITER', ']', 'return', 'self', '.', '__sepApp', '(', 'benefics', ',', 'aspList', '=', '[', '0', ',', '60', ',', '120', ']', ')']
Returns if the object is separating and applying to a benefic considering good aspects.
['Returns', 'if', 'the', 'object', 'is', 'separating', 'and', 'applying', 'to', 'a', 'benefic', 'considering', 'good', 'aspects', '.']
train
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/dignities/accidental.py#L324-L330
4,668
materialsproject/pymatgen
pymatgen/io/abinit/tasks.py
Task.ipath_from_ext
def ipath_from_ext(self, ext):
        """
        Returns the path of the input file with extension ext.
        Use it when the file does not exist yet.
        """
        return os.path.join(self.workdir, self.prefix.idata + "_" + ext)
python
def ipath_from_ext(self, ext):
        """
        Returns the path of the input file with extension ext.
        Use it when the file does not exist yet.
        """
        return os.path.join(self.workdir, self.prefix.idata + "_" + ext)
['def', 'ipath_from_ext', '(', 'self', ',', 'ext', ')', ':', 'return', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'workdir', ',', 'self', '.', 'prefix', '.', 'idata', '+', '"_"', '+', 'ext', ')']
Returns the path of the input file with extension ext. Use it when the file does not exist yet.
['Returns', 'the', 'path', 'of', 'the', 'input', 'file', 'with', 'extension', 'ext', '.', 'Use', 'it', 'when', 'the', 'file', 'does', 'not', 'exist', 'yet', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L1482-L1487
4,669
hydraplatform/hydra-base
hydra_base/lib/sharing.py
set_project_permission
def set_project_permission(project_id, usernames, read, write, share,**kwargs):
    """
        Set permissions on a project to a list of users, identified by
        their usernames.

        The read flag ('Y' or 'N') sets read access, the write
        flag sets write access. If the read flag is 'N', then there is
        automatically no write access or share access.
    """
    user_id = kwargs.get('user_id')

    proj_i = _get_project(project_id)

    #Is the sharing user allowed to share this project?
    proj_i.check_share_permission(user_id)

    #You cannot edit something you cannot see.
    if read == 'N':
        write = 'N'
        share = 'N'

    for username in usernames:
        user_i = _get_user(username)

        #The creator of a project must always have read and write access
        #to their project
        if proj_i.created_by == user_i.id:
            raise HydraError("Cannot set permissions on project %s"
                             " for user %s as this user is the creator." %
                             (project_id, username))

        proj_i.set_owner(user_i.id, read=read, write=write)

        for net_i in proj_i.networks:
            net_i.set_owner(user_i.id, read=read, write=write, share=share)

    db.DBSession.flush()
python
def set_project_permission(project_id, usernames, read, write, share,**kwargs):
    """
        Set permissions on a project to a list of users, identified by
        their usernames.

        The read flag ('Y' or 'N') sets read access, the write
        flag sets write access. If the read flag is 'N', then there is
        automatically no write access or share access.
    """
    user_id = kwargs.get('user_id')

    proj_i = _get_project(project_id)

    #Is the sharing user allowed to share this project?
    proj_i.check_share_permission(user_id)

    #You cannot edit something you cannot see.
    if read == 'N':
        write = 'N'
        share = 'N'

    for username in usernames:
        user_i = _get_user(username)

        #The creator of a project must always have read and write access
        #to their project
        if proj_i.created_by == user_i.id:
            raise HydraError("Cannot set permissions on project %s"
                             " for user %s as this user is the creator." %
                             (project_id, username))

        proj_i.set_owner(user_i.id, read=read, write=write)

        for net_i in proj_i.networks:
            net_i.set_owner(user_i.id, read=read, write=write, share=share)

    db.DBSession.flush()
['def', 'set_project_permission', '(', 'project_id', ',', 'usernames', ',', 'read', ',', 'write', ',', 'share', ',', '*', '*', 'kwargs', ')', ':', 'user_id', '=', 'kwargs', '.', 'get', '(', "'user_id'", ')', 'proj_i', '=', '_get_project', '(', 'project_id', ')', '#Is the sharing user allowed to share this project?', 'proj_i', '.', 'check_share_permission', '(', 'user_id', ')', '#You cannot edit something you cannot see.', 'if', 'read', '==', "'N'", ':', 'write', '=', "'N'", 'share', '=', "'N'", 'for', 'username', 'in', 'usernames', ':', 'user_i', '=', '_get_user', '(', 'username', ')', '#The creator of a project must always have read and write access', '#to their project', 'if', 'proj_i', '.', 'created_by', '==', 'user_i', '.', 'id', ':', 'raise', 'HydraError', '(', '"Cannot set permissions on project %s"', '" for user %s as this user is the creator."', '%', '(', 'project_id', ',', 'username', ')', ')', 'proj_i', '.', 'set_owner', '(', 'user_i', '.', 'id', ',', 'read', '=', 'read', ',', 'write', '=', 'write', ')', 'for', 'net_i', 'in', 'proj_i', '.', 'networks', ':', 'net_i', '.', 'set_owner', '(', 'user_i', '.', 'id', ',', 'read', '=', 'read', ',', 'write', '=', 'write', ',', 'share', '=', 'share', ')', 'db', '.', 'DBSession', '.', 'flush', '(', ')']
Set permissions on a project to a list of users, identified by
        their usernames.

        The read flag ('Y' or 'N') sets read access, the write
        flag sets write access. If the read flag is 'N', then there is
        automatically no write access or share access.
['Set', 'permissions', 'on', 'a', 'project', 'to', 'a', 'list', 'of', 'users', 'identified', 'by', 'their', 'usernames', '.']
train
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/sharing.py#L172-L207
4,670
razorpay/razorpay-python
razorpay/resources/addon.py
Addon.fetch
def fetch(self, addon_id, data={}, **kwargs):
        """"
        Fetch addon for given Id

        Args:
            addon_id : Id for which addon object has to be retrieved

        Returns:
            addon dict for given subscription Id
        """
        return super(Addon, self).fetch(addon_id, data, **kwargs)
python
def fetch(self, addon_id, data={}, **kwargs):
        """"
        Fetch addon for given Id

        Args:
            addon_id : Id for which addon object has to be retrieved

        Returns:
            addon dict for given subscription Id
        """
        return super(Addon, self).fetch(addon_id, data, **kwargs)
['def', 'fetch', '(', 'self', ',', 'addon_id', ',', 'data', '=', '{', '}', ',', '*', '*', 'kwargs', ')', ':', 'return', 'super', '(', 'Addon', ',', 'self', ')', '.', 'fetch', '(', 'addon_id', ',', 'data', ',', '*', '*', 'kwargs', ')']
Fetch addon for given Id

        Args:
            addon_id : Id for which addon object has to be retrieved

        Returns:
            addon dict for given subscription Id
['Fetch', 'addon', 'for', 'given', 'Id']
train
https://github.com/razorpay/razorpay-python/blob/5bc63fd8452165a4b54556888492e555222c8afe/razorpay/resources/addon.py#L10-L20
4,671
gem/oq-engine
openquake/hazardlib/gsim/frankel_1996.py
FrankelEtAl1996MblgAB1987NSHMP2008.get_mean_and_stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.

        :raises ValueError:
            if imt is instance of :class:`openquake.hazardlib.imt.SA` with
            unsupported period.
        """
        assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
                   for stddev_type in stddev_types)

        if imt not in self.IMTS_TABLES:
            raise ValueError(
                'IMT %s not supported in FrankelEtAl1996NSHMP. ' % repr(imt) +
                'FrankelEtAl1996NSHMP does not allow interpolation for ' +
                'unsupported periods.'
            )

        mean = self._compute_mean(imt, rup.mag, dists.rhypo.copy())
        mean = clip_mean(imt, mean)

        stddevs = self._compute_stddevs(imt, dists.rhypo.shape, stddev_types)

        return mean, stddevs
python
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.

        :raises ValueError:
            if imt is instance of :class:`openquake.hazardlib.imt.SA` with
            unsupported period.
        """
        assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
                   for stddev_type in stddev_types)

        if imt not in self.IMTS_TABLES:
            raise ValueError(
                'IMT %s not supported in FrankelEtAl1996NSHMP. ' % repr(imt) +
                'FrankelEtAl1996NSHMP does not allow interpolation for ' +
                'unsupported periods.'
            )

        mean = self._compute_mean(imt, rup.mag, dists.rhypo.copy())
        mean = clip_mean(imt, mean)

        stddevs = self._compute_stddevs(imt, dists.rhypo.shape, stddev_types)

        return mean, stddevs
['def', 'get_mean_and_stddevs', '(', 'self', ',', 'sites', ',', 'rup', ',', 'dists', ',', 'imt', ',', 'stddev_types', ')', ':', 'assert', 'all', '(', 'stddev_type', 'in', 'self', '.', 'DEFINED_FOR_STANDARD_DEVIATION_TYPES', 'for', 'stddev_type', 'in', 'stddev_types', ')', 'if', 'imt', 'not', 'in', 'self', '.', 'IMTS_TABLES', ':', 'raise', 'ValueError', '(', "'IMT %s not supported in FrankelEtAl1996NSHMP. '", '%', 'repr', '(', 'imt', ')', '+', "'FrankelEtAl1996NSHMP does not allow interpolation for '", '+', "'unsupported periods.'", ')', 'mean', '=', 'self', '.', '_compute_mean', '(', 'imt', ',', 'rup', '.', 'mag', ',', 'dists', '.', 'rhypo', '.', 'copy', '(', ')', ')', 'mean', '=', 'clip_mean', '(', 'imt', ',', 'mean', ')', 'stddevs', '=', 'self', '.', '_compute_stddevs', '(', 'imt', ',', 'dists', '.', 'rhypo', '.', 'shape', ',', 'stddev_types', ')', 'return', 'mean', ',', 'stddevs']
See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.

        :raises ValueError:
            if imt is instance of :class:`openquake.hazardlib.imt.SA` with
            unsupported period.
['See', ':', 'meth', ':', 'superclass', 'method', '<', '.', 'base', '.', 'GroundShakingIntensityModel', '.', 'get_mean_and_stddevs', '>', 'for', 'spec', 'of', 'input', 'and', 'result', 'values', '.']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/frankel_1996.py#L102-L127
4,672
sorgerlab/indra
indra/databases/ndex_client.py
set_style
def set_style(network_id, ndex_cred=None, template_id=None):
    """Set the style of the network to a given template network's style

    Parameters
    ----------
    network_id : str
        The UUID of the NDEx network whose style is to be changed.
    ndex_cred : dict
        A dictionary of NDEx credentials.
    template_id : Optional[str]
        The UUID of the NDEx network whose style is used on the network
        specified in the first argument.
    """
    if not template_id:
        template_id = "ea4ea3b7-6903-11e7-961c-0ac135e8bacf"

    server = 'http://public.ndexbio.org'

    username, password = get_default_ndex_cred(ndex_cred)

    source_network = ndex2.create_nice_cx_from_server(username=username,
                                                      password=password,
                                                      uuid=network_id,
                                                      server=server)

    source_network.apply_template(server, template_id)

    source_network.update_to(network_id, server=server, username=username,
                             password=password)
python
def set_style(network_id, ndex_cred=None, template_id=None):
    """Set the style of the network to a given template network's style

    Parameters
    ----------
    network_id : str
        The UUID of the NDEx network whose style is to be changed.
    ndex_cred : dict
        A dictionary of NDEx credentials.
    template_id : Optional[str]
        The UUID of the NDEx network whose style is used on the network
        specified in the first argument.
    """
    if not template_id:
        template_id = "ea4ea3b7-6903-11e7-961c-0ac135e8bacf"

    server = 'http://public.ndexbio.org'

    username, password = get_default_ndex_cred(ndex_cred)

    source_network = ndex2.create_nice_cx_from_server(username=username,
                                                      password=password,
                                                      uuid=network_id,
                                                      server=server)

    source_network.apply_template(server, template_id)

    source_network.update_to(network_id, server=server, username=username,
                             password=password)
['def', 'set_style', '(', 'network_id', ',', 'ndex_cred', '=', 'None', ',', 'template_id', '=', 'None', ')', ':', 'if', 'not', 'template_id', ':', 'template_id', '=', '"ea4ea3b7-6903-11e7-961c-0ac135e8bacf"', 'server', '=', "'http://public.ndexbio.org'", 'username', ',', 'password', '=', 'get_default_ndex_cred', '(', 'ndex_cred', ')', 'source_network', '=', 'ndex2', '.', 'create_nice_cx_from_server', '(', 'username', '=', 'username', ',', 'password', '=', 'password', ',', 'uuid', '=', 'network_id', ',', 'server', '=', 'server', ')', 'source_network', '.', 'apply_template', '(', 'server', ',', 'template_id', ')', 'source_network', '.', 'update_to', '(', 'network_id', ',', 'server', '=', 'server', ',', 'username', '=', 'username', ',', 'password', '=', 'password', ')']
Set the style of the network to a given template network's style

    Parameters
    ----------
    network_id : str
        The UUID of the NDEx network whose style is to be changed.
    ndex_cred : dict
        A dictionary of NDEx credentials.
    template_id : Optional[str]
        The UUID of the NDEx network whose style is used on the network
        specified in the first argument.
['Set', 'the', 'style', 'of', 'the', 'network', 'to', 'a', 'given', 'template', 'network', 's', 'style']
train
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/ndex_client.py#L192-L219
4,673
inveniosoftware/invenio-files-rest
invenio_files_rest/models.py
BucketTag.get_value
def get_value(cls, bucket, key):
        """Get tag value."""
        obj = cls.get(bucket, key)
        return obj.value if obj else None
python
def get_value(cls, bucket, key):
        """Get tag value."""
        obj = cls.get(bucket, key)
        return obj.value if obj else None
['def', 'get_value', '(', 'cls', ',', 'bucket', ',', 'key', ')', ':', 'obj', '=', 'cls', '.', 'get', '(', 'bucket', ',', 'key', ')', 'return', 'obj', '.', 'value', 'if', 'obj', 'else', 'None']
Get tag value.
['Get', 'tag', 'value', '.']
train
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/models.py#L623-L626
4,674
mandiant/ioc_writer
ioc_writer/ioc_api.py
IOC.update_link_rel_based
def update_link_rel_based(self, old_rel, new_rel=None, new_text=None, single_link=False):
        """
        Update link nodes, based on the existing link/@rel values.

        This requires specifying a link/@rel value to update, and either a new
        link/@rel value, or a new link/text() value for all links which match
        the link/@rel value.  Optionally, only the first link which matches the
        link/@rel value will be modified.

        :param old_rel: The link/@rel value used to select link nodes to update
        :param new_rel: The new link/@rel value
        :param new_text: The new link/text() value
        :param single_link: Determine if only the first, or multiple, links are modified.
        :return: True, unless there are no links with link[@rel='old_rel']
        """
        links = self.metadata.xpath('./links/link[@rel="{}"]'.format(old_rel))
        if len(links) < 1:
            log.warning('No links with link/[@rel="{}"]'.format(str(old_rel)))
            return False
        if new_rel and not new_text:
            # update link/@rel value
            for link in links:
                link.attrib['rel'] = new_rel
                if single_link:
                    break
        elif not new_rel and new_text:
            # update link/@text() value
            for link in links:
                link.text = new_text
                if single_link:
                    break
        elif new_rel and new_text:
            log.warning('Cannot update rel and text at the same time')
            return False
        else:
            log.warning('Must specify either new_rel or new_text arguments')
            return False
        return True
python
def update_link_rel_based(self, old_rel, new_rel=None, new_text=None, single_link=False):
        """
        Update link nodes, based on the existing link/@rel values.

        This requires specifying a link/@rel value to update, and either a new
        link/@rel value, or a new link/text() value for all links which match
        the link/@rel value.  Optionally, only the first link which matches the
        link/@rel value will be modified.

        :param old_rel: The link/@rel value used to select link nodes to update
        :param new_rel: The new link/@rel value
        :param new_text: The new link/text() value
        :param single_link: Determine if only the first, or multiple, links are modified.
        :return: True, unless there are no links with link[@rel='old_rel']
        """
        links = self.metadata.xpath('./links/link[@rel="{}"]'.format(old_rel))
        if len(links) < 1:
            log.warning('No links with link/[@rel="{}"]'.format(str(old_rel)))
            return False
        if new_rel and not new_text:
            # update link/@rel value
            for link in links:
                link.attrib['rel'] = new_rel
                if single_link:
                    break
        elif not new_rel and new_text:
            # update link/@text() value
            for link in links:
                link.text = new_text
                if single_link:
                    break
        elif new_rel and new_text:
            log.warning('Cannot update rel and text at the same time')
            return False
        else:
            log.warning('Must specify either new_rel or new_text arguments')
            return False
        return True
['def', 'update_link_rel_based', '(', 'self', ',', 'old_rel', ',', 'new_rel', '=', 'None', ',', 'new_text', '=', 'None', ',', 'single_link', '=', 'False', ')', ':', 'links', '=', 'self', '.', 'metadata', '.', 'xpath', '(', '\'./links/link[@rel="{}"]\'', '.', 'format', '(', 'old_rel', ')', ')', 'if', 'len', '(', 'links', ')', '<', '1', ':', 'log', '.', 'warning', '(', '\'No links with link/[@rel="{}"]\'', '.', 'format', '(', 'str', '(', 'old_rel', ')', ')', ')', 'return', 'False', 'if', 'new_rel', 'and', 'not', 'new_text', ':', '# update link/@rel value', 'for', 'link', 'in', 'links', ':', 'link', '.', 'attrib', '[', "'rel'", ']', '=', 'new_rel', 'if', 'single_link', ':', 'break', 'elif', 'not', 'new_rel', 'and', 'new_text', ':', '# update link/@text() value', 'for', 'link', 'in', 'links', ':', 'link', '.', 'text', '=', 'new_text', 'if', 'single_link', ':', 'break', 'elif', 'new_rel', 'and', 'new_text', ':', 'log', '.', 'warning', '(', "'Cannot update rel and text at the same time'", ')', 'return', 'False', 'else', ':', 'log', '.', 'warning', '(', "'Must specify either new_rel or new_text arguments'", ')', 'return', 'False', 'return', 'True']
Update link nodes, based on the existing link/@rel values.

        This requires specifying a link/@rel value to update, and either a new
        link/@rel value, or a new link/text() value for all links which match
        the link/@rel value.  Optionally, only the first link which matches the
        link/@rel value will be modified.

        :param old_rel: The link/@rel value used to select link nodes to update
        :param new_rel: The new link/@rel value
        :param new_text: The new link/text() value
        :param single_link: Determine if only the first, or multiple, links are modified.
        :return: True, unless there are no links with link[@rel='old_rel']
['Update', 'link', 'nodes', 'based', 'on', 'the', 'existing', 'link', '/', '@rel', 'values', '.']
train
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_api.py#L313-L350
4,675
aio-libs/yarl
yarl/__init__.py
URL.human_repr
def human_repr(self):
        """Return decoded human readable string for URL representation."""
        return urlunsplit(
            SplitResult(
                self.scheme,
                self._make_netloc(
                    self.user, self.password, self.host, self._val.port, encode=False
                ),
                self.path,
                self.query_string,
                self.fragment,
            )
        )
python
def human_repr(self):
        """Return decoded human readable string for URL representation."""
        return urlunsplit(
            SplitResult(
                self.scheme,
                self._make_netloc(
                    self.user, self.password, self.host, self._val.port, encode=False
                ),
                self.path,
                self.query_string,
                self.fragment,
            )
        )
['def', 'human_repr', '(', 'self', ')', ':', 'return', 'urlunsplit', '(', 'SplitResult', '(', 'self', '.', 'scheme', ',', 'self', '.', '_make_netloc', '(', 'self', '.', 'user', ',', 'self', '.', 'password', ',', 'self', '.', 'host', ',', 'self', '.', '_val', '.', 'port', ',', 'encode', '=', 'False', ')', ',', 'self', '.', 'path', ',', 'self', '.', 'query_string', ',', 'self', '.', 'fragment', ',', ')', ')']
Return decoded human readable string for URL representation.
['Return', 'decoded', 'human', 'readable', 'string', 'for', 'URL', 'representation', '.']
train
https://github.com/aio-libs/yarl/blob/e47da02c00ad764e030ca7647a9565548c97d362/yarl/__init__.py#L968-L981
4,676
saltstack/salt
salt/modules/csf.py
deny
def deny(ip,
         port=None,
         proto='tcp',
         direction='in',
         port_origin='d',
         ip_origin='d',
         ttl=None,
         comment=''):
    '''
    Add a rule to csf denied hosts
    See :func:`_access_rule`.
    1- Deny an IP:
    CLI Example:

    .. code-block:: bash

        salt '*' csf.deny 127.0.0.1
        salt '*' csf.deny 127.0.0.1 comment="Too localhosty"
    '''
    return _access_rule('deny', ip, port, proto, direction, port_origin, ip_origin, comment)
python
def deny(ip,
         port=None,
         proto='tcp',
         direction='in',
         port_origin='d',
         ip_origin='d',
         ttl=None,
         comment=''):
    '''
    Add a rule to csf denied hosts
    See :func:`_access_rule`.
    1- Deny an IP:
    CLI Example:

    .. code-block:: bash

        salt '*' csf.deny 127.0.0.1
        salt '*' csf.deny 127.0.0.1 comment="Too localhosty"
    '''
    return _access_rule('deny', ip, port, proto, direction, port_origin, ip_origin, comment)
['def', 'deny', '(', 'ip', ',', 'port', '=', 'None', ',', 'proto', '=', "'tcp'", ',', 'direction', '=', "'in'", ',', 'port_origin', '=', "'d'", ',', 'ip_origin', '=', "'d'", ',', 'ttl', '=', 'None', ',', 'comment', '=', "''", ')', ':', 'return', '_access_rule', '(', "'deny'", ',', 'ip', ',', 'port', ',', 'proto', ',', 'direction', ',', 'port_origin', ',', 'ip_origin', ',', 'comment', ')']
Add a rule to csf denied hosts
    See :func:`_access_rule`.
    1- Deny an IP:
    CLI Example:

    .. code-block:: bash

        salt '*' csf.deny 127.0.0.1
        salt '*' csf.deny 127.0.0.1 comment="Too localhosty"
['Add', 'a', 'rule', 'to', 'csf', 'denied', 'hosts', 'See', ':', 'func', ':', '_access_rule', '.', '1', '-', 'Deny', 'an', 'IP', ':', 'CLI', 'Example', ':']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/csf.py#L450-L469
4,677
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._update_vdr_vxrheadtail
def _update_vdr_vxrheadtail(self, f, vdr_offset, VXRoffset):
        '''
        This sets a VXR to be the first and last VXR in the VDR
        '''
        # VDR's VXRhead
        self._update_offset_value(f, vdr_offset+28, 8, VXRoffset)
        # VDR's VXRtail
        self._update_offset_value(f, vdr_offset+36, 8, VXRoffset)
python
def _update_vdr_vxrheadtail(self, f, vdr_offset, VXRoffset):
        '''
        This sets a VXR to be the first and last VXR in the VDR
        '''
        # VDR's VXRhead
        self._update_offset_value(f, vdr_offset+28, 8, VXRoffset)
        # VDR's VXRtail
        self._update_offset_value(f, vdr_offset+36, 8, VXRoffset)
['def', '_update_vdr_vxrheadtail', '(', 'self', ',', 'f', ',', 'vdr_offset', ',', 'VXRoffset', ')', ':', "# VDR's VXRhead", 'self', '.', '_update_offset_value', '(', 'f', ',', 'vdr_offset', '+', '28', ',', '8', ',', 'VXRoffset', ')', "# VDR's VXRtail", 'self', '.', '_update_offset_value', '(', 'f', ',', 'vdr_offset', '+', '36', ',', '8', ',', 'VXRoffset', ')']
This sets a VXR to be the first and last VXR in the VDR
['This', 'sets', 'a', 'VXR', 'to', 'be', 'the', 'first', 'and', 'last', 'VXR', 'in', 'the', 'VDR']
train
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1286-L1293
4,678
MisterWil/abodepy
abodepy/devices/sensor.py
AbodeSensor._get_numeric_status
def _get_numeric_status(self, key):
        """Extract the numeric value from the statuses object."""
        value = self._get_status(key)

        if value and any(i.isdigit() for i in value):
            return float(re.sub("[^0-9.]", "", value))
        return None
python
def _get_numeric_status(self, key):
        """Extract the numeric value from the statuses object."""
        value = self._get_status(key)

        if value and any(i.isdigit() for i in value):
            return float(re.sub("[^0-9.]", "", value))
        return None
['def', '_get_numeric_status', '(', 'self', ',', 'key', ')', ':', 'value', '=', 'self', '.', '_get_status', '(', 'key', ')', 'if', 'value', 'and', 'any', '(', 'i', '.', 'isdigit', '(', ')', 'for', 'i', 'in', 'value', ')', ':', 'return', 'float', '(', 're', '.', 'sub', '(', '"[^0-9.]"', ',', '""', ',', 'value', ')', ')', 'return', 'None']
Extract the numeric value from the statuses object.
['Extract', 'the', 'numeric', 'value', 'from', 'the', 'statuses', 'object', '.']
train
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/sensor.py#L14-L20
4,679
HumanCellAtlas/dcp-cli
hca/dss/__init__.py
DSSClient.download_manifest_v2
def download_manifest_v2(self, manifest, replica,
                             num_retries=10,
                             min_delay_seconds=0.25,
                             download_dir='.'):
        """
        Process the given manifest file in TSV (tab-separated values) format
        and download the files referenced by it. The files are downloaded in
        the version 2 format.

        This download format will serve as the main storage format for
        downloaded files. If a user specifies a different format for download
        (coming in the future) the files will first be downloaded in this
        format, then hard-linked to the user's preferred format.

        :param str manifest: path to a TSV (tab-separated values) file listing
            files to download
        :param str replica: the replica to download from. The supported
            replicas are: `aws` for Amazon Web Services, and `gcp` for Google
            Cloud Platform. [aws, gcp]
        :param int num_retries: The initial quota of download failures to
            accept before exiting due to failures. The number of retries
            increases and decreases as file chunks succeed and fail.
        :param float min_delay_seconds: The minimum number of seconds to wait
            in between retries.

        Process the given manifest file in TSV (tab-separated values) format
        and download the files referenced by it.

        Each row in the manifest represents one file in DSS. The manifest must
        have a header row. The header row must declare the following columns:

        * `file_uuid` - the UUID of the file in DSS.

        * `file_version` - the version of the file in DSS.

        The TSV may have additional columns. Those columns will be ignored.
        The ordering of the columns is insignificant because the TSV is
        required to have a header row.
        """
        fieldnames, rows = self._parse_manifest(manifest)
        errors = 0
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
            futures_to_dss_file = {}
            for row in rows:
                dss_file = DSSFile.from_manifest_row(row, replica)
                future = executor.submit(self._download_to_filestore, download_dir, dss_file,
                                         num_retries=num_retries, min_delay_seconds=min_delay_seconds)
                futures_to_dss_file[future] = dss_file
            for future in concurrent.futures.as_completed(futures_to_dss_file):
                dss_file = futures_to_dss_file[future]
                try:
                    future.result()
                except Exception as e:
                    errors += 1
                    logger.warning('Failed to download file %s version %s from replica %s',
                                   dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
        if errors:
            raise RuntimeError('{} file(s) failed to download'.format(errors))
        else:
            self._write_output_manifest(manifest, download_dir)
python
def download_manifest_v2(self, manifest, replica,
                             num_retries=10,
                             min_delay_seconds=0.25,
                             download_dir='.'):
        """
        Process the given manifest file in TSV (tab-separated values) format
        and download the files referenced by it. The files are downloaded in
        the version 2 format.

        This download format will serve as the main storage format for
        downloaded files. If a user specifies a different format for download
        (coming in the future) the files will first be downloaded in this
        format, then hard-linked to the user's preferred format.

        :param str manifest: path to a TSV (tab-separated values) file listing
            files to download
        :param str replica: the replica to download from. The supported
            replicas are: `aws` for Amazon Web Services, and `gcp` for Google
            Cloud Platform. [aws, gcp]
        :param int num_retries: The initial quota of download failures to
            accept before exiting due to failures. The number of retries
            increases and decreases as file chunks succeed and fail.
        :param float min_delay_seconds: The minimum number of seconds to wait
            in between retries.

        Process the given manifest file in TSV (tab-separated values) format
        and download the files referenced by it.

        Each row in the manifest represents one file in DSS. The manifest must
        have a header row. The header row must declare the following columns:

        * `file_uuid` - the UUID of the file in DSS.

        * `file_version` - the version of the file in DSS.

        The TSV may have additional columns. Those columns will be ignored.
        The ordering of the columns is insignificant because the TSV is
        required to have a header row.
        """
        fieldnames, rows = self._parse_manifest(manifest)
        errors = 0
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
            futures_to_dss_file = {}
            for row in rows:
                dss_file = DSSFile.from_manifest_row(row, replica)
                future = executor.submit(self._download_to_filestore, download_dir, dss_file,
                                         num_retries=num_retries, min_delay_seconds=min_delay_seconds)
                futures_to_dss_file[future] = dss_file
            for future in concurrent.futures.as_completed(futures_to_dss_file):
                dss_file = futures_to_dss_file[future]
                try:
                    future.result()
                except Exception as e:
                    errors += 1
                    logger.warning('Failed to download file %s version %s from replica %s',
                                   dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
        if errors:
            raise RuntimeError('{} file(s) failed to download'.format(errors))
        else:
            self._write_output_manifest(manifest, download_dir)
['def', 'download_manifest_v2', '(', 'self', ',', 'manifest', ',', 'replica', ',', 'num_retries', '=', '10', ',', 'min_delay_seconds', '=', '0.25', ',', 'download_dir', '=', "'.'", ')', ':', 'fieldnames', ',', 'rows', '=', 'self', '.', '_parse_manifest', '(', 'manifest', ')', 'errors', '=', '0', 'with', 'concurrent', '.', 'futures', '.', 'ThreadPoolExecutor', '(', 'max_workers', '=', 'self', '.', 'threads', ')', 'as', 'executor', ':', 'futures_to_dss_file', '=', '{', '}', 'for', 'row', 'in', 'rows', ':', 'dss_file', '=', 'DSSFile', '.', 'from_manifest_row', '(', 'row', ',', 'replica', ')', 'future', '=', 'executor', '.', 'submit', '(', 'self', '.', '_download_to_filestore', ',', 'download_dir', ',', 'dss_file', ',', 'num_retries', '=', 'num_retries', ',', 'min_delay_seconds', '=', 'min_delay_seconds', ')', 'futures_to_dss_file', '[', 'future', ']', '=', 'dss_file', 'for', 'future', 'in', 'concurrent', '.', 'futures', '.', 'as_completed', '(', 'futures_to_dss_file', ')', ':', 'dss_file', '=', 'futures_to_dss_file', '[', 'future', ']', 'try', ':', 'future', '.', 'result', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'errors', '+=', '1', 'logger', '.', 'warning', '(', "'Failed to download file %s version %s from replica %s'", ',', 'dss_file', '.', 'uuid', ',', 'dss_file', '.', 'version', ',', 'dss_file', '.', 'replica', ',', 'exc_info', '=', 'e', ')', 'if', 'errors', ':', 'raise', 'RuntimeError', '(', "'{} file(s) failed to download'", '.', 'format', '(', 'errors', ')', ')', 'else', ':', 'self', '.', '_write_output_manifest', '(', 'manifest', ',', 'download_dir', ')']
Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it. The files are downloaded in the version 2 format.

        This download format will serve as the main storage format for downloaded files. If a user specifies a different format for download (coming in the future) the files will first be downloaded in this format, then hard-linked to the user's preferred format.

        :param str manifest: path to a TSV (tab-separated values) file listing files to download
        :param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and `gcp` for Google Cloud Platform. [aws, gcp]
        :param int num_retries: The initial quota of download failures to accept before exiting due to failures. The number of retries increases and decreases as file chunks succeed and fail.
        :param float min_delay_seconds: The minimum number of seconds to wait in between retries.

        Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it.

        Each row in the manifest represents one file in DSS. The manifest must have a header row. The header row must declare the following columns:

        * `file_uuid` - the UUID of the file in DSS.

        * `file_version` - the version of the file in DSS.

        The TSV may have additional columns. Those columns will be ignored. The ordering of the columns is insignificant because the TSV is required to have a header row.
['Process', 'the', 'given', 'manifest', 'file', 'in', 'TSV', '(', 'tab', '-', 'separated', 'values', ')', 'format', 'and', 'download', 'the', 'files', 'referenced', 'by', 'it', '.', 'The', 'files', 'are', 'downloaded', 'in', 'the', 'version', '2', 'format', '.']
train
https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/dss/__init__.py#L352-L405
4,680
cackharot/suds-py3
suds/servicedefinition.py
ServiceDefinition.nextprefix
def nextprefix(self):
        """
        Get the next available prefix.  This means a prefix starting with 'ns' with
        a number appended as (ns0, ns1, ..) that is not already defined on the
        wsdl document.
        """
        used = [ns[0] for ns in self.prefixes]
        used += [ns[0] for ns in self.wsdl.root.nsprefixes.items()]
        for n in range(0, 1024):
            p = 'ns%d' % n
            if p not in used:
                return p
        raise Exception('prefixes exhausted')
python
def nextprefix(self):
        """
        Get the next available prefix.  This means a prefix starting with 'ns' with
        a number appended as (ns0, ns1, ..) that is not already defined on the
        wsdl document.
        """
        used = [ns[0] for ns in self.prefixes]
        used += [ns[0] for ns in self.wsdl.root.nsprefixes.items()]
        for n in range(0, 1024):
            p = 'ns%d' % n
            if p not in used:
                return p
        raise Exception('prefixes exhausted')
['def', 'nextprefix', '(', 'self', ')', ':', 'used', '=', '[', 'ns', '[', '0', ']', 'for', 'ns', 'in', 'self', '.', 'prefixes', ']', 'used', '+=', '[', 'ns', '[', '0', ']', 'for', 'ns', 'in', 'self', '.', 'wsdl', '.', 'root', '.', 'nsprefixes', '.', 'items', '(', ')', ']', 'for', 'n', 'in', 'range', '(', '0', ',', '1024', ')', ':', 'p', '=', "'ns%d'", '%', 'n', 'if', 'p', 'not', 'in', 'used', ':', 'return', 'p', 'raise', 'Exception', '(', "'prefixes exhausted'", ')']
Get the next available prefix. This means a prefix starting with 'ns' with a number appended as (ns0, ns1, ..) that is not already defined on the wsdl document.
['Get', 'the', 'next', 'available', 'prefix', '.', 'This', 'means', 'a', 'prefix', 'starting', 'with', 'ns', 'with', 'a', 'number', 'appended', 'as', '(', 'ns0', 'ns1', '..', ')', 'that', 'is', 'not', 'already', 'defined', 'on', 'the', 'wsdl', 'document', '.']
train
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/servicedefinition.py#L158-L170
4,681
rongcloud/server-sdk-python
rongcloud/message.py
Message.deleteMessage
def deleteMessage(self, date):
        """
        Message history deletion method (deletes all conversation message
        records within a given hour of a given day in the APP. After this
        interface returns success, the message record file for the hour
        specified by the date parameter will be permanently deleted within
        the following 5-10 minutes.)

        @param date: a specific hour of a specific day in Beijing time, in the
                     format 2014010101, meaning 1 a.m. on January 1, 2014. (required)

        @return code: return code; 200 means success.
        @return errorMessage: error message.
        """

        desc = {
            "name": "CodeSuccessReslut",
            "desc": " http 成功返回结果",
            "fields": [{
                "name": "code",
                "type": "Integer",
                "desc": "返回码,200 为正常。"
            }, {
                "name": "errorMessage",
                "type": "String",
                "desc": "错误信息。"
            }]
        }

        r = self.call_api(
            method=('API', 'POST', 'application/x-www-form-urlencoded'),
            action='/message/history/delete.json',
            params={"date": date})
        return Response(r, desc)
python
def deleteMessage(self, date):
        """
        Message history deletion method (deletes all conversation message
        records within a given hour of a given day in the APP. After this
        interface returns success, the message record file for the hour
        specified by the date parameter will be permanently deleted within
        the following 5-10 minutes.)

        @param date: a specific hour of a specific day in Beijing time, in the
                     format 2014010101, meaning 1 a.m. on January 1, 2014. (required)

        @return code: return code; 200 means success.
        @return errorMessage: error message.
        """

        desc = {
            "name": "CodeSuccessReslut",
            "desc": " http 成功返回结果",
            "fields": [{
                "name": "code",
                "type": "Integer",
                "desc": "返回码,200 为正常。"
            }, {
                "name": "errorMessage",
                "type": "String",
                "desc": "错误信息。"
            }]
        }

        r = self.call_api(
            method=('API', 'POST', 'application/x-www-form-urlencoded'),
            action='/message/history/delete.json',
            params={"date": date})
        return Response(r, desc)
['def', 'deleteMessage', '(', 'self', ',', 'date', ')', ':', 'desc', '=', '{', '"name"', ':', '"CodeSuccessReslut"', ',', '"desc"', ':', '" http 成功返回结果",', '', '"fields"', ':', '[', '{', '"name"', ':', '"code"', ',', '"type"', ':', '"Integer"', ',', '"desc"', ':', '"返回码,200 为正常。"', '}', ',', '{', '"name"', ':', '"errorMessage"', ',', '"type"', ':', '"String"', ',', '"desc"', ':', '"错误信息。"', '}', ']', '}', 'r', '=', 'self', '.', 'call_api', '(', 'method', '=', '(', "'API'", ',', "'POST'", ',', "'application/x-www-form-urlencoded'", ')', ',', 'action', '=', "'/message/history/delete.json'", ',', 'params', '=', '{', '"date"', ':', 'date', '}', ')', 'return', 'Response', '(', 'r', ',', 'desc', ')']
Message history deletion method (deletes all conversation message records within a given hour of a given day in the APP. After this interface returns success, the message record file for the hour specified by the date parameter will be permanently deleted within the following 5-10 minutes.)

        @param date: a specific hour of a specific day in Beijing time, in the format 2014010101, meaning 1 a.m. on January 1, 2014. (required)

        @return code: return code; 200 means success.
        @return errorMessage: error message.
['消息历史记录删除方法(删除', 'APP', '内指定某天某小时内的所有会话消息记录。调用该接口返回成功后,date参数指定的某小时的消息记录文件将在随后的5', '-', '10分钟内被永久删除。)', '方法']
train
https://github.com/rongcloud/server-sdk-python/blob/3daadd8b67c84cc5d2a9419e8d45fd69c9baf976/rongcloud/message.py#L402-L428
4,682
Cito/DBUtils
DBUtils/PersistentPg.py
PersistentPg.steady_connection
def steady_connection(self):
        """Get a steady, non-persistent PyGreSQL connection."""
        return SteadyPgConnection(
            self._maxusage, self._setsession, self._closeable,
            *self._args, **self._kwargs)
python
def steady_connection(self):
        """Get a steady, non-persistent PyGreSQL connection."""
        return SteadyPgConnection(
            self._maxusage, self._setsession, self._closeable,
            *self._args, **self._kwargs)
['def', 'steady_connection', '(', 'self', ')', ':', 'return', 'SteadyPgConnection', '(', 'self', '.', '_maxusage', ',', 'self', '.', '_setsession', ',', 'self', '.', '_closeable', ',', '*', 'self', '.', '_args', ',', '*', '*', 'self', '.', '_kwargs', ')']
Get a steady, non-persistent PyGreSQL connection.
['Get', 'a', 'steady', 'non', '-', 'persistent', 'PyGreSQL', 'connection', '.']
train
https://github.com/Cito/DBUtils/blob/90e8825e038f08c82044b8e50831480175fa026a/DBUtils/PersistentPg.py#L160-L164
4,683
SBRG/ssbio
ssbio/utils.py
flatlist_dropdup
def flatlist_dropdup(list_of_lists):
    """Make a single list out of a list of lists, and drop all duplicates.

    Args:
        list_of_lists: List of lists.

    Returns:
        list: List of single objects.

    """
    return list(set([str(item) for sublist in list_of_lists for item in sublist]))
python
def flatlist_dropdup(list_of_lists):
    """Make a single list out of a list of lists, and drop all duplicates.

    Args:
        list_of_lists: List of lists.

    Returns:
        list: List of single objects.

    """
    return list(set([str(item) for sublist in list_of_lists for item in sublist]))
['def', 'flatlist_dropdup', '(', 'list_of_lists', ')', ':', 'return', 'list', '(', 'set', '(', '[', 'str', '(', 'item', ')', 'for', 'sublist', 'in', 'list_of_lists', 'for', 'item', 'in', 'sublist', ']', ')', ')']
Make a single list out of a list of lists, and drop all duplicates.

    Args:
        list_of_lists: List of lists.

    Returns:
        list: List of single objects.
['Make', 'a', 'single', 'list', 'out', 'of', 'a', 'list', 'of', 'lists', 'and', 'drop', 'all', 'duplicates', '.']
train
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L788-L798
4,684
davidemoro/play_mqtt
play_mqtt/providers.py
MQTTProvider.command_publish
def command_publish(self, command, **kwargs):
        """ Publish a MQTT message """
        mqttc = mqtt.Client()
        mqttc.connect(
            command['host'],
            port=int(command['port']))
        mqttc.loop_start()

        try:
            mqttc.publish(
                command['endpoint'],
                command['payload'])
        finally:
            mqttc.loop_stop(force=False)
python
def command_publish(self, command, **kwargs):
        """ Publish a MQTT message """
        mqttc = mqtt.Client()
        mqttc.connect(
            command['host'],
            port=int(command['port']))
        mqttc.loop_start()

        try:
            mqttc.publish(
                command['endpoint'],
                command['payload'])
        finally:
            mqttc.loop_stop(force=False)
['def', 'command_publish', '(', 'self', ',', 'command', ',', '*', '*', 'kwargs', ')', ':', 'mqttc', '=', 'mqtt', '.', 'Client', '(', ')', 'mqttc', '.', 'connect', '(', 'command', '[', "'host'", ']', ',', 'port', '=', 'int', '(', 'command', '[', "'port'", ']', ')', ')', 'mqttc', '.', 'loop_start', '(', ')', 'try', ':', 'mqttc', '.', 'publish', '(', 'command', '[', "'endpoint'", ']', ',', 'command', '[', "'payload'", ']', ')', 'finally', ':', 'mqttc', '.', 'loop_stop', '(', 'force', '=', 'False', ')']
Publish a MQTT message
['Publish', 'a', 'MQTT', 'message']
train
https://github.com/davidemoro/play_mqtt/blob/4994074c20ab8a5abd221f8b8088e5fc44ba2a5e/play_mqtt/providers.py#L8-L22
4,685
indico/indico-plugins
livesync/indico_livesync/cli.py
available_backends
def available_backends():
    """Lists the currently available backend types"""
    print 'The following LiveSync agents are available:'
    for name, backend in current_plugin.backend_classes.iteritems():
        print cformat(' - %{white!}{}%{reset}: {} ({})').format(name, backend.title, backend.description)
python
def available_backends():
    """Lists the currently available backend types"""
    print 'The following LiveSync agents are available:'
    for name, backend in current_plugin.backend_classes.iteritems():
        print cformat(' - %{white!}{}%{reset}: {} ({})').format(name, backend.title, backend.description)
['def', 'available_backends', '(', ')', ':', 'print', "'The following LiveSync agents are available:'", 'for', 'name', ',', 'backend', 'in', 'current_plugin', '.', 'backend_classes', '.', 'iteritems', '(', ')', ':', 'print', 'cformat', '(', "' - %{white!}{}%{reset}: {} ({})'", ')', '.', 'format', '(', 'name', ',', 'backend', '.', 'title', ',', 'backend', '.', 'description', ')']
Lists the currently available backend types
['Lists', 'the', 'currently', 'available', 'backend', 'types']
train
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/livesync/indico_livesync/cli.py#L37-L41
4,686
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/util.py
get_queue_name
def get_queue_name(queue_name):
  """Determine which queue MR should run on.

  How to choose the queue:
  1. If user provided one, use that.
  2. If we are starting a mr from taskqueue, inherit that queue.
     If it's a special queue, fall back to the default queue.
  3. Default queue.

  If user is using any MR pipeline interface, pipeline.start takes a
  "queue_name" argument. The pipeline will run on that queue and MR will
  simply inherit the queue_name.

  Args:
    queue_name: queue_name from user. May be None.

  Returns:
    The queue name to run on.
  """
  if queue_name:
    return queue_name
  queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
                              parameters.config.QUEUE_NAME)
  if len(queue_name) > 1 and queue_name[0:2] == "__":
    # We are currently in some special queue. E.g. __cron.
    return parameters.config.QUEUE_NAME
  else:
    return queue_name
python
def get_queue_name(queue_name):
  """Determine which queue MR should run on.

  How to choose the queue:
  1. If user provided one, use that.
  2. If we are starting a mr from taskqueue, inherit that queue.
     If it's a special queue, fall back to the default queue.
  3. Default queue.

  If user is using any MR pipeline interface, pipeline.start takes a
  "queue_name" argument. The pipeline will run on that queue and MR will
  simply inherit the queue_name.

  Args:
    queue_name: queue_name from user. May be None.

  Returns:
    The queue name to run on.
  """
  if queue_name:
    return queue_name
  queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
                              parameters.config.QUEUE_NAME)
  if len(queue_name) > 1 and queue_name[0:2] == "__":
    # We are currently in some special queue. E.g. __cron.
    return parameters.config.QUEUE_NAME
  else:
    return queue_name
['def', 'get_queue_name', '(', 'queue_name', ')', ':', 'if', 'queue_name', ':', 'return', 'queue_name', 'queue_name', '=', 'os', '.', 'environ', '.', 'get', '(', '"HTTP_X_APPENGINE_QUEUENAME"', ',', 'parameters', '.', 'config', '.', 'QUEUE_NAME', ')', 'if', 'len', '(', 'queue_name', ')', '>', '1', 'and', 'queue_name', '[', '0', ':', '2', ']', '==', '"__"', ':', '# We are currently in some special queue. E.g. __cron.', 'return', 'parameters', '.', 'config', '.', 'QUEUE_NAME', 'else', ':', 'return', 'queue_name']
Determine which queue MR should run on.

  How to choose the queue:
  1. If user provided one, use that.
  2. If we are starting a mr from taskqueue, inherit that queue.
     If it's a special queue, fall back to the default queue.
  3. Default queue.

  If user is using any MR pipeline interface, pipeline.start takes a
  "queue_name" argument. The pipeline will run on that queue and MR will
  simply inherit the queue_name.

  Args:
    queue_name: queue_name from user. May be None.

  Returns:
    The queue name to run on.
['Determine', 'which', 'queue', 'MR', 'should', 'run', 'on', '.']
train
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/util.py#L127-L154
4,687
QuantEcon/QuantEcon.py
quantecon/lqnash.py
nnash
def nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2, beta=1.0,
          tol=1e-8, max_iter=1000, random_state=None):
    r"""
    Compute the limit of a Nash linear quadratic dynamic game. In this
    problem, player i minimizes

    .. math::
        \sum_{t=0}^{\infty}
        \left\{
            x_t' r_i x_t + 2 x_t' w_i
            u_{it} + u_{it}' q_i u_{it} + u_{jt}' s_i u_{jt} + 2 u_{jt}'
            m_i u_{it}
        \right\}

    subject to the law of motion

    .. math::
        x_{t+1} = A x_t + b_1 u_{1t} + b_2 u_{2t}

    and a perceived control law :math:`u_j(t) = - f_j x_t` for the other
    player.

    The solution computed in this routine is the :math:`f_i` and
    :math:`p_i` of the associated double optimal linear regulator
    problem.

    Parameters
    ----------
    A : scalar(float) or array_like(float)
        Corresponds to the above equation, should be of size (n, n)
    B1 : scalar(float) or array_like(float)
        As above, size (n, k_1)
    B2 : scalar(float) or array_like(float)
        As above, size (n, k_2)
    R1 : scalar(float) or array_like(float)
        As above, size (n, n)
    R2 : scalar(float) or array_like(float)
        As above, size (n, n)
    Q1 : scalar(float) or array_like(float)
        As above, size (k_1, k_1)
    Q2 : scalar(float) or array_like(float)
        As above, size (k_2, k_2)
    S1 : scalar(float) or array_like(float)
        As above, size (k_1, k_1)
    S2 : scalar(float) or array_like(float)
        As above, size (k_2, k_2)
    W1 : scalar(float) or array_like(float)
        As above, size (n, k_1)
    W2 : scalar(float) or array_like(float)
        As above, size (n, k_2)
    M1 : scalar(float) or array_like(float)
        As above, size (k_2, k_1)
    M2 : scalar(float) or array_like(float)
        As above, size (k_1, k_2)
    beta : scalar(float), optional(default=1.0)
        Discount rate
    tol : scalar(float), optional(default=1e-8)
        This is the tolerance level for convergence
    max_iter : scalar(int), optional(default=1000)
        This is the maximum number of iterations allowed
    random_state : int or np.random.RandomState, optional
        Random seed (integer) or np.random.RandomState instance to set
        the initial state of the random number generator for
        reproducibility. If None, a randomly initialized RandomState is
        used.

    Returns
    -------
    F1 : array_like, dtype=float, shape=(k_1, n)
        Feedback law for agent 1
    F2 : array_like, dtype=float, shape=(k_2, n)
        Feedback law for agent 2
    P1 : array_like, dtype=float, shape=(n, n)
        The steady-state solution to the associated discrete matrix
        Riccati equation for agent 1
    P2 : array_like, dtype=float, shape=(n, n)
        The steady-state solution to the associated discrete matrix
        Riccati equation for agent 2

    """
    # == Unload parameters and make sure everything is an array == #
    params = A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2
    params = map(np.asarray, params)
    A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2 = params

    # == Multiply A, B1, B2 by sqrt(beta) to enforce discounting == #
    A, B1, B2 = [np.sqrt(beta) * x for x in (A, B1, B2)]

    n = A.shape[0]

    if B1.ndim == 1:
        k_1 = 1
        B1 = np.reshape(B1, (n, 1))
    else:
        k_1 = B1.shape[1]

    if B2.ndim == 1:
        k_2 = 1
        B2 = np.reshape(B2, (n, 1))
    else:
        k_2 = B2.shape[1]

    random_state = check_random_state(random_state)
    v1 = eye(k_1)
    v2 = eye(k_2)
    P1 = np.zeros((n, n))
    P2 = np.zeros((n, n))
    F1 = random_state.randn(k_1, n)
    F2 = random_state.randn(k_2, n)

    for it in range(max_iter):
        # update
        F10 = F1
        F20 = F2

        G2 = solve(dot(B2.T, P2.dot(B2))+Q2, v2)
        G1 = solve(dot(B1.T, P1.dot(B1))+Q1, v1)
        H2 = dot(G2, B2.T.dot(P2))
        H1 = dot(G1, B1.T.dot(P1))

        # break up the computation of F1, F2
        F1_left = v1 - dot(H1.dot(B2)+G1.dot(M1.T),
                           H2.dot(B1)+G2.dot(M2.T))
        F1_right = H1.dot(A)+G1.dot(W1.T) - dot(H1.dot(B2)+G1.dot(M1.T),
                                                H2.dot(A)+G2.dot(W2.T))
        F1 = solve(F1_left, F1_right)
        F2 = H2.dot(A)+G2.dot(W2.T) - dot(H2.dot(B1)+G2.dot(M2.T), F1)

        Lambda1 = A - B2.dot(F2)
        Lambda2 = A - B1.dot(F1)
        Pi1 = R1 + dot(F2.T, S1.dot(F2))
        Pi2 = R2 + dot(F1.T, S2.dot(F1))

        P1 = dot(Lambda1.T, P1.dot(Lambda1)) + Pi1 - \
             dot(dot(Lambda1.T, P1.dot(B1)) + W1 - F2.T.dot(M1), F1)
        P2 = dot(Lambda2.T, P2.dot(Lambda2)) + Pi2 - \
             dot(dot(Lambda2.T, P2.dot(B2)) + W2 - F1.T.dot(M2), F2)

        dd = np.max(np.abs(F10 - F1)) + np.max(np.abs(F20 - F2))

        if dd < tol:  # success!
            break

    else:
        msg = 'No convergence: Iteration limit of {0} reached in nnash'
        raise ValueError(msg.format(max_iter))

    return F1, F2, P1, P2
python
def nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2, beta=1.0, tol=1e-8, max_iter=1000, random_state=None): r""" Compute the limit of a Nash linear quadratic dynamic game. In this problem, player i minimizes .. math:: \sum_{t=0}^{\infty} \left\{ x_t' r_i x_t + 2 x_t' w_i u_{it} +u_{it}' q_i u_{it} + u_{jt}' s_i u_{jt} + 2 u_{jt}' m_i u_{it} \right\} subject to the law of motion .. math:: x_{t+1} = A x_t + b_1 u_{1t} + b_2 u_{2t} and a perceived control law :math:`u_j(t) = - f_j x_t` for the other player. The solution computed in this routine is the :math:`f_i` and :math:`p_i` of the associated double optimal linear regulator problem. Parameters ---------- A : scalar(float) or array_like(float) Corresponds to the above equation, should be of size (n, n) B1 : scalar(float) or array_like(float) As above, size (n, k_1) B2 : scalar(float) or array_like(float) As above, size (n, k_2) R1 : scalar(float) or array_like(float) As above, size (n, n) R2 : scalar(float) or array_like(float) As above, size (n, n) Q1 : scalar(float) or array_like(float) As above, size (k_1, k_1) Q2 : scalar(float) or array_like(float) As above, size (k_2, k_2) S1 : scalar(float) or array_like(float) As above, size (k_1, k_1) S2 : scalar(float) or array_like(float) As above, size (k_2, k_2) W1 : scalar(float) or array_like(float) As above, size (n, k_1) W2 : scalar(float) or array_like(float) As above, size (n, k_2) M1 : scalar(float) or array_like(float) As above, size (k_2, k_1) M2 : scalar(float) or array_like(float) As above, size (k_1, k_2) beta : scalar(float), optional(default=1.0) Discount rate tol : scalar(float), optional(default=1e-8) This is the tolerance level for convergence max_iter : scalar(int), optional(default=1000) This is the maximum number of iterations allowed random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used.
Returns ------- F1 : array_like, dtype=float, shape=(k_1, n) Feedback law for agent 1 F2 : array_like, dtype=float, shape=(k_2, n) Feedback law for agent 2 P1 : array_like, dtype=float, shape=(n, n) The steady-state solution to the associated discrete matrix Riccati equation for agent 1 P2 : array_like, dtype=float, shape=(n, n) The steady-state solution to the associated discrete matrix Riccati equation for agent 2 """ # == Unload parameters and make sure everything is an array == # params = A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2 params = map(np.asarray, params) A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2 = params # == Multiply A, B1, B2 by sqrt(beta) to enforce discounting == # A, B1, B2 = [np.sqrt(beta) * x for x in (A, B1, B2)] n = A.shape[0] if B1.ndim == 1: k_1 = 1 B1 = np.reshape(B1, (n, 1)) else: k_1 = B1.shape[1] if B2.ndim == 1: k_2 = 1 B2 = np.reshape(B2, (n, 1)) else: k_2 = B2.shape[1] random_state = check_random_state(random_state) v1 = eye(k_1) v2 = eye(k_2) P1 = np.zeros((n, n)) P2 = np.zeros((n, n)) F1 = random_state.randn(k_1, n) F2 = random_state.randn(k_2, n) for it in range(max_iter): # update F10 = F1 F20 = F2 G2 = solve(dot(B2.T, P2.dot(B2))+Q2, v2) G1 = solve(dot(B1.T, P1.dot(B1))+Q1, v1) H2 = dot(G2, B2.T.dot(P2)) H1 = dot(G1, B1.T.dot(P1)) # break up the computation of F1, F2 F1_left = v1 - dot(H1.dot(B2)+G1.dot(M1.T), H2.dot(B1)+G2.dot(M2.T)) F1_right = H1.dot(A)+G1.dot(W1.T) - dot(H1.dot(B2)+G1.dot(M1.T), H2.dot(A)+G2.dot(W2.T)) F1 = solve(F1_left, F1_right) F2 = H2.dot(A)+G2.dot(W2.T) - dot(H2.dot(B1)+G2.dot(M2.T), F1) Lambda1 = A - B2.dot(F2) Lambda2 = A - B1.dot(F1) Pi1 = R1 + dot(F2.T, S1.dot(F2)) Pi2 = R2 + dot(F1.T, S2.dot(F1)) P1 = dot(Lambda1.T, P1.dot(Lambda1)) + Pi1 - \ dot(dot(Lambda1.T, P1.dot(B1)) + W1 - F2.T.dot(M1), F1) P2 = dot(Lambda2.T, P2.dot(Lambda2)) + Pi2 - \ dot(dot(Lambda2.T, P2.dot(B2)) + W2 - F1.T.dot(M2), F2) dd = np.max(np.abs(F10 - F1)) + np.max(np.abs(F20 - F2)) if dd < tol: # success! break else: msg = 'No convergence: Iteration limit of {0} reached in nnash' raise ValueError(msg.format(max_iter)) return F1, F2, P1, P2
['def', 'nnash', '(', 'A', ',', 'B1', ',', 'B2', ',', 'R1', ',', 'R2', ',', 'Q1', ',', 'Q2', ',', 'S1', ',', 'S2', ',', 'W1', ',', 'W2', ',', 'M1', ',', 'M2', ',', 'beta', '=', '1.0', ',', 'tol', '=', '1e-8', ',', 'max_iter', '=', '1000', ',', 'random_state', '=', 'None', ')', ':', '# == Unload parameters and make sure everything is an array == #', 'params', '=', 'A', ',', 'B1', ',', 'B2', ',', 'R1', ',', 'R2', ',', 'Q1', ',', 'Q2', ',', 'S1', ',', 'S2', ',', 'W1', ',', 'W2', ',', 'M1', ',', 'M2', 'params', '=', 'map', '(', 'np', '.', 'asarray', ',', 'params', ')', 'A', ',', 'B1', ',', 'B2', ',', 'R1', ',', 'R2', ',', 'Q1', ',', 'Q2', ',', 'S1', ',', 'S2', ',', 'W1', ',', 'W2', ',', 'M1', ',', 'M2', '=', 'params', '# == Multiply A, B1, B2 by sqrt(beta) to enforce discounting == #', 'A', ',', 'B1', ',', 'B2', '=', '[', 'np', '.', 'sqrt', '(', 'beta', ')', '*', 'x', 'for', 'x', 'in', '(', 'A', ',', 'B1', ',', 'B2', ')', ']', 'n', '=', 'A', '.', 'shape', '[', '0', ']', 'if', 'B1', '.', 'ndim', '==', '1', ':', 'k_1', '=', '1', 'B1', '=', 'np', '.', 'reshape', '(', 'B1', ',', '(', 'n', ',', '1', ')', ')', 'else', ':', 'k_1', '=', 'B1', '.', 'shape', '[', '1', ']', 'if', 'B2', '.', 'ndim', '==', '1', ':', 'k_2', '=', '1', 'B2', '=', 'np', '.', 'reshape', '(', 'B2', ',', '(', 'n', ',', '1', ')', ')', 'else', ':', 'k_2', '=', 'B2', '.', 'shape', '[', '1', ']', 'random_state', '=', 'check_random_state', '(', 'random_state', ')', 'v1', '=', 'eye', '(', 'k_1', ')', 'v2', '=', 'eye', '(', 'k_2', ')', 'P1', '=', 'np', '.', 'zeros', '(', '(', 'n', ',', 'n', ')', ')', 'P2', '=', 'np', '.', 'zeros', '(', '(', 'n', ',', 'n', ')', ')', 'F1', '=', 'random_state', '.', 'randn', '(', 'k_1', ',', 'n', ')', 'F2', '=', 'random_state', '.', 'randn', '(', 'k_2', ',', 'n', ')', 'for', 'it', 'in', 'range', '(', 'max_iter', ')', ':', '# update', 'F10', '=', 'F1', 'F20', '=', 'F2', 'G2', '=', 'solve', '(', 'dot', '(', 'B2', '.', 'T', ',', 'P2', '.', 'dot', '(', 'B2', ')', ')', '+', 'Q2', ',', 'v2', ')', 'G1', '=', 'solve', '(', 'dot', '(', 'B1', '.', 'T', ',', 'P1', '.', 'dot', '(', 'B1', ')', ')', '+', 'Q1', ',', 'v1', ')', 'H2', '=', 'dot', '(', 'G2', ',', 'B2', '.', 'T', '.', 'dot', '(', 'P2', ')', ')', 'H1', '=', 'dot', '(', 'G1', ',', 'B1', '.', 'T', '.', 'dot', '(', 'P1', ')', ')', '# break up the computation of F1, F2', 'F1_left', '=', 'v1', '-', 'dot', '(', 'H1', '.', 'dot', '(', 'B2', ')', '+', 'G1', '.', 'dot', '(', 'M1', '.', 'T', ')', ',', 'H2', '.', 'dot', '(', 'B1', ')', '+', 'G2', '.', 'dot', '(', 'M2', '.', 'T', ')', ')', 'F1_right', '=', 'H1', '.', 'dot', '(', 'A', ')', '+', 'G1', '.', 'dot', '(', 'W1', '.', 'T', ')', '-', 'dot', '(', 'H1', '.', 'dot', '(', 'B2', ')', '+', 'G1', '.', 'dot', '(', 'M1', '.', 'T', ')', ',', 'H2', '.', 'dot', '(', 'A', ')', '+', 'G2', '.', 'dot', '(', 'W2', '.', 'T', ')', ')', 'F1', '=', 'solve', '(', 'F1_left', ',', 'F1_right', ')', 'F2', '=', 'H2', '.', 'dot', '(', 'A', ')', '+', 'G2', '.', 'dot', '(', 'W2', '.', 'T', ')', '-', 'dot', '(', 'H2', '.', 'dot', '(', 'B1', ')', '+', 'G2', '.', 'dot', '(', 'M2', '.', 'T', ')', ',', 'F1', ')', 'Lambda1', '=', 'A', '-', 'B2', '.', 'dot', '(', 'F2', ')', 'Lambda2', '=', 'A', '-', 'B1', '.', 'dot', '(', 'F1', ')', 'Pi1', '=', 'R1', '+', 'dot', '(', 'F2', '.', 'T', ',', 'S1', '.', 'dot', '(', 'F2', ')', ')', 'Pi2', '=', 'R2', '+', 'dot', '(', 'F1', '.', 'T', ',', 'S2', '.', 'dot', '(', 'F1', ')', ')', 'P1', '=', 'dot', '(', 'Lambda1', '.', 'T', ',', 'P1', '.', 'dot', '(', 'Lambda1', ')', ')', '+', 'Pi1', '-', 'dot', '(', 'dot', 
'(', 'Lambda1', '.', 'T', ',', 'P1', '.', 'dot', '(', 'B1', ')', ')', '+', 'W1', '-', 'F2', '.', 'T', '.', 'dot', '(', 'M1', ')', ',', 'F1', ')', 'P2', '=', 'dot', '(', 'Lambda2', '.', 'T', ',', 'P2', '.', 'dot', '(', 'Lambda2', ')', ')', '+', 'Pi2', '-', 'dot', '(', 'dot', '(', 'Lambda2', '.', 'T', ',', 'P2', '.', 'dot', '(', 'B2', ')', ')', '+', 'W2', '-', 'F1', '.', 'T', '.', 'dot', '(', 'M2', ')', ',', 'F2', ')', 'dd', '=', 'np', '.', 'max', '(', 'np', '.', 'abs', '(', 'F10', '-', 'F1', ')', ')', '+', 'np', '.', 'max', '(', 'np', '.', 'abs', '(', 'F20', '-', 'F2', ')', ')', 'if', 'dd', '<', 'tol', ':', '# success!', 'break', 'else', ':', 'msg', '=', "'No convergence: Iteration limit of {0} reached in nnash'", 'raise', 'ValueError', '(', 'msg', '.', 'format', '(', 'max_iter', ')', ')', 'return', 'F1', ',', 'F2', ',', 'P1', ',', 'P2']
r""" Compute the limit of a Nash linear quadratic dynamic game. In this problem, player i minimizes .. math:: \sum_{t=0}^{\infty} \left\{ x_t' r_i x_t + 2 x_t' w_i u_{it} +u_{it}' q_i u_{it} + u_{jt}' s_i u_{jt} + 2 u_{jt}' m_i u_{it} \right\} subject to the law of motion .. math:: x_{t+1} = A x_t + b_1 u_{1t} + b_2 u_{2t} and a perceived control law :math:`u_j(t) = - f_j x_t` for the other player. The solution computed in this routine is the :math:`f_i` and :math:`p_i` of the associated double optimal linear regulator problem. Parameters ---------- A : scalar(float) or array_like(float) Corresponds to the above equation, should be of size (n, n) B1 : scalar(float) or array_like(float) As above, size (n, k_1) B2 : scalar(float) or array_like(float) As above, size (n, k_2) R1 : scalar(float) or array_like(float) As above, size (n, n) R2 : scalar(float) or array_like(float) As above, size (n, n) Q1 : scalar(float) or array_like(float) As above, size (k_1, k_1) Q2 : scalar(float) or array_like(float) As above, size (k_2, k_2) S1 : scalar(float) or array_like(float) As above, size (k_1, k_1) S2 : scalar(float) or array_like(float) As above, size (k_2, k_2) W1 : scalar(float) or array_like(float) As above, size (n, k_1) W2 : scalar(float) or array_like(float) As above, size (n, k_2) M1 : scalar(float) or array_like(float) As above, size (k_2, k_1) M2 : scalar(float) or array_like(float) As above, size (k_1, k_2) beta : scalar(float), optional(default=1.0) Discount rate tol : scalar(float), optional(default=1e-8) This is the tolerance level for convergence max_iter : scalar(int), optional(default=1000) This is the maximum number of iterations allowed random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used. Returns ------- F1 : array_like, dtype=float, shape=(k_1, n) Feedback law for agent 1 F2 : array_like, dtype=float, shape=(k_2, n) Feedback law for agent 2 P1 : array_like, dtype=float, shape=(n, n) The steady-state solution to the associated discrete matrix Riccati equation for agent 1 P2 : array_like, dtype=float, shape=(n, n) The steady-state solution to the associated discrete matrix Riccati equation for agent 2
['r', 'Compute', 'the', 'limit', 'of', 'a', 'Nash', 'linear', 'quadratic', 'dynamic', 'game', '.', 'In', 'this', 'problem', 'player', 'i', 'minimizes']
train
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/lqnash.py#L7-L154
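A minimal usage sketch for the nnash record above, assuming the QuantEcon package is installed (nnash is exported at the package top level); the scalar matrices below are illustrative placeholders, not values from the source.

import numpy as np
from quantecon import nnash  # assumes quantecon is installed

A = np.array([[0.95]])           # illustrative placeholder values throughout
B1 = B2 = np.array([[1.0]])
R1 = R2 = np.array([[1.0]])
Q1 = Q2 = np.array([[1.0]])
Z = np.zeros((1, 1))             # no cross terms in this toy game

F1, F2, P1, P2 = nnash(A, B1, B2, R1, R2, Q1, Q2,
                       S1=Z, S2=Z, W1=Z, W2=Z, M1=Z, M2=Z,
                       beta=0.95, random_state=0)
print(F1, F2)                    # the two feedback laws, each of shape (1, 1)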
4,688
ryanjdillon/pyotelem
pyotelem/physio_seal.py
dens2lip
def dens2lip(dens_gcm3, dens_lipid=0.9007, dens_prot=1.34, dens_water=0.994, dens_ash=2.3): '''Get percent composition of animal from body density The equation calculating animal density is from Biuw et al. (2003), and default values for component densities are from human studies collected in the book by Moore et al. (1963). Args ---- dens_gcm3: float or ndarray An array of seal densities (g/cm^3). The calculations only yield valid percents with densities between 0.888-1.123 with other parameters left as defaults. dens_lipid: float Density of lipid content in the animal (g/cm^3) dens_prot: float Density of protein content in the animal (g/cm^3) dens_water: float Density of water content in the animal (g/cm^3) dens_ash: float Density of ash content in the animal (g/cm^3) Returns ------- perc_lipid: float or ndarray Percent lipid content of the animal References ---------- Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of free-ranging seals using simple dive characteristics. Journal of Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583 Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body Cell Mass and Its Supporting Environment - The Composition in Health and Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p. ISBN:0-7216-6480-6 ''' import numpy # Cast iterables to numpy array if numpy.iterable(dens_gcm3): dens_gcm3 = numpy.asarray(dens_gcm3) # Numerators ad_num = -3.2248 * dens_ash pd_num = -25.2786 * dens_prot wd_num = -71.4966 * dens_water # Denominators ad_den = -0.034 * dens_ash pd_den = -0.2857 * dens_prot wd_den = -0.6803 * dens_water perc_lipid = ((100 * dens_gcm3) + ad_num + pd_num + wd_num) / \ (dens_lipid + ad_den + pd_den + wd_den) return perc_lipid
python
def dens2lip(dens_gcm3, dens_lipid=0.9007, dens_prot=1.34, dens_water=0.994, dens_ash=2.3): '''Get percent composition of animal from body density The equation calculating animal density is from Biuw et al. (2003), and default values for component densities are from human studies collected in the book by Moore et al. (1963). Args ---- dens_gcm3: float or ndarray An array of seal densities (g/cm^3). The calculations only yield valid percents with densities between 0.888-1.123 with other parameters left as defaults. dens_lipid: float Density of lipid content in the animal (g/cm^3) dens_prot: float Density of protein content in the animal (g/cm^3) dens_water: float Density of water content in the animal (g/cm^3) dens_ash: float Density of ash content in the animal (g/cm^3) Returns ------- perc_lipid: float or ndarray Percent lipid content of the animal References ---------- Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of free-ranging seals using simple dive characteristics. Journal of Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583 Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body Cell Mass and Its Supporting Environment - The Composition in Health and Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p. ISBN:0-7216-6480-6 ''' import numpy # Cast iterables to numpy array if numpy.iterable(dens_gcm3): dens_gcm3 = numpy.asarray(dens_gcm3) # Numerators ad_num = -3.2248 * dens_ash pd_num = -25.2786 * dens_prot wd_num = -71.4966 * dens_water # Denominators ad_den = -0.034 * dens_ash pd_den = -0.2857 * dens_prot wd_den = -0.6803 * dens_water perc_lipid = ((100 * dens_gcm3) + ad_num + pd_num + wd_num) / \ (dens_lipid + ad_den + pd_den + wd_den) return perc_lipid
['def', 'dens2lip', '(', 'dens_gcm3', ',', 'dens_lipid', '=', '0.9007', ',', 'dens_prot', '=', '1.34', ',', 'dens_water', '=', '0.994', ',', 'dens_ash', '=', '2.3', ')', ':', 'import', 'numpy', '# Cast iterables to numpy array', 'if', 'numpy', '.', 'iterable', '(', 'dens_gcm3', ')', ':', 'dens_gcm3', '=', 'numpy', '.', 'asarray', '(', 'dens_gcm3', ')', '# Numerators', 'ad_num', '=', '-', '3.2248', '*', 'dens_ash', 'pd_num', '=', '-', '25.2786', '*', 'dens_prot', 'wd_num', '=', '-', '71.4966', '*', 'dens_water', '# Denominators', 'ad_den', '=', '-', '0.034', '*', 'dens_ash', 'pd_den', '=', '-', '0.2857', '*', 'dens_prot', 'wd_den', '=', '-', '0.6803', '*', 'dens_water', 'perc_lipid', '=', '(', '(', '100', '*', 'dens_gcm3', ')', '+', 'ad_num', '+', 'pd_num', '+', 'wd_num', ')', '/', '(', 'dens_lipid', '+', 'ad_den', '+', 'pd_den', '+', 'wd_den', ')', 'return', 'perc_lipid']
Get percent composition of animal from body density The equation calculating animal density is from Biuw et al. (2003), and default values for component densities are from human studies collected in the book by Moore et al. (1963). Args ---- dens_gcm3: float or ndarray An array of seal densities (g/cm^3). The calculations only yield valid percents with densities between 0.888-1.123 with other parameters left as defaults. dens_lipid: float Density of lipid content in the animal (g/cm^3) dens_prot: float Density of protein content in the animal (g/cm^3) dens_water: float Density of water content in the animal (g/cm^3) dens_ash: float Density of ash content in the animal (g/cm^3) Returns ------- perc_lipid: float or ndarray Percent lipid content of the animal References ---------- Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of free-ranging seals using simple dive characteristics. Journal of Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583 Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body Cell Mass and Its Supporting Environment - The Composition in Health and Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p. ISBN:0-7216-6480-6
['Get', 'percent', 'composition', 'of', 'animal', 'from', 'body', 'density']
train
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/physio_seal.py#L257-L315
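A short usage sketch for the dens2lip record above, assuming the function as defined there is in scope; the density values are illustrative.

import numpy as np

densities = np.array([0.95, 1.00, 1.05])  # g/cm^3, inside the valid 0.888-1.123 range
perc_lipid = dens2lip(densities)          # one percent-lipid value per input density
print(perc_lipid)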
4,689
holmes-app/holmes-alf
holmesalf/wrapper.py
AlfAuthNZWrapper.sync_client
def sync_client(self): """Synchronous OAuth 2.0 Bearer client""" if not self._sync_client: self._sync_client = AlfSyncClient( token_endpoint=self.config.get('OAUTH_TOKEN_ENDPOINT'), client_id=self.config.get('OAUTH_CLIENT_ID'), client_secret=self.config.get('OAUTH_CLIENT_SECRET') ) return self._sync_client
python
def sync_client(self): """Synchronous OAuth 2.0 Bearer client""" if not self._sync_client: self._sync_client = AlfSyncClient( token_endpoint=self.config.get('OAUTH_TOKEN_ENDPOINT'), client_id=self.config.get('OAUTH_CLIENT_ID'), client_secret=self.config.get('OAUTH_CLIENT_SECRET') ) return self._sync_client
['def', 'sync_client', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_sync_client', ':', 'self', '.', '_sync_client', '=', 'AlfSyncClient', '(', 'token_endpoint', '=', 'self', '.', 'config', '.', 'get', '(', "'OAUTH_TOKEN_ENDPOINT'", ')', ',', 'client_id', '=', 'self', '.', 'config', '.', 'get', '(', "'OAUTH_CLIENT_ID'", ')', ',', 'client_secret', '=', 'self', '.', 'config', '.', 'get', '(', "'OAUTH_CLIENT_SECRET'", ')', ')', 'return', 'self', '.', '_sync_client']
Synchronous OAuth 2.0 Bearer client
['Synchronous', 'OAuth', '2', '.', '0', 'Bearer', 'client']
train
https://github.com/holmes-app/holmes-alf/blob/4bf891831390ecfae818cf37d8ffc3a76fe9f1ec/holmesalf/wrapper.py#L26-L34
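A hypothetical usage sketch for the sync_client record above; it assumes the wrapper is constructed with a mapping that carries the three OAUTH_* keys and that sync_client is exposed as a property, neither of which is shown in the record.

config = {
    'OAUTH_TOKEN_ENDPOINT': 'https://auth.example.com/token',  # placeholder values
    'OAUTH_CLIENT_ID': 'my-client-id',
    'OAUTH_CLIENT_SECRET': 'my-client-secret',
}
wrapper = AlfAuthNZWrapper(config)    # hypothetical constructor call
client = wrapper.sync_client          # AlfSyncClient built lazily on first access
assert client is wrapper.sync_client  # cached: later accesses reuse the instance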
4,690
pudo/normality
normality/encoding.py
normalize_encoding
def normalize_encoding(encoding, default=DEFAULT_ENCODING): """Normalize the encoding name, replace ASCII w/ UTF-8.""" if encoding is None: return default encoding = encoding.lower().strip() if encoding in ['', 'ascii']: return default try: codecs.lookup(encoding) return encoding except LookupError: return default
python
def normalize_encoding(encoding, default=DEFAULT_ENCODING): """Normalize the encoding name, replace ASCII w/ UTF-8.""" if encoding is None: return default encoding = encoding.lower().strip() if encoding in ['', 'ascii']: return default try: codecs.lookup(encoding) return encoding except LookupError: return default
['def', 'normalize_encoding', '(', 'encoding', ',', 'default', '=', 'DEFAULT_ENCODING', ')', ':', 'if', 'encoding', 'is', 'None', ':', 'return', 'default', 'encoding', '=', 'encoding', '.', 'lower', '(', ')', '.', 'strip', '(', ')', 'if', 'encoding', 'in', '[', "''", ',', "'ascii'", ']', ':', 'return', 'default', 'try', ':', 'codecs', '.', 'lookup', '(', 'encoding', ')', 'return', 'encoding', 'except', 'LookupError', ':', 'return', 'default']
Normalize the encoding name, replace ASCII w/ UTF-8.
['Normalize', 'the', 'encoding', 'name', 'replace', 'ASCII', 'w', '/', 'UTF', '-', '8', '.']
train
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/encoding.py#L8-L19
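A usage sketch for the normalize_encoding record above, assuming the function and DEFAULT_ENCODING (taken here to be 'utf-8') are in scope.

print(normalize_encoding(None))           # None falls back to the default
print(normalize_encoding('ASCII'))        # ASCII is widened to the default
print(normalize_encoding(' Latin-1 '))    # 'latin-1': known codec, lowercased and stripped
print(normalize_encoding('no-such-enc'))  # unknown codec falls back to the default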
4,691
avihad/twistes
twistes/utilities.py
EsUtils.is_get_query_with_results
def is_get_query_with_results(results): """ :param results: the response from Elasticsearch :return: true if the get query returned a result, false otherwise """ return results and EsConst.FOUND in results and results[EsConst.FOUND] and EsConst.FIELDS in results
python
def is_get_query_with_results(results): """ :param results: the response from Elasticsearch :return: true if the get query returned a result, false otherwise """ return results and EsConst.FOUND in results and results[EsConst.FOUND] and EsConst.FIELDS in results
['def', 'is_get_query_with_results', '(', 'results', ')', ':', 'return', 'results', 'and', 'EsConst', '.', 'FOUND', 'in', 'results', 'and', 'results', '[', 'EsConst', '.', 'FOUND', ']', 'and', 'EsConst', '.', 'FIELDS', 'in', 'results']
:param results: the response from Elasticsearch :return: true if the get query returned a result, false otherwise
[':', 'param', 'results', ':', 'the', 'response', 'from', 'Elasticsearch', ':', 'return', ':', 'true', 'if', 'the', 'get', 'query', 'returned', 'a', 'result', 'false', 'otherwise']
train
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/utilities.py#L39-L44
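A truthiness sketch for the record above, assuming EsConst.FOUND == 'found' and EsConst.FIELDS == 'fields' (the usual Elasticsearch get-response keys; the constants themselves are not shown in the record).

hit = {'found': True, 'fields': {'title': ['Hello']}}  # a document that was found
miss = {'found': False}                                # a get that matched nothing
print(bool(EsUtils.is_get_query_with_results(hit)))    # True
print(bool(EsUtils.is_get_query_with_results(miss)))   # False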
4,692
prompt-toolkit/pymux
pymux/commands/commands.py
unbind_key
def unbind_key(pymux, variables): """ Remove key binding. """ key = variables['<key>'] needs_prefix = not variables['-n'] pymux.key_bindings_manager.remove_custom_binding( key, needs_prefix=needs_prefix)
python
def unbind_key(pymux, variables): """ Remove key binding. """ key = variables['<key>'] needs_prefix = not variables['-n'] pymux.key_bindings_manager.remove_custom_binding( key, needs_prefix=needs_prefix)
['def', 'unbind_key', '(', 'pymux', ',', 'variables', ')', ':', 'key', '=', 'variables', '[', "'<key>'", ']', 'needs_prefix', '=', 'not', 'variables', '[', "'-n'", ']', 'pymux', '.', 'key_bindings_manager', '.', 'remove_custom_binding', '(', 'key', ',', 'needs_prefix', '=', 'needs_prefix', ')']
Remove key binding.
['Remove', 'key', 'binding', '.']
train
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/commands/commands.py#L496-L504
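A sketch of how pymux invokes the handler above: the variables dict comes from command parsing, so the example values are hypothetical.

unbind_key(pymux, {'<key>': 'C-b', '-n': False})  # remove the prefixed C-b binding
unbind_key(pymux, {'<key>': 'C-b', '-n': True})   # remove a binding registered without the prefix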
4,693
splunk/splunk-sdk-python
splunklib/searchcommands/external_search_command.py
execute
def execute(path, argv=None, environ=None, command_class=ExternalSearchCommand): """ :param path: :type path: basestring :param argv: :type argv: list, tuple, or None :param environ: :type environ: dict :param command_class: External search command class to instantiate and execute. :type command_class: type :return: :rtype: None """ assert issubclass(command_class, ExternalSearchCommand) command_class(path, argv, environ).execute()
python
def execute(path, argv=None, environ=None, command_class=ExternalSearchCommand): """ :param path: :type path: basestring :param argv: :type argv: list, tuple, or None :param environ: :type environ: dict :param command_class: External search command class to instantiate and execute. :type command_class: type :return: :rtype: None """ assert issubclass(command_class, ExternalSearchCommand) command_class(path, argv, environ).execute()
['def', 'execute', '(', 'path', ',', 'argv', '=', 'None', ',', 'environ', '=', 'None', ',', 'command_class', '=', 'ExternalSearchCommand', ')', ':', 'assert', 'issubclass', '(', 'command_class', ',', 'ExternalSearchCommand', ')', 'command_class', '(', 'path', ',', 'argv', ',', 'environ', ')', '.', 'execute', '(', ')']
:param path: :type path: basestring :param argv: :type argv: list, tuple, or None :param environ: :type environ: dict :param command_class: External search command class to instantiate and execute. :type command_class: type :return: :rtype: None
[':', 'param', 'path', ':', ':', 'type', 'path', ':', 'basestring', ':', 'param', 'argv', ':', ':', 'type', 'argv', ':', 'list', 'tuple', 'or', 'None', ':', 'param', 'environ', ':', ':', 'type', 'environ', ':', 'dict', ':', 'param', 'command_class', ':', 'External', 'search', 'command', 'class', 'to', 'instantiate', 'and', 'execute', '.', ':', 'type', 'command_class', ':', 'type', ':', 'return', ':', ':', 'rtype', ':', 'None']
train
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/searchcommands/external_search_command.py#L214-L228
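A usage sketch for the execute record above; the path and arguments are placeholders.

execute('/opt/commands/mycommand',      # placeholder path to the external program
        argv=['mycommand', '--debug'],  # placeholder argument vector
        environ={'PATH': '/usr/bin'})   # minimal environment; ExternalSearchCommand is the default class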
4,694
tensorflow/tensor2tensor
tensor2tensor/data_generators/multinli.py
_maybe_download_corpora
def _maybe_download_corpora(tmp_dir): """Download corpora for multinli. Args: tmp_dir: a string Returns: a string """ mnli_filename = "MNLI.zip" mnli_finalpath = os.path.join(tmp_dir, "MNLI") if not tf.gfile.Exists(mnli_finalpath): zip_filepath = generator_utils.maybe_download( tmp_dir, mnli_filename, _MNLI_URL) zip_ref = zipfile.ZipFile(zip_filepath, "r") zip_ref.extractall(tmp_dir) zip_ref.close() return mnli_finalpath
python
def _maybe_download_corpora(tmp_dir): """Download corpora for multinli. Args: tmp_dir: a string Returns: a string """ mnli_filename = "MNLI.zip" mnli_finalpath = os.path.join(tmp_dir, "MNLI") if not tf.gfile.Exists(mnli_finalpath): zip_filepath = generator_utils.maybe_download( tmp_dir, mnli_filename, _MNLI_URL) zip_ref = zipfile.ZipFile(zip_filepath, "r") zip_ref.extractall(tmp_dir) zip_ref.close() return mnli_finalpath
['def', '_maybe_download_corpora', '(', 'tmp_dir', ')', ':', 'mnli_filename', '=', '"MNLI.zip"', 'mnli_finalpath', '=', 'os', '.', 'path', '.', 'join', '(', 'tmp_dir', ',', '"MNLI"', ')', 'if', 'not', 'tf', '.', 'gfile', '.', 'Exists', '(', 'mnli_finalpath', ')', ':', 'zip_filepath', '=', 'generator_utils', '.', 'maybe_download', '(', 'tmp_dir', ',', 'mnli_filename', ',', '_MNLI_URL', ')', 'zip_ref', '=', 'zipfile', '.', 'ZipFile', '(', 'zip_filepath', ',', '"r"', ')', 'zip_ref', '.', 'extractall', '(', 'tmp_dir', ')', 'zip_ref', '.', 'close', '(', ')', 'return', 'mnli_finalpath']
Download corpora for multinli. Args: tmp_dir: a string Returns: a string
['Download', 'corpora', 'for', 'multinli', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multinli.py#L42-L59
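A usage sketch for the record above; the first call downloads and extracts MNLI.zip into tmp_dir, and later calls just return the existing directory.

mnli_dir = _maybe_download_corpora('/tmp/t2t_data')  # placeholder directory
print(mnli_dir)  # '/tmp/t2t_data/MNLI'; safe to call again, the download is skipped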
4,695
f3at/feat
tools/pep8.py
expand_indent
def expand_indent(line): """ Return the amount of indentation. Tabs are expanded to the next multiple of 8. >>> expand_indent(' ') 4 >>> expand_indent('\\t') 8 >>> expand_indent(' \\t') 8 >>> expand_indent(' \\t') 8 >>> expand_indent(' \\t') 16 """ result = 0 for char in line: if char == '\t': result = result / 8 * 8 + 8 elif char == ' ': result += 1 else: break return result
python
def expand_indent(line): """ Return the amount of indentation. Tabs are expanded to the next multiple of 8. >>> expand_indent(' ') 4 >>> expand_indent('\\t') 8 >>> expand_indent(' \\t') 8 >>> expand_indent(' \\t') 8 >>> expand_indent(' \\t') 16 """ result = 0 for char in line: if char == '\t': result = result / 8 * 8 + 8 elif char == ' ': result += 1 else: break return result
['def', 'expand_indent', '(', 'line', ')', ':', 'result', '=', '0', 'for', 'char', 'in', 'line', ':', 'if', 'char', '==', "'\\t'", ':', 'result', '=', 'result', '/', '8', '*', '8', '+', '8', 'elif', 'char', '==', "' '", ':', 'result', '+=', '1', 'else', ':', 'break', 'return', 'result']
Return the amount of indentation. Tabs are expanded to the next multiple of 8. >>> expand_indent(' ') 4 >>> expand_indent('\\t') 8 >>> expand_indent(' \\t') 8 >>> expand_indent(' \\t') 8 >>> expand_indent(' \\t') 16
['Return', 'the', 'amount', 'of', 'indentation', '.', 'Tabs', 'are', 'expanded', 'to', 'the', 'next', 'multiple', 'of', '8', '.']
train
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/tools/pep8.py#L408-L432
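A portability note on the record above, in sketch form: under Python 3, `result / 8 * 8 + 8` performs true division and yields floats, so a Python 3 port needs floor division to keep the doctest results integral.

def expand_indent_py3(line):
    # Same logic as above, with // so tab stops stay integers on Python 3.
    result = 0
    for char in line:
        if char == '\t':
            result = result // 8 * 8 + 8
        elif char == ' ':
            result += 1
        else:
            break
    return result

assert expand_indent_py3('    ') == 4
assert expand_indent_py3('        \t') == 16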
4,696
MisterY/asset-allocation
asset_allocation/formatters.py
AsciiFormatter.__format_row
def __format_row(self, row: AssetAllocationViewModel): """ display-format one row Formats one Asset Class record """ output = "" index = 0 # Name value = row.name # Indent according to depth. for _ in range(0, row.depth): value = f" {value}" output += self.append_text_column(value, index) # Set Allocation value = "" index += 1 if row.set_allocation > 0: value = f"{row.set_allocation:.2f}" output += self.append_num_column(value, index) # Current Allocation value = "" index += 1 if row.curr_allocation > Decimal(0): value = f"{row.curr_allocation:.2f}" output += self.append_num_column(value, index) # Allocation difference, percentage value = "" index += 1 if row.alloc_diff_perc.copy_abs() > Decimal(0): value = f"{row.alloc_diff_perc:.0f} %" output += self.append_num_column(value, index) # Allocated value index += 1 value = "" if row.set_value: value = f"{row.set_value:,.0f}" output += self.append_num_column(value, index) # Current Value index += 1 value = f"{row.curr_value:,.0f}" output += self.append_num_column(value, index) # Value in security's currency. Show only if displaying full model, with stocks. index += 1 if self.full: value = "" if row.curr_value_own_currency: value = f"({row.curr_value_own_currency:,.0f}" value += f" {row.own_currency}" value += ")" output += self.append_num_column(value, index) # https://en.wikipedia.org/wiki/ANSI_escape_code # CSI="\x1B[" # red = 31, green = 32 # output += CSI+"31;40m" + "Colored Text" + CSI + "0m" # Value diff index += 1 value = "" if row.diff_value: value = f"{row.diff_value:,.0f}" # Color the output # value = f"{CSI};40m{value}{CSI};40m" output += self.append_num_column(value, index) return output
python
def __format_row(self, row: AssetAllocationViewModel): """ display-format one row Formats one Asset Class record """ output = "" index = 0 # Name value = row.name # Indent according to depth. for _ in range(0, row.depth): value = f" {value}" output += self.append_text_column(value, index) # Set Allocation value = "" index += 1 if row.set_allocation > 0: value = f"{row.set_allocation:.2f}" output += self.append_num_column(value, index) # Current Allocation value = "" index += 1 if row.curr_allocation > Decimal(0): value = f"{row.curr_allocation:.2f}" output += self.append_num_column(value, index) # Allocation difference, percentage value = "" index += 1 if row.alloc_diff_perc.copy_abs() > Decimal(0): value = f"{row.alloc_diff_perc:.0f} %" output += self.append_num_column(value, index) # Allocated value index += 1 value = "" if row.set_value: value = f"{row.set_value:,.0f}" output += self.append_num_column(value, index) # Current Value index += 1 value = f"{row.curr_value:,.0f}" output += self.append_num_column(value, index) # Value in security's currency. Show only if displaying full model, with stocks. index += 1 if self.full: value = "" if row.curr_value_own_currency: value = f"({row.curr_value_own_currency:,.0f}" value += f" {row.own_currency}" value += ")" output += self.append_num_column(value, index) # https://en.wikipedia.org/wiki/ANSI_escape_code # CSI="\x1B[" # red = 31, green = 32 # output += CSI+"31;40m" + "Colored Text" + CSI + "0m" # Value diff index += 1 value = "" if row.diff_value: value = f"{row.diff_value:,.0f}" # Color the output # value = f"{CSI};40m{value}{CSI};40m" output += self.append_num_column(value, index) return output
['def', '__format_row', '(', 'self', ',', 'row', ':', 'AssetAllocationViewModel', ')', ':', 'output', '=', '""', 'index', '=', '0', '# Name', 'value', '=', 'row', '.', 'name', '# Indent according to depth.', 'for', '_', 'in', 'range', '(', '0', ',', 'row', '.', 'depth', ')', ':', 'value', '=', 'f" {value}"', 'output', '+=', 'self', '.', 'append_text_column', '(', 'value', ',', 'index', ')', '# Set Allocation', 'value', '=', '""', 'index', '+=', '1', 'if', 'row', '.', 'set_allocation', '>', '0', ':', 'value', '=', 'f"{row.set_allocation:.2f}"', 'output', '+=', 'self', '.', 'append_num_column', '(', 'value', ',', 'index', ')', '# Current Allocation', 'value', '=', '""', 'index', '+=', '1', 'if', 'row', '.', 'curr_allocation', '>', 'Decimal', '(', '0', ')', ':', 'value', '=', 'f"{row.curr_allocation:.2f}"', 'output', '+=', 'self', '.', 'append_num_column', '(', 'value', ',', 'index', ')', '# Allocation difference, percentage', 'value', '=', '""', 'index', '+=', '1', 'if', 'row', '.', 'alloc_diff_perc', '.', 'copy_abs', '(', ')', '>', 'Decimal', '(', '0', ')', ':', 'value', '=', 'f"{row.alloc_diff_perc:.0f} %"', 'output', '+=', 'self', '.', 'append_num_column', '(', 'value', ',', 'index', ')', '# Allocated value', 'index', '+=', '1', 'value', '=', '""', 'if', 'row', '.', 'set_value', ':', 'value', '=', 'f"{row.set_value:,.0f}"', 'output', '+=', 'self', '.', 'append_num_column', '(', 'value', ',', 'index', ')', '# Current Value', 'index', '+=', '1', 'value', '=', 'f"{row.curr_value:,.0f}"', 'output', '+=', 'self', '.', 'append_num_column', '(', 'value', ',', 'index', ')', "# Value in security's currency. Show only if displaying full model, with stocks.", 'index', '+=', '1', 'if', 'self', '.', 'full', ':', 'value', '=', '""', 'if', 'row', '.', 'curr_value_own_currency', ':', 'value', '=', 'f"({row.curr_value_own_currency:,.0f}"', 'value', '+=', 'f" {row.own_currency}"', 'value', '+=', '")"', 'output', '+=', 'self', '.', 'append_num_column', '(', 'value', ',', 'index', ')', '# https://en.wikipedia.org/wiki/ANSI_escape_code', '# CSI="\\x1B["', '# red = 31, green = 32', '# output += CSI+"31;40m" + "Colored Text" + CSI + "0m"', '# Value diff', 'index', '+=', '1', 'value', '=', '""', 'if', 'row', '.', 'diff_value', ':', 'value', '=', 'f"{row.diff_value:,.0f}"', '# Color the output', '# value = f"{CSI};40m{value}{CSI};40m"', 'output', '+=', 'self', '.', 'append_num_column', '(', 'value', ',', 'index', ')', 'return', 'output']
display-format one row Formats one Asset Class record
['display', '-', 'format', 'one', 'row', 'Formats', 'one', 'Asset', 'Class', 'record']
train
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/formatters.py#L52-L122
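A standalone sketch of the depth-indentation idea at the top of the method above (one space per level, as in the record); the helper name is hypothetical.

def indent_name(name, depth):
    # Prefix one space per depth level, mirroring the loop in __format_row.
    return (' ' * depth) + name

print(indent_name('Equities', 2))  # '  Equities'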
4,697
tcalmant/ipopo
pelix/utilities.py
SynchronizedClassMethod
def SynchronizedClassMethod(*locks_attr_names, **kwargs): # pylint: disable=C1801 """ A synchronizer decorator for class methods. An AttributeError can be raised at runtime if the given lock attribute doesn't exist or if it is None. If a parameter ``sorted`` is found in ``kwargs`` and its value is True, then the list of locks names will be sorted before locking. :param locks_attr_names: A list of the lock(s) attribute(s) name(s) to be used for synchronization :return: The decorator method, surrounded with the lock """ # Filter the names (remove empty ones) locks_attr_names = [ lock_name for lock_name in locks_attr_names if lock_name ] if not locks_attr_names: raise ValueError("The lock names list can't be empty") if "sorted" not in kwargs or kwargs["sorted"]: # Sort the lock names if requested # (locking always in the same order reduces the risk of dead lock) locks_attr_names = list(locks_attr_names) locks_attr_names.sort() def wrapped(method): """ The wrapping method :param method: The wrapped method :return: The wrapped method :raise AttributeError: The given attribute name doesn't exist """ @functools.wraps(method) def synchronized(self, *args, **kwargs): """ Calls the wrapped method with a lock """ # Raises an AttributeError if needed locks = [getattr(self, attr_name) for attr_name in locks_attr_names] locked = collections.deque() i = 0 try: # Lock for lock in locks: if lock is None: # No lock... raise AttributeError( "Lock '{0}' can't be None in class {1}".format( locks_attr_names[i], type(self).__name__ ) ) # Get the lock i += 1 lock.acquire() locked.appendleft(lock) # Use the method return method(self, *args, **kwargs) finally: # Unlock what has been locked in all cases for lock in locked: lock.release() locked.clear() del locks[:] return synchronized # Return the wrapped method return wrapped
python
def SynchronizedClassMethod(*locks_attr_names, **kwargs): # pylint: disable=C1801 """ A synchronizer decorator for class methods. An AttributeError can be raised at runtime if the given lock attribute doesn't exist or if it is None. If a parameter ``sorted`` is found in ``kwargs`` and its value is True, then the list of locks names will be sorted before locking. :param locks_attr_names: A list of the lock(s) attribute(s) name(s) to be used for synchronization :return: The decorator method, surrounded with the lock """ # Filter the names (remove empty ones) locks_attr_names = [ lock_name for lock_name in locks_attr_names if lock_name ] if not locks_attr_names: raise ValueError("The lock names list can't be empty") if "sorted" not in kwargs or kwargs["sorted"]: # Sort the lock names if requested # (locking always in the same order reduces the risk of dead lock) locks_attr_names = list(locks_attr_names) locks_attr_names.sort() def wrapped(method): """ The wrapping method :param method: The wrapped method :return: The wrapped method :raise AttributeError: The given attribute name doesn't exist """ @functools.wraps(method) def synchronized(self, *args, **kwargs): """ Calls the wrapped method with a lock """ # Raises an AttributeError if needed locks = [getattr(self, attr_name) for attr_name in locks_attr_names] locked = collections.deque() i = 0 try: # Lock for lock in locks: if lock is None: # No lock... raise AttributeError( "Lock '{0}' can't be None in class {1}".format( locks_attr_names[i], type(self).__name__ ) ) # Get the lock i += 1 lock.acquire() locked.appendleft(lock) # Use the method return method(self, *args, **kwargs) finally: # Unlock what has been locked in all cases for lock in locked: lock.release() locked.clear() del locks[:] return synchronized # Return the wrapped method return wrapped
['def', 'SynchronizedClassMethod', '(', '*', 'locks_attr_names', ',', '*', '*', 'kwargs', ')', ':', '# pylint: disable=C1801', '# Filter the names (remove empty ones)', 'locks_attr_names', '=', '[', 'lock_name', 'for', 'lock_name', 'in', 'locks_attr_names', 'if', 'lock_name', ']', 'if', 'not', 'locks_attr_names', ':', 'raise', 'ValueError', '(', '"The lock names list can\'t be empty"', ')', 'if', '"sorted"', 'not', 'in', 'kwargs', 'or', 'kwargs', '[', '"sorted"', ']', ':', '# Sort the lock names if requested', '# (locking always in the same order reduces the risk of dead lock)', 'locks_attr_names', '=', 'list', '(', 'locks_attr_names', ')', 'locks_attr_names', '.', 'sort', '(', ')', 'def', 'wrapped', '(', 'method', ')', ':', '"""\n The wrapping method\n\n :param method: The wrapped method\n :return: The wrapped method\n :raise AttributeError: The given attribute name doesn\'t exist\n """', '@', 'functools', '.', 'wraps', '(', 'method', ')', 'def', 'synchronized', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '"""\n Calls the wrapped method with a lock\n """', '# Raises an AttributeError if needed', 'locks', '=', '[', 'getattr', '(', 'self', ',', 'attr_name', ')', 'for', 'attr_name', 'in', 'locks_attr_names', ']', 'locked', '=', 'collections', '.', 'deque', '(', ')', 'i', '=', '0', 'try', ':', '# Lock', 'for', 'lock', 'in', 'locks', ':', 'if', 'lock', 'is', 'None', ':', '# No lock...', 'raise', 'AttributeError', '(', '"Lock \'{0}\' can\'t be None in class {1}"', '.', 'format', '(', 'locks_attr_names', '[', 'i', ']', ',', 'type', '(', 'self', ')', '.', '__name__', ')', ')', '# Get the lock', 'i', '+=', '1', 'lock', '.', 'acquire', '(', ')', 'locked', '.', 'appendleft', '(', 'lock', ')', '# Use the method', 'return', 'method', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'finally', ':', '# Unlock what has been locked in all cases', 'for', 'lock', 'in', 'locked', ':', 'lock', '.', 'release', '(', ')', 'locked', '.', 'clear', '(', ')', 'del', 'locks', '[', ':', ']', 'return', 'synchronized', '# Return the wrapped method', 'return', 'wrapped']
A synchronizer decorator for class methods. An AttributeError can be raised at runtime if the given lock attribute doesn't exist or if it is None. If a parameter ``sorted`` is found in ``kwargs`` and its value is True, then the list of locks names will be sorted before locking. :param locks_attr_names: A list of the lock(s) attribute(s) name(s) to be used for synchronization :return: The decorator method, surrounded with the lock
['A', 'synchronizer', 'decorator', 'for', 'class', 'methods', '.', 'An', 'AttributeError', 'can', 'be', 'raised', 'at', 'runtime', 'if', 'the', 'given', 'lock', 'attribute', 'doesn', 't', 'exist', 'or', 'if', 'it', 'is', 'None', '.']
train
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/utilities.py#L250-L326
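A usage sketch for the decorator above, imported from pelix.utilities per the record's path; the Account class is hypothetical.

import threading

from pelix.utilities import SynchronizedClassMethod

class Account(object):
    def __init__(self):
        self._lock = threading.Lock()
        self.balance = 0

    @SynchronizedClassMethod('_lock')
    def deposit(self, amount):
        # Runs with self._lock held; the lock is released even on exceptions.
        self.balance += amount

acct = Account()
acct.deposit(10)
print(acct.balance)  # 10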
4,698
molmod/molmod
molmod/ic.py
dot
def dot(r1, r2): """Compute the dot product Arguments: | ``r1``, ``r2`` -- two :class:`Vector3` objects (Returns a Scalar) """ if r1.size != r2.size: raise ValueError("Both arguments must have the same input size.") if r1.deriv != r2.deriv: raise ValueError("Both arguments must have the same deriv.") return r1.x*r2.x + r1.y*r2.y + r1.z*r2.z
python
def dot(r1, r2): """Compute the dot product Arguments: | ``r1``, ``r2`` -- two :class:`Vector3` objects (Returns a Scalar) """ if r1.size != r2.size: raise ValueError("Both arguments must have the same input size.") if r1.deriv != r2.deriv: raise ValueError("Both arguments must have the same deriv.") return r1.x*r2.x + r1.y*r2.y + r1.z*r2.z
['def', 'dot', '(', 'r1', ',', 'r2', ')', ':', 'if', 'r1', '.', 'size', '!=', 'r2', '.', 'size', ':', 'raise', 'ValueError', '(', '"Both arguments must have the same input size."', ')', 'if', 'r1', '.', 'deriv', '!=', 'r2', '.', 'deriv', ':', 'raise', 'ValueError', '(', '"Both arguments must have the same deriv."', ')', 'return', 'r1', '.', 'x', '*', 'r2', '.', 'x', '+', 'r1', '.', 'y', '*', 'r2', '.', 'y', '+', 'r1', '.', 'z', '*', 'r2', '.', 'z']
Compute the dot product Arguments: | ``r1``, ``r2`` -- two :class:`Vector3` objects (Returns a Scalar)
['Compute', 'the', 'dot', 'product']
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/ic.py#L283-L295
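A plain-float sketch of the same componentwise sum the Vector3 version above computes; it drops the size/deriv bookkeeping entirely.

def dot3(a, b):
    # r1 . r2 = x1*x2 + y1*y2 + z1*z2, with no derivative tracking.
    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]

assert dot3((1.0, 2.0, 3.0), (4.0, 5.0, 6.0)) == 32.0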
4,699
mgagne/wafflehaus.iweb
wafflehaus/iweb/keystone/user_filter/blacklist.py
filter_factory
def filter_factory(global_conf, **local_conf): """Returns a WSGI filter app for use with paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) def blacklist(app): return BlacklistFilter(app, conf) return blacklist
python
def filter_factory(global_conf, **local_conf): """Returns a WSGI filter app for use with paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) def blacklist(app): return BlacklistFilter(app, conf) return blacklist
['def', 'filter_factory', '(', 'global_conf', ',', '*', '*', 'local_conf', ')', ':', 'conf', '=', 'global_conf', '.', 'copy', '(', ')', 'conf', '.', 'update', '(', 'local_conf', ')', 'def', 'blacklist', '(', 'app', ')', ':', 'return', 'BlacklistFilter', '(', 'app', ',', 'conf', ')', 'return', 'blacklist']
Returns a WSGI filter app for use with paste.deploy.
['Returns', 'a', 'WSGI', 'filter', 'app', 'for', 'use', 'with', 'paste', '.', 'deploy', '.']
train
https://github.com/mgagne/wafflehaus.iweb/blob/8ac625582c1180391fe022d1db19f70a2dfb376a/wafflehaus/iweb/keystone/user_filter/blacklist.py#L89-L96
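A paste.deploy-style wiring sketch for the filter_factory record above; the option name passed through local_conf is hypothetical, and the inner app is a stub.

def app(environ, start_response):
    # Stub WSGI app standing in for Keystone.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

make_blacklist = filter_factory({}, blacklist='alice,bob')  # hypothetical option name
wrapped = make_blacklist(app)  # BlacklistFilter now wraps the app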