Dataset schema (one record per function):

| Field | Description |
|---|---|
| repository_name | string, lengths 7–55 |
| func_path_in_repository | string, lengths 4–223 |
| func_name | string, lengths 1–134 |
| whole_func_string | string, lengths 75–104k |
| language | string, 1 distinct value |
| func_code_string | string, lengths 75–104k |
| func_code_tokens | sequence, lengths 19–28.4k |
| func_documentation_string | string, lengths 1–46.9k |
| func_documentation_tokens | sequence, lengths 1–1.97k |
| split_name | string, 1 distinct value |
| func_code_url | string, lengths 87–315 |
BlueBrain/hpcbench
hpcbench/toolbox/edsl.py
kwargsql.xor
```python
def xor(cls, obj, **kwargs):
    """Query an object.

    :param obj: object to test
    :param kwargs: query specified in kwargssql
    :return:
        `True` if exactly one `kwargs` expression is `True`,
        `False` otherwise.
    :rtype: bool
    """
    return cls.__eval_seqexp(obj, operator.xor, **kwargs)
```
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/edsl.py#L158-L171
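Note that the helper `__eval_seqexp` is not shown in this excerpt; if the boolean results of the expressions are simply folded with `operator.xor`, the result is odd parity, which matches "exactly one is `True`" only when two expressions are given. A minimal, self-contained sketch of that folding behaviour:

```python
import operator
from functools import reduce

# Folding boolean results with operator.xor yields odd parity:
reduce(operator.xor, [True, False, False])  # True  (one True)
reduce(operator.xor, [True, True, False])   # False (two Trues)
reduce(operator.xor, [True, True, True])    # True  (three Trues, still an odd count)
```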
BlueBrain/hpcbench
hpcbench/toolbox/edsl.py
kwargsql.__resolve_path
```python
def __resolve_path(cls, obj, path):
    """Follow a kwargsql expression starting from a given object
    and return the deduced object.

    :param obj: the object to start from
    :param list path:
        list of operations to perform. It does not contain the
        optional operation of a traditional kwargsql expression.
    :return: the found object if any, `None` otherwise.

    For instance:

    >>> __resolve_path(dict(foo=dict(bar=42)), ['foo', 'bar'])
    >>> 42
    """
    path = [p for p in path if p]
    if any(path):
        pathes = len(path)
        i = 0
        while i < pathes:
            # _get_obj_attr can supersede `i` because it might
            # evaluate the entire expression by itself.
            obj, i = cls._get_obj_attr(obj, path, i)
            i += 1
    else:
        raise Exception("Nothing to do")
    return obj
```
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/edsl.py#L208-L234
BlueBrain/hpcbench
hpcbench/toolbox/edsl.py
kwargsql._sequence_map
```python
def _sequence_map(cls, seq, path):
    """Apply a kwargsql expression to every item of a sequence,
    and returns it.

    :param seq: the list to transform
    :param path:
        kwargsql expression to apply to every elements of the
        given sequence.
    """
    if not any(path):
        # There is no further kwargsql expression
        return seq
    result = []
    for item in seq:
        try:
            result.append(cls.__resolve_path(item, path))
        except (KeyError, IndexError):
            pass
    return result
```
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/edsl.py#L270-L287
BlueBrain/hpcbench
hpcbench/toolbox/edsl.py
kwargsql._not
```python
def _not(cls, operation):
    """not operation"""

    def _wrap(*args, **kwargs):
        return not operation(*args, **kwargs)

    return _wrap
```
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/edsl.py#L290-L296
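A minimal, self-contained sketch of the same negation-wrapper pattern outside the class (the `negate` name and the predicate below are hypothetical, chosen only for illustration):

```python
def negate(operation):
    """Return a callable that inverts the boolean result of `operation`."""
    def _wrap(*args, **kwargs):
        return not operation(*args, **kwargs)
    return _wrap

is_empty = lambda seq: len(seq) == 0
non_empty = negate(is_empty)
assert non_empty([1, 2]) is True
assert non_empty([]) is False
```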
PolyJIT/benchbuild
benchbuild/project.py
populate
```python
def populate(projects_to_filter=None, group=None):
    """
    Populate the list of projects that belong to this experiment.

    Args:
        projects_to_filter (list(Project)):
            List of projects we want to assign to this experiment.
            We intersect the list of projects with the list of supported
            projects to get the list of projects that belong to this
            experiment.
        group (list(str)):
            In addition to the project filter, we provide a way to filter
            whole groups.
    """
    if projects_to_filter is None:
        projects_to_filter = []

    import benchbuild.projects as all_projects
    all_projects.discover()

    prjs = ProjectRegistry.projects
    if projects_to_filter:
        prjs = {}
        for filter_project in set(projects_to_filter):
            try:
                prjs.update({
                    x: y
                    for x, y in ProjectRegistry.projects.items(
                        prefix=filter_project)
                })
            except KeyError:
                pass

    if group:
        groupkeys = set(group)
        prjs = {
            name: cls
            for name, cls in prjs.items() if cls.GROUP in groupkeys
        }

    return {
        x: prjs[x]
        for x in prjs
        if prjs[x].DOMAIN != "debug" or x in projects_to_filter
    }
```
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/project.py#L342-L385
PolyJIT/benchbuild
benchbuild/utils/compiler.py
cc
```python
def cc(project, detect_project=False):
    """
    Return a clang that hides CFLAGS and LDFLAGS.

    This will generate a wrapper script in the current directory
    and return a complete plumbum command to it.

    Args:
        cflags: The CFLAGS we want to hide.
        ldflags: The LDFLAGS we want to hide.
        func (optional): A function that will be pickled alongside the
            compiler. It will be called before the actual compilation took
            place. This way you can intercept the compilation process with
            arbitrary python code.

    Returns (benchbuild.utils.cmd):
        Path to the new clang command.
    """
    from benchbuild.utils import cmd

    cc_name = str(CFG["compiler"]["c"])
    wrap_cc(cc_name, compiler(cc_name), project, detect_project=detect_project)
    return cmd["./{}".format(cc_name)]
```
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/compiler.py#L27-L49
PolyJIT/benchbuild
benchbuild/utils/compiler.py
cxx
```python
def cxx(project, detect_project=False):
    """
    Return a clang++ that hides CFLAGS and LDFLAGS.

    This will generate a wrapper script in the current directory
    and return a complete plumbum command to it.

    Args:
        cflags: The CFLAGS we want to hide.
        ldflags: The LDFLAGS we want to hide.
        func (optional): A function that will be pickled alongside the
            compiler. It will be called before the actual compilation took
            place. This way you can intercept the compilation process with
            arbitrary python code.

    Returns (benchbuild.utils.cmd):
        Path to the new clang command.
    """
    from benchbuild.utils import cmd

    cxx_name = str(CFG["compiler"]["cxx"])
    wrap_cc(
        cxx_name, compiler(cxx_name), project, detect_project=detect_project)
    return cmd["./{name}".format(name=cxx_name)]
```
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/compiler.py#L52-L75
PolyJIT/benchbuild
benchbuild/utils/compiler.py
compiler
```python
def compiler(name):
    """
    Get a usable clang++ plumbum command.

    This searches for a usable clang++ in the llvm binary path.

    Returns:
        plumbum Command that executes clang++
    """
    pinfo = __get_paths()
    _compiler = local[name]
    _compiler = _compiler.setenv(
        PATH=pinfo["path"], LD_LIBRARY_PATH=pinfo["ld_library_path"])
    return _compiler
```
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/compiler.py#L98-L111
PolyJIT/benchbuild
benchbuild/utils/wrapping.py
strip_path_prefix
```python
def strip_path_prefix(ipath, prefix):
    """
    Strip prefix from path.

    Args:
        ipath: input path
        prefix: the prefix to remove, if it is found in :ipath:

    Examples:
        >>> strip_path_prefix("/foo/bar", "/bar")
        '/foo/bar'
        >>> strip_path_prefix("/foo/bar", "/")
        'foo/bar'
        >>> strip_path_prefix("/foo/bar", "/foo")
        '/bar'
        >>> strip_path_prefix("/foo/bar", "None")
        '/foo/bar'
    """
    if prefix is None:
        return ipath

    return ipath[len(prefix):] if ipath.startswith(prefix) else ipath
```
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L43-L65
PolyJIT/benchbuild
benchbuild/utils/wrapping.py
unpickle
```python
def unpickle(pickle_file):
    """Unpickle a python object from the given path."""
    pickle = None
    with open(pickle_file, "rb") as pickle_f:
        pickle = dill.load(pickle_f)
    if not pickle:
        LOG.error("Could not load python object from file")
    return pickle
```
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L68-L75
PolyJIT/benchbuild
benchbuild/utils/wrapping.py
wrap
```python
def wrap(name, project, sprefix=None, python=sys.executable):
    """Wrap the binary :name: with the runtime extension of the project.

    This module generates a python tool that replaces :name:
    The function in runner only accepts the replaced binaries
    name as argument. We use the cloudpickle package to
    perform the serialization, make sure :runner: can be serialized
    with it and you're fine.

    Args:
        name: Binary we want to wrap
        project: The project that contains the runtime_extension we want
            to run instead of the binary.

    Returns:
        A plumbum command, ready to launch.
    """
    env = __create_jinja_env()
    template = env.get_template('run_static.py.inc')

    name_absolute = os.path.abspath(name)
    real_f = name_absolute + PROJECT_BIN_F_EXT
    if sprefix:
        run(uchroot()["/bin/mv",
                      strip_path_prefix(name_absolute, sprefix),
                      strip_path_prefix(real_f, sprefix)])
    else:
        run(mv[name_absolute, real_f])

    project_file = persist(project, suffix=".project")

    env = CFG['env'].value
    bin_path = list_to_path(env.get('PATH', []))
    bin_path = list_to_path([bin_path, os.environ["PATH"]])

    bin_lib_path = list_to_path(env.get('LD_LIBRARY_PATH', []))
    bin_lib_path = list_to_path([bin_lib_path, os.environ["LD_LIBRARY_PATH"]])

    with open(name_absolute, 'w') as wrapper:
        wrapper.write(
            template.render(
                runf=strip_path_prefix(real_f, sprefix),
                project_file=strip_path_prefix(project_file, sprefix),
                path=str(bin_path),
                ld_library_path=str(bin_lib_path),
                python=python,
            ))

    run(chmod["+x", name_absolute])
    return local[name_absolute]
```
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L86-L136
PolyJIT/benchbuild
benchbuild/utils/wrapping.py
wrap_cc
```python
def wrap_cc(filepath,
            compiler,
            project,
            python=sys.executable,
            detect_project=False):
    """
    Substitute a compiler with a script that hides CFLAGS & LDFLAGS.

    This will generate a wrapper script in the current directory
    and return a complete plumbum command to it.

    Args:
        filepath (str): Path to the wrapper script.
        compiler (benchbuild.utils.cmd): Real compiler command we should
            call in the script.
        project (benchbuild.project.Project): The project this compiler
            will be for.
        python (str): Path to the python interpreter we should use.
        detect_project: Should we enable project detection or not.

    Returns (benchbuild.utils.cmd):
        Command of the new compiler we can call.
    """
    env = __create_jinja_env()
    template = env.get_template('run_compiler.py.inc')

    cc_fname = local.path(filepath).with_suffix(".benchbuild.cc", depth=0)
    cc_f = persist(compiler, filename=cc_fname)

    project_file = persist(project, suffix=".project")

    with open(filepath, 'w') as wrapper:
        wrapper.write(
            template.render(
                cc_f=cc_f,
                project_file=project_file,
                python=python,
                detect_project=detect_project))

    chmod("+x", filepath)
    LOG.debug("Placed wrapper in: %s for compiler %s", local.path(filepath),
              str(compiler))
    LOG.debug("Placed project in: %s", local.path(project_file))
    LOG.debug("Placed compiler command in: %s", local.path(cc_f))
    return local[filepath]
```
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L200-L244
PolyJIT/benchbuild
benchbuild/utils/wrapping.py
persist
```python
def persist(id_obj, filename=None, suffix=None):
    """Persist an object in the filesystem.

    This will generate a pickled version of the given obj in the filename
    path. Objects shall provide an id() method to be able to use this
    persistence API. If not, we will use the id() builtin of python to
    generate an identifier for you.

    The file will be created, if it does not exist.
    If the file already exists, we will overwrite it.

    Args:
        id_obj (Any): An identifiable object you want to persist in the
                      filesystem.
    """
    if suffix is None:
        suffix = ".pickle"
    if hasattr(id_obj, 'id'):
        ident = id_obj.id
    else:
        ident = str(id(id_obj))
    if filename is None:
        filename = "{obj_id}{suffix}".format(obj_id=ident, suffix=suffix)
    with open(filename, 'wb') as obj_file:
        dill.dump(id_obj, obj_file)
    return os.path.abspath(filename)
```
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L247-L274
PolyJIT/benchbuild
benchbuild/utils/wrapping.py
load
```python
def load(filename):
    """Load a pickled obj from the filesystem.

    You better know what you expect from the given pickle, because we don't
    check it.

    Args:
        filename (str): The filename we load the object from.

    Returns:
        The object we were able to unpickle, else None.
    """
    if not os.path.exists(filename):
        LOG.error("load object - File '%s' does not exist.", filename)
        return None

    obj = None
    with open(filename, 'rb') as obj_file:
        obj = dill.load(obj_file)
    return obj
```
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L277-L295
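For context, a minimal round-trip sketch combining `persist` and `load` from this module; it assumes `dill`, `os`, and `LOG` are set up as in the source file, and the `Job` class below is hypothetical:

```python
class Job:
    id = "job-42"  # persist() prefers an `id` attribute when building the filename

path = persist(Job(), suffix=".state")  # writes ./job-42.state, returns its absolute path
restored = load(path)                   # unpickles the object, or None if the file is missing
assert restored.id == "job-42"
```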
sci-bots/svg-model
svg_model/connections.py
extend_shapes
```python
def extend_shapes(df_shapes, axis, distance):
    '''
    Extend shape/polygon outline away from polygon center point by absolute
    distance.
    '''
    df_shapes_i = df_shapes.copy()
    offsets = df_shapes_i[axis + '_center_offset'].copy()
    offsets[offsets < 0] -= distance
    offsets[offsets >= 0] += distance
    df_shapes_i[axis] = df_shapes_i[axis + '_center'] + offsets
    return df_shapes_i
```
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/connections.py#L17-L27
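A small illustrative sketch, assuming the `*_center` and `*_center_offset` columns hold each vertex's shape centre and signed offset from that centre (here derived from a toy unit square; the `id` column name is chosen for illustration):

```python
import pandas as pd

# Unit square with id 0; derive the centre/offset columns expected by extend_shapes().
df_shapes = pd.DataFrame({'id': [0, 0, 0, 0],
                          'x': [0., 1., 1., 0.],
                          'y': [0., 0., 1., 1.]})
for axis in 'xy':
    df_shapes[axis + '_center'] = df_shapes.groupby('id')[axis].transform('mean')
    df_shapes[axis + '_center_offset'] = df_shapes[axis] - df_shapes[axis + '_center']

df_wider = extend_shapes(df_shapes, 'x', 0.1)
# x vertices move from {0, 1} to {-0.1, 1.1}; y coordinates are untouched.
```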
sci-bots/svg-model
svg_model/connections.py
extract_adjacent_shapes
```python
def extract_adjacent_shapes(df_shapes, shape_i_column, extend=.5):
    '''
    Generate list of connections between "adjacent" polygon shapes based on
    geometrical "closeness".

    Parameters
    ----------
    df_shapes : pandas.DataFrame
        Table of polygon shape vertices (one row per vertex).

        Table rows with the same value in the :data:`shape_i_column` column
        are grouped together as a polygon.
    shape_i_column : str or list[str]
        Column name(s) that identify the polygon each row belongs to.
    extend : float, optional
        Extend ``x``/``y`` coords by the specified number of absolute units
        from the center point of each polygon.

        Each polygon is stretched independently in the ``x`` and ``y``
        direction.  In each direction, a polygon considered adjacent to all
        polygons that are overlapped by the extended shape.

    Returns
    -------
    pandas.DataFrame
        Adjacency list as a frame containing the columns ``source`` and
        ``target``.

        The ``source`` and ``target`` of each adjacency connection is
        ordered such that the ``source`` is less than the ``target``.
    '''
    # Find corners of each solid shape outline.
    # Extend x coords by abs units
    df_scaled_x = extend_shapes(df_shapes, 'x', extend)
    # Extend y coords by abs units
    df_scaled_y = extend_shapes(df_shapes, 'y', extend)

    df_corners = df_shapes.groupby(shape_i_column).agg({'x': ['min', 'max'],
                                                        'y': ['min', 'max']})

    # Find adjacent electrodes
    row_list = []

    for shapeNumber in df_shapes[shape_i_column].drop_duplicates():
        df_stretched = df_scaled_x[df_scaled_x[shape_i_column]
                                   .isin([shapeNumber])]
        xmin_x, xmax_x, ymin_x, ymax_x = (df_stretched.x.min(),
                                          df_stretched.x.max(),
                                          df_stretched.y.min(),
                                          df_stretched.y.max())
        df_stretched = df_scaled_y[df_scaled_y[shape_i_column]
                                   .isin([shapeNumber])]
        xmin_y, xmax_y, ymin_y, ymax_y = (df_stretched.x.min(),
                                          df_stretched.x.max(),
                                          df_stretched.y.min(),
                                          df_stretched.y.max())

        # Some conditions unnecessary if it is assumed that electrodes don't
        # overlap
        adjacent = df_corners[
            ((df_corners.x['min'] < xmax_x) & (df_corners.x['max'] >= xmax_x)
             # Check in x stretched direction
             | (df_corners.x['min'] < xmin_x) & (df_corners.x['max'] >= xmin_x))
            # Check if y is within bounds
            & (df_corners.y['min'] < ymax_x) & (df_corners.y['max'] > ymin_x) |
            # maybe do ymax_x - df_corners.y['min'] > threshold &
            # df_corners.y['max'] - ymin_x > threshold
            ((df_corners.y['min'] < ymax_y) & (df_corners.y['max'] >= ymax_y)
             # Checks in y stretched direction
             | (df_corners.y['min'] < ymin_y) & (df_corners.y['max'] >= ymin_y))
            # Check if x in within bounds
            & ((df_corners.x['min'] < xmax_y) & (df_corners.x['max'] > xmin_y))
        ].index.values

        for shape in adjacent:
            temp_dict = {}
            reverse_dict = {}

            temp_dict['source'] = shapeNumber
            reverse_dict['source'] = shape
            temp_dict['target'] = shape
            reverse_dict['target'] = shapeNumber

            if reverse_dict not in row_list:
                row_list.append(temp_dict)

    df_connected = (pd.DataFrame(row_list)[['source', 'target']]
                    .sort_index(axis=1, ascending=True)
                    .sort_values(['source', 'target']))
    return df_connected
```
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/connections.py#L30-L119
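A usage sketch with two unit squares sharing an edge, using the same assumed `*_center` / `*_center_offset` preparation as in the `extend_shapes` example above; the second square should be reported as adjacent to the first:

```python
import pandas as pd

squares = {0: (0., 1.), 1: (1., 2.)}  # shape id -> (xmin, xmax); both span y in [0, 1]
rows = [{'id': k, 'x': x, 'y': y}
        for k, (x0, x1) in squares.items()
        for x, y in [(x0, 0.), (x1, 0.), (x1, 1.), (x0, 1.)]]
df_shapes = pd.DataFrame(rows)
for axis in 'xy':
    df_shapes[axis + '_center'] = df_shapes.groupby('id')[axis].transform('mean')
    df_shapes[axis + '_center_offset'] = df_shapes[axis] - df_shapes[axis + '_center']

df_connected = extract_adjacent_shapes(df_shapes, 'id', extend=.5)
# Expected adjacency list: a single row with source=0, target=1.
```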
sci-bots/svg-model
svg_model/connections.py
get_adjacency_matrix
```python
def get_adjacency_matrix(df_connected):
    '''
    Return matrix where $a_{i,j} = 1$ indicates polygon $i$ is connected to
    polygon $j$.

    Also, return mapping (and reverse mapping) from original keys in
    `df_connected` to zero-based integer index used for matrix rows and
    columns.
    '''
    sorted_path_keys = np.sort(np.unique(df_connected[['source', 'target']]
                                         .values.ravel()))
    indexed_paths = pd.Series(sorted_path_keys)
    path_indexes = pd.Series(indexed_paths.index, index=sorted_path_keys)

    adjacency_matrix = np.zeros((path_indexes.shape[0], ) * 2, dtype=int)
    for i_key, j_key in df_connected[['source', 'target']].values:
        i, j = path_indexes.loc[[i_key, j_key]]
        adjacency_matrix[i, j] = 1
        adjacency_matrix[j, i] = 1
    return adjacency_matrix, indexed_paths, path_indexes
```
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/connections.py#L122-L141
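A sketch of the expected output for a three-shape chain ('a'-'b'-'c'), assuming `np` and `pd` are imported as in the source module:

```python
import pandas as pd

df_connected = pd.DataFrame([('a', 'b'), ('b', 'c')], columns=['source', 'target'])
adjacency, indexed_paths, path_indexes = get_adjacency_matrix(df_connected)

# path_indexes maps 'a' -> 0, 'b' -> 1, 'c' -> 2; indexed_paths is the reverse mapping.
# adjacency:
# [[0 1 0]
#  [1 0 1]
#  [0 1 0]]
```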
sci-bots/svg-model
svg_model/connections.py
extract_connections
```python
def extract_connections(svg_source, shapes_canvas, line_layer='Connections',
                        line_xpath=None, path_xpath=None, namespaces=None):
    '''
    Load all ``<svg:line>`` elements and ``<svg:path>`` elements from a layer
    of an SVG source.  For each element, if endpoints overlap distinct shapes
    in :data:`shapes_canvas`, add connection between overlapped shapes.

    .. versionchanged:: 0.6.post1
        Allow both ``<svg:line>`` *and* ``<svg:path>`` instances to denote
        connected/adjacent shapes.

    .. versionadded:: 0.6.post1
        :data:`path_xpath`

    Parameters
    ----------
    svg_source : filepath
        Input SVG file containing connection lines.
    shapes_canvas : shapes_canvas.ShapesCanvas
        Shapes canvas containing shapes to compare against connection
        endpoints.
    line_layer : str
        Name of layer in SVG containing connection lines.
    line_xpath : str
        XPath string to iterate through connection lines.
    path_xpath : str
        XPath string to iterate through connection paths.
    namespaces : dict
        SVG namespaces (compatible with :func:`etree.parse`).

    Returns
    -------
    pandas.DataFrame
        Each row corresponds to connection between two shapes in
        :data:`shapes_canvas`, denoted ``source`` and ``target``.
    '''
    from lxml import etree

    if namespaces is None:
        # Inkscape namespace is required to match SVG elements as well as
        # Inkscape-specific SVG tags and attributes (e.g., `inkscape:label`).
        namespaces = INKSCAPE_NSMAP

    # Parse SVG source.
    e_root = etree.parse(svg_source)

    # List to hold records of form: `[<id>, <x1>, <y1>, <x2>, <y2>]`.
    frames = []

    if line_xpath is None:
        # Define query to look for `svg:line` elements in top level of layer
        # of SVG specified to contain connections.
        line_xpath = ("//svg:g[@inkscape:label='%s']/svg:line" % line_layer)
    coords_columns = ['x1', 'y1', 'x2', 'y2']

    for line_i in e_root.xpath(line_xpath, namespaces=namespaces):
        # Extract start and end coordinate from `svg:line` element.
        line_i_dict = dict(list(line_i.items()))
        values = ([line_i_dict.get('id', None)] +
                  [float(line_i_dict[k]) for k in coords_columns])
        # Append record for end points of current line.
        frames.append(values)

    # Regular expression pattern to match start and end coordinates of
    # connection `svg:path` element.
    cre_path_ends = re.compile(r'^\s*M\s*(?P<start_x>\d+(\.\d+)?),\s*'
                               r'(?P<start_y>\d+(\.\d+)?).*'
                               # Diagonal line...
                               r'((L\s*(?P<end_x>\d+(\.\d+)?),\s*'
                               r'(?P<end_y>\d+(\.\d+)?))|'
                               # or Vertical line...
                               r'(V\s*(?P<end_vy>\d+(\.\d+)?))|'
                               # or Horizontal line
                               r'(H\s*(?P<end_hx>\d+(\.\d+)?))'
                               r')\D*'
                               r'$')

    if path_xpath is None:
        # Define query to look for `svg:path` elements in top level of layer
        # of SVG specified to contain connections.
        path_xpath = ("//svg:g[@inkscape:label='%s']/svg:path" % line_layer)

    for path_i in e_root.xpath(path_xpath, namespaces=namespaces):
        path_i_dict = dict(list(path_i.items()))
        match_i = cre_path_ends.match(path_i_dict['d'])
        if match_i:
            # Connection `svg:path` matched required format.  Extract start
            # and end coordinates.
            match_dict_i = match_i.groupdict()
            if match_dict_i['end_vy']:
                # Path ended with vertical line
                match_dict_i['end_x'] = match_dict_i['start_x']
                match_dict_i['end_y'] = match_dict_i['end_vy']
            if match_dict_i['end_hx']:
                # Path ended with horizontal line
                match_dict_i['end_x'] = match_dict_i['end_hx']
                match_dict_i['end_y'] = match_dict_i['start_y']
            # Append record for end points of current path.
            frames.append([path_i_dict['id']] +
                          list(map(float, (match_dict_i['start_x'],
                                           match_dict_i['start_y'],
                                           match_dict_i['end_x'],
                                           match_dict_i['end_y']))))

    if not frames:
        return pd.DataFrame(None, columns=['source', 'target'])

    df_connection_lines = pd.DataFrame(frames, columns=['id'] + coords_columns)

    # Use `shapes_canvas.find_shape` to determine shapes overlapped by end
    # points of each `svg:path` or `svg:line`.
    df_shape_connections_i = pd.DataFrame([[shapes_canvas.find_shape(x1, y1),
                                            shapes_canvas.find_shape(x2, y2)]
                                           for i, (x1, y1, x2, y2) in
                                           df_connection_lines[coords_columns]
                                           .iterrows()],
                                          columns=['source', 'target'])
    # Order the source and target of each row so the source shape identifier
    # is always the lowest.
    df_shape_connections_i.sort_index(axis=1, inplace=True)
    # Tag each shape connection with the corresponding `svg:line`/`svg:path`
    # identifier.  May be useful, e.g., in debugging.
    df_shape_connections_i['line_id'] = df_connection_lines['id']

    # Remove connections where source or target shape was not matched (e.g.,
    # if one or more end points does not overlap with a shape).
    return df_shape_connections_i.dropna()
```
Extract start and", "# end coordinates.", "match_dict_i", "=", "match_i", ".", "groupdict", "(", ")", "if", "match_dict_i", "[", "'end_vy'", "]", ":", "# Path ended with vertical line", "match_dict_i", "[", "'end_x'", "]", "=", "match_dict_i", "[", "'start_x'", "]", "match_dict_i", "[", "'end_y'", "]", "=", "match_dict_i", "[", "'end_vy'", "]", "if", "match_dict_i", "[", "'end_hx'", "]", ":", "# Path ended with horizontal line", "match_dict_i", "[", "'end_x'", "]", "=", "match_dict_i", "[", "'end_hx'", "]", "match_dict_i", "[", "'end_y'", "]", "=", "match_dict_i", "[", "'start_y'", "]", "# Append record for end points of current path.", "frames", ".", "append", "(", "[", "path_i_dict", "[", "'id'", "]", "]", "+", "list", "(", "map", "(", "float", ",", "(", "match_dict_i", "[", "'start_x'", "]", ",", "match_dict_i", "[", "'start_y'", "]", ",", "match_dict_i", "[", "'end_x'", "]", ",", "match_dict_i", "[", "'end_y'", "]", ")", ")", ")", ")", "if", "not", "frames", ":", "return", "pd", ".", "DataFrame", "(", "None", ",", "columns", "=", "[", "'source'", ",", "'target'", "]", ")", "df_connection_lines", "=", "pd", ".", "DataFrame", "(", "frames", ",", "columns", "=", "[", "'id'", "]", "+", "coords_columns", ")", "# Use `shapes_canvas.find_shape` to determine shapes overlapped by end", "# points of each `svg:path` or `svg:line`.", "df_shape_connections_i", "=", "pd", ".", "DataFrame", "(", "[", "[", "shapes_canvas", ".", "find_shape", "(", "x1", ",", "y1", ")", ",", "shapes_canvas", ".", "find_shape", "(", "x2", ",", "y2", ")", "]", "for", "i", ",", "(", "x1", ",", "y1", ",", "x2", ",", "y2", ")", "in", "df_connection_lines", "[", "coords_columns", "]", ".", "iterrows", "(", ")", "]", ",", "columns", "=", "[", "'source'", ",", "'target'", "]", ")", "# Order the source and target of each row so the source shape identifier is", "# always the lowest.", "df_shape_connections_i", ".", "sort_index", "(", "axis", "=", "1", ",", "inplace", "=", "True", ")", "# Tag each shape connection with the corresponding `svg:line`/`svg:path`", "# identifier. May be useful, e.g., in debugging.", "df_shape_connections_i", "[", "'line_id'", "]", "=", "df_connection_lines", "[", "'id'", "]", "# Remove connections where source or target shape was not matched (e.g., if", "# one or more end points does not overlap with a shape).", "return", "df_shape_connections_i", ".", "dropna", "(", ")" ]
Load all ``<svg:line>`` elements and ``<svg:path>`` elements from a layer of an SVG source. For each element, if endpoints overlap distinct shapes in :data:`shapes_canvas`, add connection between overlapped shapes. .. versionchanged:: 0.6.post1 Allow both ``<svg:line>`` *and* ``<svg:path>`` instances to denote connected/adjacent shapes. .. versionadded:: 0.6.post1 :data:`path_xpath` Parameters ---------- svg_source : filepath Input SVG file containing connection lines. shapes_canvas : shapes_canvas.ShapesCanvas Shapes canvas containing shapes to compare against connection endpoints. line_layer : str Name of layer in SVG containing connection lines. line_xpath : str XPath string to iterate through connection lines. path_xpath : str XPath string to iterate through connection paths. namespaces : dict SVG namespaces (compatible with :func:`etree.parse`). Returns ------- pandas.DataFrame Each row corresponds to connection between two shapes in :data:`shapes_canvas`, denoted ``source`` and ``target``.
[ "Load", "all", "<svg", ":", "line", ">", "elements", "and", "<svg", ":", "path", ">", "elements", "from", "a", "layer", "of", "an", "SVG", "source", ".", "For", "each", "element", "if", "endpoints", "overlap", "distinct", "shapes", "in", ":", "data", ":", "shapes_canvas", "add", "connection", "between", "overlapped", "shapes", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/connections.py#L144-L268
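The connection-extraction record above hinges on a single regular expression that recovers the start and end points of each svg:path. The following standalone sketch (not part of the repository; the sample "d" attribute values are invented) shows how that same pattern handles diagonal, vertical and horizontal segments, and how the missing axis is backfilled from the start point:

import re

# Same pattern as in `extract_connections` above; the sample "d" values are made up.
cre_path_ends = re.compile(r'^\s*M\s*(?P<start_x>\d+(\.\d+)?),\s*'
                           r'(?P<start_y>\d+(\.\d+)?).*'
                           r'((L\s*(?P<end_x>\d+(\.\d+)?),\s*'
                           r'(?P<end_y>\d+(\.\d+)?))|'
                           r'(V\s*(?P<end_vy>\d+(\.\d+)?))|'
                           r'(H\s*(?P<end_hx>\d+(\.\d+)?))'
                           r')\D*$')

for d in ('M 10,20 L 30,40', 'M 5,5 V 25', 'M 5,5 H 25'):
    groups = cre_path_ends.match(d).groupdict()
    # Vertical/horizontal segments reuse the start coordinate for the missing axis.
    end_x = groups['end_x'] or groups['end_hx'] or groups['start_x']
    end_y = groups['end_y'] or groups['end_vy'] or groups['start_y']
    print(d, '->', (groups['start_x'], groups['start_y']), 'to', (end_x, end_y))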
PolyJIT/benchbuild
benchbuild/reports/__init__.py
discover
def discover(): """ Import all experiments listed in *_PLUGINS_REPORTS. Tests: >>> from benchbuild.settings import CFG >>> from benchbuild.reports import discover >>> import logging as lg >>> import sys >>> l = lg.getLogger('benchbuild') >>> l.setLevel(lg.DEBUG) >>> l.handlers = [lg.StreamHandler(stream=sys.stdout)] >>> CFG["plugins"]["reports"] = ["benchbuild.non.existing", "benchbuild.reports.raw"] >>> discover() Could not find 'benchbuild.non.existing' Found report: benchbuild.reports.raw """ if CFG["plugins"]["autoload"]: report_plugins = CFG["plugins"]["reports"].value for plugin in report_plugins: try: importlib.import_module(plugin) LOG.debug("Found report: %s", plugin) except ImportError: LOG.error("Could not find '%s'", plugin)
python
def discover(): """ Import all experiments listed in *_PLUGINS_REPORTS. Tests: >>> from benchbuild.settings import CFG >>> from benchbuild.reports import discover >>> import logging as lg >>> import sys >>> l = lg.getLogger('benchbuild') >>> l.setLevel(lg.DEBUG) >>> l.handlers = [lg.StreamHandler(stream=sys.stdout)] >>> CFG["plugins"]["reports"] = ["benchbuild.non.existing", "benchbuild.reports.raw"] >>> discover() Could not find 'benchbuild.non.existing' Found report: benchbuild.reports.raw """ if CFG["plugins"]["autoload"]: report_plugins = CFG["plugins"]["reports"].value for plugin in report_plugins: try: importlib.import_module(plugin) LOG.debug("Found report: %s", plugin) except ImportError: LOG.error("Could not find '%s'", plugin)
[ "def", "discover", "(", ")", ":", "if", "CFG", "[", "\"plugins\"", "]", "[", "\"autoload\"", "]", ":", "report_plugins", "=", "CFG", "[", "\"plugins\"", "]", "[", "\"reports\"", "]", ".", "value", "for", "plugin", "in", "report_plugins", ":", "try", ":", "importlib", ".", "import_module", "(", "plugin", ")", "LOG", ".", "debug", "(", "\"Found report: %s\"", ",", "plugin", ")", "except", "ImportError", ":", "LOG", ".", "error", "(", "\"Could not find '%s'\"", ",", "plugin", ")" ]
Import all experiments listed in *_PLUGINS_REPORTS. Tests: >>> from benchbuild.settings import CFG >>> from benchbuild.reports import discover >>> import logging as lg >>> import sys >>> l = lg.getLogger('benchbuild') >>> l.setLevel(lg.DEBUG) >>> l.handlers = [lg.StreamHandler(stream=sys.stdout)] >>> CFG["plugins"]["reports"] = ["benchbuild.non.existing", "benchbuild.reports.raw"] >>> discover() Could not find 'benchbuild.non.existing' Found report: benchbuild.reports.raw
[ "Import", "all", "experiments", "listed", "in", "*", "_PLUGINS_REPORTS", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/reports/__init__.py#L17-L41
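The autoload logic in discover() is a small importlib pattern that is easy to reuse outside benchbuild. A minimal, dependency-free sketch follows; the plugin names below are placeholders, not benchbuild configuration values:

import importlib
import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger('plugin-demo')

# Try to import each configured plugin; report the ones that are missing.
for plugin in ('json', 'not.a.real.module'):
    try:
        importlib.import_module(plugin)
        LOG.debug("Found report: %s", plugin)
    except ImportError:
        LOG.error("Could not find '%s'", plugin)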
BlueBrain/hpcbench
hpcbench/export/es.py
ESExporter.es_client
def es_client(self): """Get Elasticsearch client """ es_conf = self.campaign.export.elasticsearch return Elasticsearch(self.hosts, **es_conf.connection_params)
python
def es_client(self): """Get Elasticsearch client """ es_conf = self.campaign.export.elasticsearch return Elasticsearch(self.hosts, **es_conf.connection_params)
[ "def", "es_client", "(", "self", ")", ":", "es_conf", "=", "self", ".", "campaign", ".", "export", ".", "elasticsearch", "return", "Elasticsearch", "(", "self", ".", "hosts", ",", "*", "*", "es_conf", ".", "connection_params", ")" ]
Get Elasticsearch client
[ "Get", "Elasticsearch", "client" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/export/es.py#L42-L46
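A hedged sketch of the client construction above. The host list and connection parameters are invented placeholders rather than values read from a campaign configuration, and only the documented Elasticsearch(...) constructor is used:

from elasticsearch import Elasticsearch

hosts = ['http://localhost:9200']   # placeholder: a single local node
connection_params = {}              # placeholder for whatever kwargs your elasticsearch-py version accepts
es = Elasticsearch(hosts, **connection_params)
print(es)                           # client is constructed lazily; no network access yet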
BlueBrain/hpcbench
hpcbench/export/es.py
ESExporter.index_name
def index_name(self): """Get Elasticsearch index name associated to the campaign """ fmt = self.campaign.export.elasticsearch.index_name fields = dict(date=self.report['date']) return fmt.format(**fields).lower()
python
def index_name(self): """Get Elasticsearch index name associated to the campaign """ fmt = self.campaign.export.elasticsearch.index_name fields = dict(date=self.report['date']) return fmt.format(**fields).lower()
[ "def", "index_name", "(", "self", ")", ":", "fmt", "=", "self", ".", "campaign", ".", "export", ".", "elasticsearch", ".", "index_name", "fields", "=", "dict", "(", "date", "=", "self", ".", "report", "[", "'date'", "]", ")", "return", "fmt", ".", "format", "(", "*", "*", "fields", ")", ".", "lower", "(", ")" ]
Get Elasticsearch index name associated to the campaign
[ "Get", "Elasticsearch", "index", "name", "associated", "to", "the", "campaign" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/export/es.py#L55-L60
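The index name above is just a str.format template keyed on the report date and lower-cased (Elasticsearch index names must be lowercase). Illustration with an assumed template; the real pattern comes from the campaign's export.elasticsearch.index_name setting:

fmt = 'hpcbench-{date}'                        # assumed template, for illustration only
print(fmt.format(date='2024-01-01').lower())   # -> hpcbench-2024-01-01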
BlueBrain/hpcbench
hpcbench/export/es.py
ESExporter.remove_index
def remove_index(self): """Remove Elasticsearch index associated to the campaign""" self.index_client.close(self.index_name) self.index_client.delete(self.index_name)
python
def remove_index(self): """Remove Elasticsearch index associated to the campaign""" self.index_client.close(self.index_name) self.index_client.delete(self.index_name)
[ "def", "remove_index", "(", "self", ")", ":", "self", ".", "index_client", ".", "close", "(", "self", ".", "index_name", ")", "self", ".", "index_client", ".", "delete", "(", "self", ".", "index_name", ")" ]
Remove Elasticsearch index associated to the campaign
[ "Remove", "Elasticsearch", "index", "associated", "to", "the", "campaign" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/export/es.py#L69-L72
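The close-then-delete sequence above maps directly onto elasticsearch-py's indices client. A sketch with a placeholder host and index name (neither is taken from a real campaign):

from elasticsearch import Elasticsearch

es = Elasticsearch(['http://localhost:9200'])   # placeholder host
index_name = 'hpcbench-2024-01-01'              # placeholder index name
es.indices.close(index=index_name)              # close first, as in the record above
es.indices.delete(index=index_name)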
sci-bots/svg-model
docs/rename.py
parse_args
def parse_args(args=None): """Parses arguments, returns (options, args).""" from argparse import ArgumentParser if args is None: args = sys.argv parser = ArgumentParser(description='Rename template project with' 'hyphen-separated <new name> (path names and in ' 'files).') parser.add_argument('new_name', help='New project name (e.g., ' ' `my-new-project`)') args = parser.parse_args() return args
python
def parse_args(args=None): """Parses arguments, returns (options, args).""" from argparse import ArgumentParser if args is None: args = sys.argv parser = ArgumentParser(description='Rename template project with' 'hyphen-separated <new name> (path names and in ' 'files).') parser.add_argument('new_name', help='New project name (e.g., ' ' `my-new-project`)') args = parser.parse_args() return args
[ "def", "parse_args", "(", "args", "=", "None", ")", ":", "from", "argparse", "import", "ArgumentParser", "if", "args", "is", "None", ":", "args", "=", "sys", ".", "argv", "parser", "=", "ArgumentParser", "(", "description", "=", "'Rename template project with'", "'hyphen-separated <new name> (path names and in '", "'files).'", ")", "parser", ".", "add_argument", "(", "'new_name'", ",", "help", "=", "'New project name (e.g., '", "' `my-new-project`)'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "return", "args" ]
Parses arguments, returns (options, args).
[ "Parses", "arguments", "returns", "(", "options", "args", ")", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/docs/rename.py#L51-L65
BlueBrain/hpcbench
hpcbench/toolbox/buildinfo.py
extract_build_info
def extract_build_info(exe_path, elf_section=ELF_SECTION): """Extracts the build information from a given executable. The build information is expected to be in json format, which is parsed and returned as a dictionary. If no build information is found an empty dictionary is returned. This assumes binutils 2.25 to work. Args: exe_path (str): The full path to the executable to be examined Returns: dict: A dictionary of the extracted information. """ build_info = {} with mkdtemp() as tempd, pushd(tempd): proc = subprocess.Popen( [ OBJCOPY, DUMP_SECTION, "{secn}={ofile}".format(secn=elf_section, ofile=BUILDINFO_FILE), exe_path, ], stderr=subprocess.PIPE, ) proc.wait() errno = proc.returncode stderr = proc.stderr.read() if errno or len(stderr): # just return the empty dict LOGGER.warning('objcopy failed with errno %s.', errno) if len(stderr): LOGGER.warning('objcopy failed with following msg:\n%s', stderr) return build_info with open(BUILDINFO_FILE) as build_info_f: try: build_info = json.load(build_info_f, object_hook=byteify) except JSONDcdError as jsde: LOGGER.warning('benchmark executable build is not valid json:') LOGGER.warning(jsde.msg) LOGGER.warning('build info section content:') LOGGER.warning(jsde.doc) return build_info
python
def extract_build_info(exe_path, elf_section=ELF_SECTION): """Extracts the build information from a given executable. The build information is expected to be in json format, which is parsed and returned as a dictionary. If no build information is found an empty dictionary is returned. This assumes binutils 2.25 to work. Args: exe_path (str): The full path to the executable to be examined Returns: dict: A dictionary of the extracted information. """ build_info = {} with mkdtemp() as tempd, pushd(tempd): proc = subprocess.Popen( [ OBJCOPY, DUMP_SECTION, "{secn}={ofile}".format(secn=elf_section, ofile=BUILDINFO_FILE), exe_path, ], stderr=subprocess.PIPE, ) proc.wait() errno = proc.returncode stderr = proc.stderr.read() if errno or len(stderr): # just return the empty dict LOGGER.warning('objcopy failed with errno %s.', errno) if len(stderr): LOGGER.warning('objcopy failed with following msg:\n%s', stderr) return build_info with open(BUILDINFO_FILE) as build_info_f: try: build_info = json.load(build_info_f, object_hook=byteify) except JSONDcdError as jsde: LOGGER.warning('benchmark executable build is not valid json:') LOGGER.warning(jsde.msg) LOGGER.warning('build info section content:') LOGGER.warning(jsde.doc) return build_info
[ "def", "extract_build_info", "(", "exe_path", ",", "elf_section", "=", "ELF_SECTION", ")", ":", "build_info", "=", "{", "}", "with", "mkdtemp", "(", ")", "as", "tempd", ",", "pushd", "(", "tempd", ")", ":", "proc", "=", "subprocess", ".", "Popen", "(", "[", "OBJCOPY", ",", "DUMP_SECTION", ",", "\"{secn}={ofile}\"", ".", "format", "(", "secn", "=", "elf_section", ",", "ofile", "=", "BUILDINFO_FILE", ")", ",", "exe_path", ",", "]", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", ")", "proc", ".", "wait", "(", ")", "errno", "=", "proc", ".", "returncode", "stderr", "=", "proc", ".", "stderr", ".", "read", "(", ")", "if", "errno", "or", "len", "(", "stderr", ")", ":", "# just return the empty dict", "LOGGER", ".", "warning", "(", "'objcopy failed with errno %s.'", ",", "errno", ")", "if", "len", "(", "stderr", ")", ":", "LOGGER", ".", "warning", "(", "'objcopy failed with following msg:\\n%s'", ",", "stderr", ")", "return", "build_info", "with", "open", "(", "BUILDINFO_FILE", ")", "as", "build_info_f", ":", "try", ":", "build_info", "=", "json", ".", "load", "(", "build_info_f", ",", "object_hook", "=", "byteify", ")", "except", "JSONDcdError", "as", "jsde", ":", "LOGGER", ".", "warning", "(", "'benchmark executable build is not valid json:'", ")", "LOGGER", ".", "warning", "(", "jsde", ".", "msg", ")", "LOGGER", ".", "warning", "(", "'build info section content:'", ")", "LOGGER", ".", "warning", "(", "jsde", ".", "doc", ")", "return", "build_info" ]
Extracts the build information from a given executable. The build information is expected to be in json format, which is parsed and returned as a dictionary. If no build information is found an empty dictionary is returned. This assumes binutils 2.25 to work. Args: exe_path (str): The full path to the executable to be examined Returns: dict: A dictionary of the extracted information.
[ "Extracts", "the", "build", "information", "from", "a", "given", "executable", "." ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/buildinfo.py#L25-L68
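The heavy lifting in extract_build_info is objcopy --dump-section, which writes a named ELF section to a file that is then parsed as JSON. Below is a simplified, standard-library-only sketch of the same round-trip; the default section name is an assumption, and the original additionally byteifies strings and uses its own temp-dir/pushd helpers:

import json
import os
import subprocess
import tempfile

def read_json_section(exe_path, section='.note.buildinfo'):   # section name is an assumption
    with tempfile.TemporaryDirectory() as tempd:
        out_file = os.path.join(tempd, 'buildinfo.json')
        proc = subprocess.run(
            ['objcopy', '--dump-section',
             '{0}={1}'.format(section, out_file), exe_path],
            stderr=subprocess.PIPE,
        )
        if proc.returncode or proc.stderr:
            # Mirror the record: warn-and-return-empty rather than raise.
            return {}
        with open(out_file) as fp:
            try:
                return json.load(fp)
            except ValueError:   # json.JSONDecodeError is a ValueError subclass
                return {}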
lazygunner/xunleipy
xunleipy/rk.py
RClient.rk_create
def rk_create(self, im, im_type, timeout=60): """ im: 图片字节 im_type: 题目类型 """ params = { 'typeid': im_type, 'timeout': timeout, } params.update(self.base_params) files = {'image': ('check_code.png', im)} r = requests.post('http://api.ruokuai.com/create.json', data=params, files=files, headers=self.headers) return r.json()
python
def rk_create(self, im, im_type, timeout=60): """ im: 图片字节 im_type: 题目类型 """ params = { 'typeid': im_type, 'timeout': timeout, } params.update(self.base_params) files = {'image': ('check_code.png', im)} r = requests.post('http://api.ruokuai.com/create.json', data=params, files=files, headers=self.headers) return r.json()
[ "def", "rk_create", "(", "self", ",", "im", ",", "im_type", ",", "timeout", "=", "60", ")", ":", "params", "=", "{", "'typeid'", ":", "im_type", ",", "'timeout'", ":", "timeout", ",", "}", "params", ".", "update", "(", "self", ".", "base_params", ")", "files", "=", "{", "'image'", ":", "(", "'check_code.png'", ",", "im", ")", "}", "r", "=", "requests", ".", "post", "(", "'http://api.ruokuai.com/create.json'", ",", "data", "=", "params", ",", "files", "=", "files", ",", "headers", "=", "self", ".", "headers", ")", "return", "r", ".", "json", "(", ")" ]
im: image bytes
im_type: captcha question type
[ "im", ":", "图片字节", "im_type", ":", "题目类型" ]
train
https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rk.py#L32-L44
lazygunner/xunleipy
xunleipy/rk.py
RClient.rk_report_error
def rk_report_error(self, im_id): """ im_id:报错题目的ID """ params = { 'id': im_id, } params.update(self.base_params) r = requests.post('http://api.ruokuai.com/reporterror.json', data=params, headers=self.headers) return r.json()
python
def rk_report_error(self, im_id): """ im_id:报错题目的ID """ params = { 'id': im_id, } params.update(self.base_params) r = requests.post('http://api.ruokuai.com/reporterror.json', data=params, headers=self.headers) return r.json()
[ "def", "rk_report_error", "(", "self", ",", "im_id", ")", ":", "params", "=", "{", "'id'", ":", "im_id", ",", "}", "params", ".", "update", "(", "self", ".", "base_params", ")", "r", "=", "requests", ".", "post", "(", "'http://api.ruokuai.com/reporterror.json'", ",", "data", "=", "params", ",", "headers", "=", "self", ".", "headers", ")", "return", "r", ".", "json", "(", ")" ]
im_id: ID of the captcha question to report as incorrect
[ "im_id", ":", "报错题目的ID" ]
train
https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rk.py#L46-L55
eng-tools/sfsimodels
sfsimodels/scores.py
lc_score
def lc_score(value): """ Evaluates the accuracy of a predictive measure (e.g. r-squared) :param value: float, between 0.0 and 1.0. :return: """ rebased = 2 * (value - 0.5) if rebased == 0: return 0 elif rebased > 0: compliment = 1.0 - rebased score = - np.log2(compliment) else: compliment = 1.0 + rebased score = np.log2(compliment) return score
python
def lc_score(value): """ Evaluates the accuracy of a predictive measure (e.g. r-squared) :param value: float, between 0.0 and 1.0. :return: """ rebased = 2 * (value - 0.5) if rebased == 0: return 0 elif rebased > 0: compliment = 1.0 - rebased score = - np.log2(compliment) else: compliment = 1.0 + rebased score = np.log2(compliment) return score
[ "def", "lc_score", "(", "value", ")", ":", "rebased", "=", "2", "*", "(", "value", "-", "0.5", ")", "if", "rebased", "==", "0", ":", "return", "0", "elif", "rebased", ">", "0", ":", "compliment", "=", "1.0", "-", "rebased", "score", "=", "-", "np", ".", "log2", "(", "compliment", ")", "else", ":", "compliment", "=", "1.0", "+", "rebased", "score", "=", "np", ".", "log2", "(", "compliment", ")", "return", "score" ]
Evaluates the accuracy of a predictive measure (e.g. r-squared) :param value: float, between 0.0 and 1.0. :return:
[ "Evaluates", "the", "accuracy", "of", "a", "predictive", "measure", "(", "e", ".", "g", ".", "r", "-", "squared", ")" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/scores.py#L4-L21
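The score above re-bases the input onto [-1, 1] and applies a signed log2, so a chance-level value of 0.5 maps to 0 and every halving of the remaining error adds one point (symmetrically negative below 0.5). A small worked example; the scoring rule is restated in condensed form so the numbers can be reproduced, and the probe values are chosen only to show the symmetry:

import numpy as np

def lc_score(value):
    # Condensed restatement of the record's scoring rule.
    rebased = 2 * (value - 0.5)
    if rebased == 0:
        return 0
    elif rebased > 0:
        return -np.log2(1.0 - rebased)
    return np.log2(1.0 + rebased)

for value in (0.5, 0.75, 0.25, 0.9375):
    print(value, float(lc_score(value)))   # -> 0, 1.0, -1.0, 3.0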
PolyJIT/benchbuild
benchbuild/utils/uchroot.py
uchroot
def uchroot(*args, **kwargs): """ Return a customizable uchroot command. Args: args: List of additional arguments for uchroot (typical: mounts) Return: chroot_cmd """ uchroot_cmd = with_mounts(*args, uchroot_cmd_fn=no_llvm, **kwargs) return uchroot_cmd["--"]
python
def uchroot(*args, **kwargs): """ Return a customizable uchroot command. Args: args: List of additional arguments for uchroot (typical: mounts) Return: chroot_cmd """ uchroot_cmd = with_mounts(*args, uchroot_cmd_fn=no_llvm, **kwargs) return uchroot_cmd["--"]
[ "def", "uchroot", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "uchroot_cmd", "=", "with_mounts", "(", "*", "args", ",", "uchroot_cmd_fn", "=", "no_llvm", ",", "*", "*", "kwargs", ")", "return", "uchroot_cmd", "[", "\"--\"", "]" ]
Return a customizable uchroot command. Args: args: List of additional arguments for uchroot (typical: mounts) Return: chroot_cmd
[ "Return", "a", "customizable", "uchroot", "command", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L13-L23
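These uchroot helpers rely on plumbum's composition idiom: indexing a command object binds arguments without running anything, which is why uchroot() can hand back uchroot_cmd["--"] for callers to extend. A tiny sketch of that idiom using an ordinary command (echo is chosen only because it exists on most systems):

from plumbum import local

echo = local['echo']
bound = echo['hello']['world']   # each indexing binds more arguments; nothing runs yet
print(bound)                     # shows the composed command line
print(bound())                   # calling it executes the command and returns stdout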
PolyJIT/benchbuild
benchbuild/utils/uchroot.py
no_llvm
def no_llvm(*args, uid=0, gid=0, **kwargs): """ Return a customizable uchroot command. The command will be executed inside a uchroot environment. Args: args: List of additional arguments for uchroot (typical: mounts) Return: chroot_cmd """ uchroot_cmd = no_args() uchroot_cmd = uchroot_cmd[__default_opts__(uid, gid)] return uchroot_cmd[args]
python
def no_llvm(*args, uid=0, gid=0, **kwargs): """ Return a customizable uchroot command. The command will be executed inside a uchroot environment. Args: args: List of additional arguments for uchroot (typical: mounts) Return: chroot_cmd """ uchroot_cmd = no_args() uchroot_cmd = uchroot_cmd[__default_opts__(uid, gid)] return uchroot_cmd[args]
[ "def", "no_llvm", "(", "*", "args", ",", "uid", "=", "0", ",", "gid", "=", "0", ",", "*", "*", "kwargs", ")", ":", "uchroot_cmd", "=", "no_args", "(", ")", "uchroot_cmd", "=", "uchroot_cmd", "[", "__default_opts__", "(", "uid", ",", "gid", ")", "]", "return", "uchroot_cmd", "[", "args", "]" ]
Return a customizable uchroot command. The command will be executed inside a uchroot environment. Args: args: List of additional arguments for uchroot (typical: mounts) Return: chroot_cmd
[ "Return", "a", "customizable", "uchroot", "command", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L34-L47
PolyJIT/benchbuild
benchbuild/utils/uchroot.py
no_args
def no_args(**kwargs): """Return the uchroot command without any customizations.""" from benchbuild.utils.cmd import uchroot as uchrt prefixes = CFG["container"]["prefixes"].value p_paths, p_libs = env(prefixes) uchrt = run.with_env_recursive( uchrt, LD_LIBRARY_PATH=path.list_to_path(p_libs), PATH=path.list_to_path(p_paths)) return uchrt
python
def no_args(**kwargs): """Return the uchroot command without any customizations.""" from benchbuild.utils.cmd import uchroot as uchrt prefixes = CFG["container"]["prefixes"].value p_paths, p_libs = env(prefixes) uchrt = run.with_env_recursive( uchrt, LD_LIBRARY_PATH=path.list_to_path(p_libs), PATH=path.list_to_path(p_paths)) return uchrt
[ "def", "no_args", "(", "*", "*", "kwargs", ")", ":", "from", "benchbuild", ".", "utils", ".", "cmd", "import", "uchroot", "as", "uchrt", "prefixes", "=", "CFG", "[", "\"container\"", "]", "[", "\"prefixes\"", "]", ".", "value", "p_paths", ",", "p_libs", "=", "env", "(", "prefixes", ")", "uchrt", "=", "run", ".", "with_env_recursive", "(", "uchrt", ",", "LD_LIBRARY_PATH", "=", "path", ".", "list_to_path", "(", "p_libs", ")", ",", "PATH", "=", "path", ".", "list_to_path", "(", "p_paths", ")", ")", "return", "uchrt" ]
Return the uchroot command without any customizations.
[ "Return", "the", "uchroot", "command", "without", "any", "customizations", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L50-L61
PolyJIT/benchbuild
benchbuild/utils/uchroot.py
with_mounts
def with_mounts(*args, uchroot_cmd_fn=no_args, **kwargs): """Return a uchroot command with all mounts enabled.""" mounts = CFG["container"]["mounts"].value prefixes = CFG["container"]["prefixes"].value uchroot_opts, mounts = __mounts__("mnt", mounts) uchroot_cmd = uchroot_cmd_fn(**kwargs) uchroot_cmd = uchroot_cmd[uchroot_opts] uchroot_cmd = uchroot_cmd[args] paths, libs = env(mounts) prefix_paths, prefix_libs = env(prefixes) uchroot_cmd = run.with_env_recursive( uchroot_cmd, LD_LIBRARY_PATH=path.list_to_path(libs + prefix_libs), PATH=path.list_to_path(paths + prefix_paths)) return uchroot_cmd
python
def with_mounts(*args, uchroot_cmd_fn=no_args, **kwargs): """Return a uchroot command with all mounts enabled.""" mounts = CFG["container"]["mounts"].value prefixes = CFG["container"]["prefixes"].value uchroot_opts, mounts = __mounts__("mnt", mounts) uchroot_cmd = uchroot_cmd_fn(**kwargs) uchroot_cmd = uchroot_cmd[uchroot_opts] uchroot_cmd = uchroot_cmd[args] paths, libs = env(mounts) prefix_paths, prefix_libs = env(prefixes) uchroot_cmd = run.with_env_recursive( uchroot_cmd, LD_LIBRARY_PATH=path.list_to_path(libs + prefix_libs), PATH=path.list_to_path(paths + prefix_paths)) return uchroot_cmd
[ "def", "with_mounts", "(", "*", "args", ",", "uchroot_cmd_fn", "=", "no_args", ",", "*", "*", "kwargs", ")", ":", "mounts", "=", "CFG", "[", "\"container\"", "]", "[", "\"mounts\"", "]", ".", "value", "prefixes", "=", "CFG", "[", "\"container\"", "]", "[", "\"prefixes\"", "]", ".", "value", "uchroot_opts", ",", "mounts", "=", "__mounts__", "(", "\"mnt\"", ",", "mounts", ")", "uchroot_cmd", "=", "uchroot_cmd_fn", "(", "*", "*", "kwargs", ")", "uchroot_cmd", "=", "uchroot_cmd", "[", "uchroot_opts", "]", "uchroot_cmd", "=", "uchroot_cmd", "[", "args", "]", "paths", ",", "libs", "=", "env", "(", "mounts", ")", "prefix_paths", ",", "prefix_libs", "=", "env", "(", "prefixes", ")", "uchroot_cmd", "=", "run", ".", "with_env_recursive", "(", "uchroot_cmd", ",", "LD_LIBRARY_PATH", "=", "path", ".", "list_to_path", "(", "libs", "+", "prefix_libs", ")", ",", "PATH", "=", "path", ".", "list_to_path", "(", "paths", "+", "prefix_paths", ")", ")", "return", "uchroot_cmd" ]
Return a uchroot command with all mounts enabled.
[ "Return", "a", "uchroot", "command", "with", "all", "mounts", "enabled", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L64-L80
PolyJIT/benchbuild
benchbuild/utils/uchroot.py
clean_env
def clean_env(uchroot_cmd, varnames): """Returns a uchroot cmd that runs inside a filtered environment.""" env = uchroot_cmd["/usr/bin/env"] __clean_env = env["-u", ",".join(varnames)] return __clean_env
python
def clean_env(uchroot_cmd, varnames): """Returns a uchroot cmd that runs inside a filtered environment.""" env = uchroot_cmd["/usr/bin/env"] __clean_env = env["-u", ",".join(varnames)] return __clean_env
[ "def", "clean_env", "(", "uchroot_cmd", ",", "varnames", ")", ":", "env", "=", "uchroot_cmd", "[", "\"/usr/bin/env\"", "]", "__clean_env", "=", "env", "[", "\"-u\"", ",", "\",\"", ".", "join", "(", "varnames", ")", "]", "return", "__clean_env" ]
Returns a uchroot cmd that runs inside a filtered environment.
[ "Returns", "a", "uchroot", "cmd", "that", "runs", "inside", "a", "filtered", "environment", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L121-L125
PolyJIT/benchbuild
benchbuild/utils/uchroot.py
mounts
def mounts(prefix, __mounts): """ Compute the mountpoints of the current user. Args: prefix: Define where the job was running if it ran on a cluster. mounts: All mounts the user currently uses in his file system. Return: mntpoints """ i = 0 mntpoints = [] for mount in __mounts: if not isinstance(mount, dict): mntpoint = "{0}/{1}".format(prefix, str(i)) mntpoints.append(mntpoint) i = i + 1 return mntpoints
python
def mounts(prefix, __mounts): """ Compute the mountpoints of the current user. Args: prefix: Define where the job was running if it ran on a cluster. mounts: All mounts the user currently uses in his file system. Return: mntpoints """ i = 0 mntpoints = [] for mount in __mounts: if not isinstance(mount, dict): mntpoint = "{0}/{1}".format(prefix, str(i)) mntpoints.append(mntpoint) i = i + 1 return mntpoints
[ "def", "mounts", "(", "prefix", ",", "__mounts", ")", ":", "i", "=", "0", "mntpoints", "=", "[", "]", "for", "mount", "in", "__mounts", ":", "if", "not", "isinstance", "(", "mount", ",", "dict", ")", ":", "mntpoint", "=", "\"{0}/{1}\"", ".", "format", "(", "prefix", ",", "str", "(", "i", ")", ")", "mntpoints", ".", "append", "(", "mntpoint", ")", "i", "=", "i", "+", "1", "return", "mntpoints" ]
Compute the mountpoints of the current user. Args: prefix: Define where the job was running if it ran on a cluster. mounts: All mounts the user currently uses in his file system. Return: mntpoints
[ "Compute", "the", "mountpoints", "of", "the", "current", "user", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L128-L145
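Note the numbering rule in mounts(): dictionary-style entries are skipped and do not consume an index, so the generated mountpoints stay contiguous. A quick dependency-free illustration with made-up entries:

def numbered_mountpoints(prefix, mount_entries):
    points, i = [], 0
    for mount in mount_entries:
        if not isinstance(mount, dict):   # dict entries carry their own source/target
            points.append('{0}/{1}'.format(prefix, i))
            i += 1
    return points

print(numbered_mountpoints('mnt', ['/usr', {'src': '/opt/x'}, '/home']))   # -> ['mnt/0', 'mnt/1']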
PolyJIT/benchbuild
benchbuild/utils/uchroot.py
env
def env(mounts): """ Compute the environment of the change root for the user. Args: mounts: The mountpoints of the current user. Return: paths ld_libs """ f_mounts = [m.strip("/") for m in mounts] root = local.path("/") ld_libs = [root / m / "lib" for m in f_mounts] ld_libs.extend([root / m / "lib64" for m in f_mounts]) paths = [root / m / "bin" for m in f_mounts] paths.extend([root / m / "sbin" for m in f_mounts]) paths.extend([root / m for m in f_mounts]) return paths, ld_libs
python
def env(mounts): """ Compute the environment of the change root for the user. Args: mounts: The mountpoints of the current user. Return: paths ld_libs """ f_mounts = [m.strip("/") for m in mounts] root = local.path("/") ld_libs = [root / m / "lib" for m in f_mounts] ld_libs.extend([root / m / "lib64" for m in f_mounts]) paths = [root / m / "bin" for m in f_mounts] paths.extend([root / m / "sbin" for m in f_mounts]) paths.extend([root / m for m in f_mounts]) return paths, ld_libs
[ "def", "env", "(", "mounts", ")", ":", "f_mounts", "=", "[", "m", ".", "strip", "(", "\"/\"", ")", "for", "m", "in", "mounts", "]", "root", "=", "local", ".", "path", "(", "\"/\"", ")", "ld_libs", "=", "[", "root", "/", "m", "/", "\"lib\"", "for", "m", "in", "f_mounts", "]", "ld_libs", ".", "extend", "(", "[", "root", "/", "m", "/", "\"lib64\"", "for", "m", "in", "f_mounts", "]", ")", "paths", "=", "[", "root", "/", "m", "/", "\"bin\"", "for", "m", "in", "f_mounts", "]", "paths", ".", "extend", "(", "[", "root", "/", "m", "/", "\"sbin\"", "for", "m", "in", "f_mounts", "]", ")", "paths", ".", "extend", "(", "[", "root", "/", "m", "for", "m", "in", "f_mounts", "]", ")", "return", "paths", ",", "ld_libs" ]
Compute the environment of the change root for the user. Args: mounts: The mountpoints of the current user. Return: paths ld_libs
[ "Compute", "the", "environment", "of", "the", "change", "root", "for", "the", "user", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L166-L186
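The env() helper simply fans each mountpoint out into candidate bin/sbin and lib/lib64 directories under the chroot root. A dependency-free sketch of the same fan-out, with plumbum's local.path replaced by plain string joins and a made-up mount list:

mount_list = ['/mnt/0', '/mnt/1']               # made-up mountpoints
f_mounts = [m.strip('/') for m in mount_list]

ld_libs = ['/' + m + '/lib' for m in f_mounts]
ld_libs += ['/' + m + '/lib64' for m in f_mounts]

paths = ['/' + m + '/bin' for m in f_mounts]
paths += ['/' + m + '/sbin' for m in f_mounts]
paths += ['/' + m for m in f_mounts]

print(paths)    # bin, sbin and bare mount directories
print(ld_libs)  # lib and lib64 directories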
mromanello/hucitlib
knowledge_base/__init__.py
get_abbreviations
def get_abbreviations(kb): """ For the sake of profiling. """ return {"%s$$n%i" % (author.get_urn(), i): abbrev for author in kb.get_authors() for i, abbrev in enumerate(author.get_abbreviations()) if author.get_urn() is not None}
python
def get_abbreviations(kb): """ For the sake of profiling. """ return {"%s$$n%i" % (author.get_urn(), i): abbrev for author in kb.get_authors() for i, abbrev in enumerate(author.get_abbreviations()) if author.get_urn() is not None}
[ "def", "get_abbreviations", "(", "kb", ")", ":", "return", "{", "\"%s$$n%i\"", "%", "(", "author", ".", "get_urn", "(", ")", ",", "i", ")", ":", "abbrev", "for", "author", "in", "kb", ".", "get_authors", "(", ")", "for", "i", ",", "abbrev", "in", "enumerate", "(", "author", ".", "get_abbreviations", "(", ")", ")", "if", "author", ".", "get_urn", "(", ")", "is", "not", "None", "}" ]
For the sake of profiling.
[ "For", "the", "sake", "of", "profiling", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L21-L28
mromanello/hucitlib
knowledge_base/__init__.py
KnowledgeBase.author_names
def author_names(self): """ Returns a dictionary like this: { "urn:cts:greekLit:tlg0012$$n1" : "Homer" , "urn:cts:greekLit:tlg0012$$n2" : "Omero" , ... } """ return {"%s$$n%i" % (author.get_urn(), i): name[1] for author in self.get_authors() for i, name in enumerate(author.get_names()) if author.get_urn() is not None}
python
def author_names(self): """ Returns a dictionary like this: { "urn:cts:greekLit:tlg0012$$n1" : "Homer" , "urn:cts:greekLit:tlg0012$$n2" : "Omero" , ... } """ return {"%s$$n%i" % (author.get_urn(), i): name[1] for author in self.get_authors() for i, name in enumerate(author.get_names()) if author.get_urn() is not None}
[ "def", "author_names", "(", "self", ")", ":", "return", "{", "\"%s$$n%i\"", "%", "(", "author", ".", "get_urn", "(", ")", ",", "i", ")", ":", "name", "[", "1", "]", "for", "author", "in", "self", ".", "get_authors", "(", ")", "for", "i", ",", "name", "in", "enumerate", "(", "author", ".", "get_names", "(", ")", ")", "if", "author", ".", "get_urn", "(", ")", "is", "not", "None", "}" ]
Returns a dictionary like this: { "urn:cts:greekLit:tlg0012$$n1" : "Homer" , "urn:cts:greekLit:tlg0012$$n2" : "Omero" , ... }
[ "Returns", "a", "dictionary", "like", "this", ":" ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L114-L127
mromanello/hucitlib
knowledge_base/__init__.py
KnowledgeBase.get_resource_by_urn
def get_resource_by_urn(self, urn): """Fetch the resource corresponding to the input CTS URN. Currently supports only HucitAuthor and HucitWork. :param urn: the CTS URN of the resource to fetch :return: either an instance of `HucitAuthor` or of `HucitWork` """ search_query = """ PREFIX frbroo: <http://erlangen-crm.org/efrbroo/> PREFIX crm: <http://erlangen-crm.org/current/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> SELECT ?resource_URI WHERE { ?resource_URI crm:P1_is_identified_by ?urn . ?urn a crm:E42_Identifier . ?urn rdfs:label "%s" } """ % urn # check type of the input URN try: assert isinstance(urn, CTS_URN) except Exception as e: # convert to pyCTS.CTS_URN if it's a string urn = CTS_URN(urn) logger.debug('Converted the input urn from string to %s' % type( CTS_URN )) if (urn.work is not None): Work = self._session.get_class(surf.ns.EFRBROO['F1_Work']) result = self._store.execute_sparql(search_query) if len(result['results']['bindings']) == 0: raise ResourceNotFound else: tmp = result['results']['bindings'][0] resource_uri = tmp['resource_URI']['value'] return self._session.get_resource(resource_uri, Work) elif (urn.work is None and urn.textgroup is not None): Person = self._session.get_class(surf.ns.EFRBROO['F10_Person']) result = self._store.execute_sparql(search_query) if len(result['results']['bindings']) == 0: raise ResourceNotFound else: tmp = result['results']['bindings'][0] resource_uri = tmp['resource_URI']['value'] return self._session.get_resource(resource_uri, Person)
python
def get_resource_by_urn(self, urn): """Fetch the resource corresponding to the input CTS URN. Currently supports only HucitAuthor and HucitWork. :param urn: the CTS URN of the resource to fetch :return: either an instance of `HucitAuthor` or of `HucitWork` """ search_query = """ PREFIX frbroo: <http://erlangen-crm.org/efrbroo/> PREFIX crm: <http://erlangen-crm.org/current/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> SELECT ?resource_URI WHERE { ?resource_URI crm:P1_is_identified_by ?urn . ?urn a crm:E42_Identifier . ?urn rdfs:label "%s" } """ % urn # check type of the input URN try: assert isinstance(urn, CTS_URN) except Exception as e: # convert to pyCTS.CTS_URN if it's a string urn = CTS_URN(urn) logger.debug('Converted the input urn from string to %s' % type( CTS_URN )) if (urn.work is not None): Work = self._session.get_class(surf.ns.EFRBROO['F1_Work']) result = self._store.execute_sparql(search_query) if len(result['results']['bindings']) == 0: raise ResourceNotFound else: tmp = result['results']['bindings'][0] resource_uri = tmp['resource_URI']['value'] return self._session.get_resource(resource_uri, Work) elif (urn.work is None and urn.textgroup is not None): Person = self._session.get_class(surf.ns.EFRBROO['F10_Person']) result = self._store.execute_sparql(search_query) if len(result['results']['bindings']) == 0: raise ResourceNotFound else: tmp = result['results']['bindings'][0] resource_uri = tmp['resource_URI']['value'] return self._session.get_resource(resource_uri, Person)
[ "def", "get_resource_by_urn", "(", "self", ",", "urn", ")", ":", "search_query", "=", "\"\"\"\n PREFIX frbroo: <http://erlangen-crm.org/efrbroo/>\n PREFIX crm: <http://erlangen-crm.org/current/>\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n\n SELECT ?resource_URI\n\n WHERE {\n ?resource_URI crm:P1_is_identified_by ?urn .\n ?urn a crm:E42_Identifier .\n ?urn rdfs:label \"%s\"\n }\n \"\"\"", "%", "urn", "# check type of the input URN", "try", ":", "assert", "isinstance", "(", "urn", ",", "CTS_URN", ")", "except", "Exception", "as", "e", ":", "# convert to pyCTS.CTS_URN if it's a string", "urn", "=", "CTS_URN", "(", "urn", ")", "logger", ".", "debug", "(", "'Converted the input urn from string to %s'", "%", "type", "(", "CTS_URN", ")", ")", "if", "(", "urn", ".", "work", "is", "not", "None", ")", ":", "Work", "=", "self", ".", "_session", ".", "get_class", "(", "surf", ".", "ns", ".", "EFRBROO", "[", "'F1_Work'", "]", ")", "result", "=", "self", ".", "_store", ".", "execute_sparql", "(", "search_query", ")", "if", "len", "(", "result", "[", "'results'", "]", "[", "'bindings'", "]", ")", "==", "0", ":", "raise", "ResourceNotFound", "else", ":", "tmp", "=", "result", "[", "'results'", "]", "[", "'bindings'", "]", "[", "0", "]", "resource_uri", "=", "tmp", "[", "'resource_URI'", "]", "[", "'value'", "]", "return", "self", ".", "_session", ".", "get_resource", "(", "resource_uri", ",", "Work", ")", "elif", "(", "urn", ".", "work", "is", "None", "and", "urn", ".", "textgroup", "is", "not", "None", ")", ":", "Person", "=", "self", ".", "_session", ".", "get_class", "(", "surf", ".", "ns", ".", "EFRBROO", "[", "'F10_Person'", "]", ")", "result", "=", "self", ".", "_store", ".", "execute_sparql", "(", "search_query", ")", "if", "len", "(", "result", "[", "'results'", "]", "[", "'bindings'", "]", ")", "==", "0", ":", "raise", "ResourceNotFound", "else", ":", "tmp", "=", "result", "[", "'results'", "]", "[", "'bindings'", "]", "[", "0", "]", "resource_uri", "=", "tmp", "[", "'resource_URI'", "]", "[", "'value'", "]", "return", "self", ".", "_session", ".", "get_resource", "(", "resource_uri", ",", "Person", ")" ]
Fetch the resource corresponding to the input CTS URN. Currently supports only HucitAuthor and HucitWork. :param urn: the CTS URN of the resource to fetch :return: either an instance of `HucitAuthor` or of `HucitWork`
[ "Fetch", "the", "resource", "corresponding", "to", "the", "input", "CTS", "URN", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L151-L202
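Two details of the lookup above are worth isolating: the SPARQL query is plain string interpolation of the URN into an rdfs:label match, and the shape of the URN (work component present or not) decides whether the hit is loaded as an F1_Work or an F10_Person. A sketch of just the query construction; the URN below is a sample value used only for illustration:

urn = 'urn:cts:greekLit:tlg0012.tlg001'   # sample URN, used here purely as an example

search_query = """
PREFIX frbroo: <http://erlangen-crm.org/efrbroo/>
PREFIX crm: <http://erlangen-crm.org/current/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>

SELECT ?resource_URI
WHERE {
    ?resource_URI crm:P1_is_identified_by ?urn .
    ?urn a crm:E42_Identifier .
    ?urn rdfs:label "%s"
}
""" % urn

print(search_query)
# A URN with a work component maps the result to F1_Work; a textgroup-only URN
# maps it to F10_Person, as in the record above.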
mromanello/hucitlib
knowledge_base/__init__.py
KnowledgeBase.search
def search(self, search_string): """ Searches for a given string through the resources' labels. :param search_string: :return: an instance of `HucitAuthor` or `HucitWork`. """ query = """ SELECT ?s ?label ?type WHERE { ?s a ?type . ?s rdfs:label ?label . ?label bif:contains "'%s'" . } """ % search_string response = self._session.default_store.execute_sparql(query) results = [(result['s']['value'], result['label']['value'], result['type']['value']) for result in response["results"]["bindings"]] resources = [(label, self._session.get_resource(subject, self._session.get_class(type))) for subject, label, type in results] Name = self._session.get_class(surf.ns.EFRBROO['F12_Name']) Title = self._session.get_class(surf.ns.EFRBROO['E35_Title']) Work = self._session.get_class(surf.ns.EFRBROO['F1_Work']) Person = self._session.get_class(surf.ns.EFRBROO['F10_Person']) result = [] for label, resource in resources: if resource.uri == surf.ns.EFRBROO['E35_Title']: work = Work.get_by(efrbroo_P102_has_title = resource).first() result.append((label, work)) elif resource.uri == surf.ns.EFRBROO['F12_Name']: author = Person.get_by(ecrm_P1_is_identified_by = resource).first() result.append((label, author)) elif resource.uri == surf.ns.ECRM['E41_Appellation']: try: name = Name.get_by(ecrm_P139_has_alternative_form = resource).first() assert name is not None author = Person.get_by(ecrm_P1_is_identified_by = name).first() result.append((label, author)) except Exception as e: title = Title.get_by(ecrm_P139_has_alternative_form=resource).first() assert title is not None work = Work.get_by(efrbroo_P102_has_title = title).first() result.append((label, work)) return result
python
def search(self, search_string): """ Searches for a given string through the resources' labels. :param search_string: :return: an instance of `HucitAuthor` or `HucitWork`. """ query = """ SELECT ?s ?label ?type WHERE { ?s a ?type . ?s rdfs:label ?label . ?label bif:contains "'%s'" . } """ % search_string response = self._session.default_store.execute_sparql(query) results = [(result['s']['value'], result['label']['value'], result['type']['value']) for result in response["results"]["bindings"]] resources = [(label, self._session.get_resource(subject, self._session.get_class(type))) for subject, label, type in results] Name = self._session.get_class(surf.ns.EFRBROO['F12_Name']) Title = self._session.get_class(surf.ns.EFRBROO['E35_Title']) Work = self._session.get_class(surf.ns.EFRBROO['F1_Work']) Person = self._session.get_class(surf.ns.EFRBROO['F10_Person']) result = [] for label, resource in resources: if resource.uri == surf.ns.EFRBROO['E35_Title']: work = Work.get_by(efrbroo_P102_has_title = resource).first() result.append((label, work)) elif resource.uri == surf.ns.EFRBROO['F12_Name']: author = Person.get_by(ecrm_P1_is_identified_by = resource).first() result.append((label, author)) elif resource.uri == surf.ns.ECRM['E41_Appellation']: try: name = Name.get_by(ecrm_P139_has_alternative_form = resource).first() assert name is not None author = Person.get_by(ecrm_P1_is_identified_by = name).first() result.append((label, author)) except Exception as e: title = Title.get_by(ecrm_P139_has_alternative_form=resource).first() assert title is not None work = Work.get_by(efrbroo_P102_has_title = title).first() result.append((label, work)) return result
[ "def", "search", "(", "self", ",", "search_string", ")", ":", "query", "=", "\"\"\"\n SELECT ?s ?label ?type\n WHERE {\n ?s a ?type .\n ?s rdfs:label ?label .\n ?label bif:contains \"'%s'\" .\n }\n \"\"\"", "%", "search_string", "response", "=", "self", ".", "_session", ".", "default_store", ".", "execute_sparql", "(", "query", ")", "results", "=", "[", "(", "result", "[", "'s'", "]", "[", "'value'", "]", ",", "result", "[", "'label'", "]", "[", "'value'", "]", ",", "result", "[", "'type'", "]", "[", "'value'", "]", ")", "for", "result", "in", "response", "[", "\"results\"", "]", "[", "\"bindings\"", "]", "]", "resources", "=", "[", "(", "label", ",", "self", ".", "_session", ".", "get_resource", "(", "subject", ",", "self", ".", "_session", ".", "get_class", "(", "type", ")", ")", ")", "for", "subject", ",", "label", ",", "type", "in", "results", "]", "Name", "=", "self", ".", "_session", ".", "get_class", "(", "surf", ".", "ns", ".", "EFRBROO", "[", "'F12_Name'", "]", ")", "Title", "=", "self", ".", "_session", ".", "get_class", "(", "surf", ".", "ns", ".", "EFRBROO", "[", "'E35_Title'", "]", ")", "Work", "=", "self", ".", "_session", ".", "get_class", "(", "surf", ".", "ns", ".", "EFRBROO", "[", "'F1_Work'", "]", ")", "Person", "=", "self", ".", "_session", ".", "get_class", "(", "surf", ".", "ns", ".", "EFRBROO", "[", "'F10_Person'", "]", ")", "result", "=", "[", "]", "for", "label", ",", "resource", "in", "resources", ":", "if", "resource", ".", "uri", "==", "surf", ".", "ns", ".", "EFRBROO", "[", "'E35_Title'", "]", ":", "work", "=", "Work", ".", "get_by", "(", "efrbroo_P102_has_title", "=", "resource", ")", ".", "first", "(", ")", "result", ".", "append", "(", "(", "label", ",", "work", ")", ")", "elif", "resource", ".", "uri", "==", "surf", ".", "ns", ".", "EFRBROO", "[", "'F12_Name'", "]", ":", "author", "=", "Person", ".", "get_by", "(", "ecrm_P1_is_identified_by", "=", "resource", ")", ".", "first", "(", ")", "result", ".", "append", "(", "(", "label", ",", "author", ")", ")", "elif", "resource", ".", "uri", "==", "surf", ".", "ns", ".", "ECRM", "[", "'E41_Appellation'", "]", ":", "try", ":", "name", "=", "Name", ".", "get_by", "(", "ecrm_P139_has_alternative_form", "=", "resource", ")", ".", "first", "(", ")", "assert", "name", "is", "not", "None", "author", "=", "Person", ".", "get_by", "(", "ecrm_P1_is_identified_by", "=", "name", ")", ".", "first", "(", ")", "result", ".", "append", "(", "(", "label", ",", "author", ")", ")", "except", "Exception", "as", "e", ":", "title", "=", "Title", ".", "get_by", "(", "ecrm_P139_has_alternative_form", "=", "resource", ")", ".", "first", "(", ")", "assert", "title", "is", "not", "None", "work", "=", "Work", ".", "get_by", "(", "efrbroo_P102_has_title", "=", "title", ")", ".", "first", "(", ")", "result", ".", "append", "(", "(", "label", ",", "work", ")", ")", "return", "result" ]
Searches for a given string through the resources' labels. :param search_string: :return: an instance of `HucitAuthor` or `HucitWork`.
[ "Searches", "for", "a", "given", "string", "through", "the", "resources", "labels", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L206-L251
mromanello/hucitlib
knowledge_base/__init__.py
KnowledgeBase.get_authors
def get_authors(self): """ Returns the authors in the Knowledge Base. :return: a list of `HucitAuthor` instances. """ Person = self._session.get_class(surf.ns.EFRBROO['F10_Person']) return list(Person.all())
python
def get_authors(self): """ Returns the authors in the Knowledge Base. :return: a list of `HucitAuthor` instances. """ Person = self._session.get_class(surf.ns.EFRBROO['F10_Person']) return list(Person.all())
[ "def", "get_authors", "(", "self", ")", ":", "Person", "=", "self", ".", "_session", ".", "get_class", "(", "surf", ".", "ns", ".", "EFRBROO", "[", "'F10_Person'", "]", ")", "return", "list", "(", "Person", ".", "all", "(", ")", ")" ]
Returns the authors in the Knowledge Base. :return: a list of `HucitAuthor` instances.
[ "Returns", "the", "authors", "in", "the", "Knowledge", "Base", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L253-L261
mromanello/hucitlib
knowledge_base/__init__.py
KnowledgeBase.get_works
def get_works(self): """Return the author's works. :return: a list of `HucitWork` instances. """ Work = self._session.get_class(surf.ns.EFRBROO['F1_Work']) return list(Work.all())
python
def get_works(self): """Return the author's works. :return: a list of `HucitWork` instances. """ Work = self._session.get_class(surf.ns.EFRBROO['F1_Work']) return list(Work.all())
[ "def", "get_works", "(", "self", ")", ":", "Work", "=", "self", ".", "_session", ".", "get_class", "(", "surf", ".", "ns", ".", "EFRBROO", "[", "'F1_Work'", "]", ")", "return", "list", "(", "Work", ".", "all", "(", ")", ")" ]
Return the author's works. :return: a list of `HucitWork` instances.
[ "Return", "the", "author", "s", "works", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L263-L270
mromanello/hucitlib
knowledge_base/__init__.py
KnowledgeBase.get_author_label
def get_author_label(self, urn): """Get the label corresponding to the author identified by the CTS URN. try to get an lang=en label (if multiple labels in this lang pick the shortest) try to get a lang=la label (if multiple labels in this lang exist pick the shortest) try to get a lang=None label (if multiple labels in this lang exist pick the shortest) returns None if no name is found """ author = self.get_resource_by_urn(urn) names = author.get_names() en_names = sorted([name[1] for name in names if name[0] == "en"], key=len) try: assert len(en_names) > 0 return en_names[0] except Exception as e: none_names = sorted([name[1] for name in names if name[0] == None], key=len) try: return none_names[0] except Exception as e: la_names = sorted([name[1] for name in names if name[0] == "la"], key=len) try: assert len(la_names) > 0 return la_names[0] except Exception as e: return None
python
def get_author_label(self, urn): """Get the label corresponding to the author identified by the CTS URN. try to get an lang=en label (if multiple labels in this lang pick the shortest) try to get a lang=la label (if multiple labels in this lang exist pick the shortest) try to get a lang=None label (if multiple labels in this lang exist pick the shortest) returns None if no name is found """ author = self.get_resource_by_urn(urn) names = author.get_names() en_names = sorted([name[1] for name in names if name[0] == "en"], key=len) try: assert len(en_names) > 0 return en_names[0] except Exception as e: none_names = sorted([name[1] for name in names if name[0] == None], key=len) try: return none_names[0] except Exception as e: la_names = sorted([name[1] for name in names if name[0] == "la"], key=len) try: assert len(la_names) > 0 return la_names[0] except Exception as e: return None
[ "def", "get_author_label", "(", "self", ",", "urn", ")", ":", "author", "=", "self", ".", "get_resource_by_urn", "(", "urn", ")", "names", "=", "author", ".", "get_names", "(", ")", "en_names", "=", "sorted", "(", "[", "name", "[", "1", "]", "for", "name", "in", "names", "if", "name", "[", "0", "]", "==", "\"en\"", "]", ",", "key", "=", "len", ")", "try", ":", "assert", "len", "(", "en_names", ")", ">", "0", "return", "en_names", "[", "0", "]", "except", "Exception", "as", "e", ":", "none_names", "=", "sorted", "(", "[", "name", "[", "1", "]", "for", "name", "in", "names", "if", "name", "[", "0", "]", "==", "None", "]", ",", "key", "=", "len", ")", "try", ":", "return", "none_names", "[", "0", "]", "except", "Exception", "as", "e", ":", "la_names", "=", "sorted", "(", "[", "name", "[", "1", "]", "for", "name", "in", "names", "if", "name", "[", "0", "]", "==", "\"la\"", "]", ",", "key", "=", "len", ")", "try", ":", "assert", "len", "(", "la_names", ")", ">", "0", "return", "la_names", "[", "0", "]", "except", "Exception", "as", "e", ":", "return", "None" ]
Get the label corresponding to the author identified by the CTS URN.

Try to get a lang=en label (if multiple labels in this lang exist, pick the shortest);
failing that, try a label with no language tag (again picking the shortest);
failing that, try a lang=la label (again picking the shortest).

Returns None if no name is found.
[ "Get", "the", "label", "corresponding", "to", "the", "author", "identified", "by", "the", "CTS", "URN", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L272-L298
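The fallback order actually implemented above is: shortest English name, then shortest name without a language tag, then shortest Latin name. A dependency-free sketch of that preference rule over a made-up name list:

names = [('la', 'Homerus'), ('en', 'Homer'), (None, 'Omero')]   # sample (language, name) pairs

def pick_label(name_pairs):
    # mirrors the try/except chain above: en, then no language tag, then la
    for lang in ('en', None, 'la'):
        candidates = sorted([n for l, n in name_pairs if l == lang], key=len)
        if candidates:
            return candidates[0]
    return None

print(pick_label(names))   # -> 'Homer'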
mromanello/hucitlib
knowledge_base/__init__.py
KnowledgeBase.get_statistics
def get_statistics(self): """ Gather basic stats about the Knowledge Base and its contents. :return: a dictionary """ statistics = { "number_authors": 0, "number_author_names": 0, "number_author_abbreviations": 0, "number_works": 0, "number_work_titles": 0, "number_title_abbreviations": 0, "number_opus_maximum":0, } for author in self.get_authors(): if author.get_urn() is not None: opmax = True if self.get_opus_maximum_of(author.get_urn())\ is not None else False if opmax: statistics["number_opus_maximum"] += 1 statistics["number_authors"] += 1 statistics["number_author_names"] += len(author.get_names()) statistics["number_author_abbreviations"] += len( author.get_abbreviations() ) for work in author.get_works(): statistics["number_works"] += 1 statistics["number_work_titles"] += len(work.get_titles()) statistics["number_title_abbreviations"] += len( work.get_abbreviations() ) return statistics
python
def get_statistics(self): """ Gather basic stats about the Knowledge Base and its contents. :return: a dictionary """ statistics = { "number_authors": 0, "number_author_names": 0, "number_author_abbreviations": 0, "number_works": 0, "number_work_titles": 0, "number_title_abbreviations": 0, "number_opus_maximum":0, } for author in self.get_authors(): if author.get_urn() is not None: opmax = True if self.get_opus_maximum_of(author.get_urn())\ is not None else False if opmax: statistics["number_opus_maximum"] += 1 statistics["number_authors"] += 1 statistics["number_author_names"] += len(author.get_names()) statistics["number_author_abbreviations"] += len( author.get_abbreviations() ) for work in author.get_works(): statistics["number_works"] += 1 statistics["number_work_titles"] += len(work.get_titles()) statistics["number_title_abbreviations"] += len( work.get_abbreviations() ) return statistics
[ "def", "get_statistics", "(", "self", ")", ":", "statistics", "=", "{", "\"number_authors\"", ":", "0", ",", "\"number_author_names\"", ":", "0", ",", "\"number_author_abbreviations\"", ":", "0", ",", "\"number_works\"", ":", "0", ",", "\"number_work_titles\"", ":", "0", ",", "\"number_title_abbreviations\"", ":", "0", ",", "\"number_opus_maximum\"", ":", "0", ",", "}", "for", "author", "in", "self", ".", "get_authors", "(", ")", ":", "if", "author", ".", "get_urn", "(", ")", "is", "not", "None", ":", "opmax", "=", "True", "if", "self", ".", "get_opus_maximum_of", "(", "author", ".", "get_urn", "(", ")", ")", "is", "not", "None", "else", "False", "if", "opmax", ":", "statistics", "[", "\"number_opus_maximum\"", "]", "+=", "1", "statistics", "[", "\"number_authors\"", "]", "+=", "1", "statistics", "[", "\"number_author_names\"", "]", "+=", "len", "(", "author", ".", "get_names", "(", ")", ")", "statistics", "[", "\"number_author_abbreviations\"", "]", "+=", "len", "(", "author", ".", "get_abbreviations", "(", ")", ")", "for", "work", "in", "author", ".", "get_works", "(", ")", ":", "statistics", "[", "\"number_works\"", "]", "+=", "1", "statistics", "[", "\"number_work_titles\"", "]", "+=", "len", "(", "work", ".", "get_titles", "(", ")", ")", "statistics", "[", "\"number_title_abbreviations\"", "]", "+=", "len", "(", "work", ".", "get_abbreviations", "(", ")", ")", "return", "statistics" ]
Gather basic stats about the Knowledge Base and its contents. :return: a dictionary
[ "Gather", "basic", "stats", "about", "the", "Knowledge", "Base", "and", "its", "contents", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L334-L367
mromanello/hucitlib
knowledge_base/__init__.py
KnowledgeBase.get_opus_maximum_of
def get_opus_maximum_of(self, author_cts_urn): """Return the author's opux maximum (None otherwise). Given the CTS URN of an author, this method returns its opus maximum. If not available returns None. :param author_cts_urn: the author's CTS URN. :return: an instance of `surfext.HucitWork` or None """ author = self.get_resource_by_urn(author_cts_urn) assert author is not None works = author.get_works() if len(works) > 1: for work in works: if work.is_opus_maximum(): return work elif len(works) == 1: return works[0] else: return None
python
def get_opus_maximum_of(self, author_cts_urn): """Return the author's opux maximum (None otherwise). Given the CTS URN of an author, this method returns its opus maximum. If not available returns None. :param author_cts_urn: the author's CTS URN. :return: an instance of `surfext.HucitWork` or None """ author = self.get_resource_by_urn(author_cts_urn) assert author is not None works = author.get_works() if len(works) > 1: for work in works: if work.is_opus_maximum(): return work elif len(works) == 1: return works[0] else: return None
[ "def", "get_opus_maximum_of", "(", "self", ",", "author_cts_urn", ")", ":", "author", "=", "self", ".", "get_resource_by_urn", "(", "author_cts_urn", ")", "assert", "author", "is", "not", "None", "works", "=", "author", ".", "get_works", "(", ")", "if", "len", "(", "works", ")", ">", "1", ":", "for", "work", "in", "works", ":", "if", "work", ".", "is_opus_maximum", "(", ")", ":", "return", "work", "elif", "len", "(", "works", ")", "==", "1", ":", "return", "works", "[", "0", "]", "else", ":", "return", "None" ]
Return the author's opus maximum (None otherwise). Given the CTS URN of an author, this method returns its opus maximum. If not available returns None. :param author_cts_urn: the author's CTS URN. :return: an instance of `surfext.HucitWork` or None
[ "Return", "the", "author", "s", "opux", "maximum", "(", "None", "otherwise", ")", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L369-L390
mromanello/hucitlib
knowledge_base/__init__.py
KnowledgeBase.to_json
def to_json(self): """ Serialises the content of the KnowledgeBase as JSON. :return: TODO """ return json.dumps({ "statistics": self.get_statistics() , "authors": [json.loads(author.to_json()) for author in self.get_authors()] }, indent=2)
python
def to_json(self): """ Serialises the content of the KnowledgeBase as JSON. :return: TODO """ return json.dumps({ "statistics": self.get_statistics() , "authors": [json.loads(author.to_json()) for author in self.get_authors()] }, indent=2)
[ "def", "to_json", "(", "self", ")", ":", "return", "json", ".", "dumps", "(", "{", "\"statistics\"", ":", "self", ".", "get_statistics", "(", ")", ",", "\"authors\"", ":", "[", "json", ".", "loads", "(", "author", ".", "to_json", "(", ")", ")", "for", "author", "in", "self", ".", "get_authors", "(", ")", "]", "}", ",", "indent", "=", "2", ")" ]
Serialises the content of the KnowledgeBase as JSON. :return: TODO
[ "Serialises", "the", "content", "of", "the", "KnowledgeBase", "as", "JSON", "." ]
train
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L398-L407
BlueBrain/hpcbench
hpcbench/driver/base.py
write_yaml_report
def write_yaml_report(func): """Decorator used in campaign node post-processing """ @wraps(func) def _wrapper(*args, **kwargs): now = datetime.datetime.now() with Timer() as timer: data = func(*args, **kwargs) if isinstance(data, (SEQUENCES, types.GeneratorType)): report = dict(children=list(map(str, data))) elif isinstance(data, MAPPINGS): report = data else: raise Exception('Unexpected data type: %s', type(data)) report['elapsed'] = timer.elapsed report['date'] = now.isoformat() if "no_exec" not in kwargs and report is not None: with open(YAML_REPORT_FILE, 'w') as ostr: yaml.dump(report, ostr, default_flow_style=False) return report return _wrapper
python
def write_yaml_report(func): """Decorator used in campaign node post-processing """ @wraps(func) def _wrapper(*args, **kwargs): now = datetime.datetime.now() with Timer() as timer: data = func(*args, **kwargs) if isinstance(data, (SEQUENCES, types.GeneratorType)): report = dict(children=list(map(str, data))) elif isinstance(data, MAPPINGS): report = data else: raise Exception('Unexpected data type: %s', type(data)) report['elapsed'] = timer.elapsed report['date'] = now.isoformat() if "no_exec" not in kwargs and report is not None: with open(YAML_REPORT_FILE, 'w') as ostr: yaml.dump(report, ostr, default_flow_style=False) return report return _wrapper
[ "def", "write_yaml_report", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "with", "Timer", "(", ")", "as", "timer", ":", "data", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "data", ",", "(", "SEQUENCES", ",", "types", ".", "GeneratorType", ")", ")", ":", "report", "=", "dict", "(", "children", "=", "list", "(", "map", "(", "str", ",", "data", ")", ")", ")", "elif", "isinstance", "(", "data", ",", "MAPPINGS", ")", ":", "report", "=", "data", "else", ":", "raise", "Exception", "(", "'Unexpected data type: %s'", ",", "type", "(", "data", ")", ")", "report", "[", "'elapsed'", "]", "=", "timer", ".", "elapsed", "report", "[", "'date'", "]", "=", "now", ".", "isoformat", "(", ")", "if", "\"no_exec\"", "not", "in", "kwargs", "and", "report", "is", "not", "None", ":", "with", "open", "(", "YAML_REPORT_FILE", ",", "'w'", ")", "as", "ostr", ":", "yaml", ".", "dump", "(", "report", ",", "ostr", ",", "default_flow_style", "=", "False", ")", "return", "report", "return", "_wrapper" ]
Decorator used in campaign node post-processing
[ "Decorator", "used", "in", "campaign", "node", "post", "-", "processing" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/base.py#L32-L54
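A minimal usage sketch of the write_yaml_report decorator documented in the record above. It assumes the function is importable from hpcbench.driver.base (the file path given in the record); the decorated function and its return value are made up for illustration.

    from hpcbench.driver.base import write_yaml_report

    @write_yaml_report
    def post_process():
        # A mapping return value is used directly as the report body;
        # a sequence would instead be wrapped as {'children': [...]}.
        return dict(status='ok')

    report = post_process()
    # The decorator adds 'elapsed' and 'date' keys and dumps the report
    # to the YAML report file in the current working directory.
    print(sorted(report))  # ['date', 'elapsed', 'status']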
BlueBrain/hpcbench
hpcbench/driver/base.py
Enumerator.call_decorator
def call_decorator(cls, func): """class function that MUST be specified as decorator to the `__call__` method overriden by sub-classes. """ @wraps(func) def _wrap(self, *args, **kwargs): try: return func(self, *args, **kwargs) except Exception: self.logger.exception('While executing benchmark') if not (self.catch_child_exception or False): raise return _wrap
python
def call_decorator(cls, func): """class function that MUST be specified as decorator to the `__call__` method overriden by sub-classes. """ @wraps(func) def _wrap(self, *args, **kwargs): try: return func(self, *args, **kwargs) except Exception: self.logger.exception('While executing benchmark') if not (self.catch_child_exception or False): raise return _wrap
[ "def", "call_decorator", "(", "cls", ",", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "_wrap", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", ":", "self", ".", "logger", ".", "exception", "(", "'While executing benchmark'", ")", "if", "not", "(", "self", ".", "catch_child_exception", "or", "False", ")", ":", "raise", "return", "_wrap" ]
class function that MUST be specified as decorator to the `__call__` method overridden by sub-classes.
[ "class", "function", "that", "MUST", "be", "specified", "as", "decorator", "to", "the", "__call__", "method", "overriden", "by", "sub", "-", "classes", "." ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/base.py#L136-L150
BlueBrain/hpcbench
hpcbench/driver/base.py
Enumerator.traverse
def traverse(self): """Enumerate children and build associated objects """ builder = self.child_builder for child in self._children: with pushd(str(child)): yield child, builder(child)
python
def traverse(self): """Enumerate children and build associated objects """ builder = self.child_builder for child in self._children: with pushd(str(child)): yield child, builder(child)
[ "def", "traverse", "(", "self", ")", ":", "builder", "=", "self", ".", "child_builder", "for", "child", "in", "self", ".", "_children", ":", "with", "pushd", "(", "str", "(", "child", ")", ")", ":", "yield", "child", ",", "builder", "(", "child", ")" ]
Enumerate children and build associated objects
[ "Enumerate", "children", "and", "build", "associated", "objects" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/base.py#L162-L168
eng-tools/sfsimodels
sfsimodels/models/foundations.py
PadFoundation.i_ww
def i_ww(self): """ Second moment of inertia around the width axis. :return: """ d_values = [] for i in range(self.n_pads_l): d_values.append(self.pad_position_l(i)) d_values = np.array(d_values) - self.length / 2 area_d_sqrd = sum(self.pad_area * d_values ** 2) * self.n_pads_w i_second = self.pad_i_ww * self.n_pads return area_d_sqrd + i_second
python
def i_ww(self): """ Second moment of inertia around the width axis. :return: """ d_values = [] for i in range(self.n_pads_l): d_values.append(self.pad_position_l(i)) d_values = np.array(d_values) - self.length / 2 area_d_sqrd = sum(self.pad_area * d_values ** 2) * self.n_pads_w i_second = self.pad_i_ww * self.n_pads return area_d_sqrd + i_second
[ "def", "i_ww", "(", "self", ")", ":", "d_values", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "n_pads_l", ")", ":", "d_values", ".", "append", "(", "self", ".", "pad_position_l", "(", "i", ")", ")", "d_values", "=", "np", ".", "array", "(", "d_values", ")", "-", "self", ".", "length", "/", "2", "area_d_sqrd", "=", "sum", "(", "self", ".", "pad_area", "*", "d_values", "**", "2", ")", "*", "self", ".", "n_pads_w", "i_second", "=", "self", ".", "pad_i_ww", "*", "self", ".", "n_pads", "return", "area_d_sqrd", "+", "i_second" ]
Second moment of inertia around the width axis. :return:
[ "Second", "moment", "of", "inertia", "around", "the", "width", "axis", ".", ":", "return", ":" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/foundations.py#L268-L279
eng-tools/sfsimodels
sfsimodels/models/foundations.py
PadFoundation.i_ll
def i_ll(self): """ Second moment of inertia around the length axis. :return: """ d_values = [] for i in range(self.n_pads_w): d_values.append(self.pad_position_w(i)) d_values = np.array(d_values) - self.width / 2 area_d_sqrd = sum(self.pad_area * d_values ** 2) * self.n_pads_l i_second = self.pad_i_ll * self.n_pads return area_d_sqrd + i_second
python
def i_ll(self): """ Second moment of inertia around the length axis. :return: """ d_values = [] for i in range(self.n_pads_w): d_values.append(self.pad_position_w(i)) d_values = np.array(d_values) - self.width / 2 area_d_sqrd = sum(self.pad_area * d_values ** 2) * self.n_pads_l i_second = self.pad_i_ll * self.n_pads return area_d_sqrd + i_second
[ "def", "i_ll", "(", "self", ")", ":", "d_values", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "n_pads_w", ")", ":", "d_values", ".", "append", "(", "self", ".", "pad_position_w", "(", "i", ")", ")", "d_values", "=", "np", ".", "array", "(", "d_values", ")", "-", "self", ".", "width", "/", "2", "area_d_sqrd", "=", "sum", "(", "self", ".", "pad_area", "*", "d_values", "**", "2", ")", "*", "self", ".", "n_pads_l", "i_second", "=", "self", ".", "pad_i_ll", "*", "self", ".", "n_pads", "return", "area_d_sqrd", "+", "i_second" ]
Second moment of inertia around the length axis. :return:
[ "Second", "moment", "of", "inertia", "around", "the", "length", "axis", ".", ":", "return", ":" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/foundations.py#L282-L293
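The i_ww and i_ll properties in the two records above both apply the parallel-axis theorem: each pad contributes its own second moment plus its area times the squared offset of its centroid from the foundation centreline. A minimal numeric sketch with made-up dimensions (not taken from the records):

    import numpy as np

    pad_area = 1.0                     # 1 m x 1 m pads (hypothetical)
    pad_i_ww = 1.0 * 1.0 ** 3 / 12     # each pad's own moment about the width axis
    n_pads_l, n_pads_w = 3, 2          # pads along the length and across the width
    d = np.array([-2.5, 0.0, 2.5])     # pad centroid offsets from the centreline, m

    # transfer terms for every pad row plus the summed pad moments, as in i_ww
    i_ww = (pad_area * d ** 2).sum() * n_pads_w + pad_i_ww * (n_pads_l * n_pads_w)
    print(i_ww)  # 25.5 (m^4)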
eng-tools/sfsimodels
sfsimodels/models/foundations.py
PadFoundation.pad_position_l
def pad_position_l(self, i): """ Determines the position of the ith pad in the length direction. Assumes equally spaced pads. :param i: ith number of pad in length direction (0-indexed) :return: """ if i >= self.n_pads_l: raise ModelError("pad index out-of-bounds") return (self.length - self.pad_length) / (self.n_pads_l - 1) * i + self.pad_length / 2
python
def pad_position_l(self, i): """ Determines the position of the ith pad in the length direction. Assumes equally spaced pads. :param i: ith number of pad in length direction (0-indexed) :return: """ if i >= self.n_pads_l: raise ModelError("pad index out-of-bounds") return (self.length - self.pad_length) / (self.n_pads_l - 1) * i + self.pad_length / 2
[ "def", "pad_position_l", "(", "self", ",", "i", ")", ":", "if", "i", ">=", "self", ".", "n_pads_l", ":", "raise", "ModelError", "(", "\"pad index out-of-bounds\"", ")", "return", "(", "self", ".", "length", "-", "self", ".", "pad_length", ")", "/", "(", "self", ".", "n_pads_l", "-", "1", ")", "*", "i", "+", "self", ".", "pad_length", "/", "2" ]
Determines the position of the ith pad in the length direction. Assumes equally spaced pads. :param i: ith number of pad in length direction (0-indexed) :return:
[ "Determines", "the", "position", "of", "the", "ith", "pad", "in", "the", "length", "direction", ".", "Assumes", "equally", "spaced", "pads", "." ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/foundations.py#L335-L345
eng-tools/sfsimodels
sfsimodels/models/foundations.py
PadFoundation.pad_position_w
def pad_position_w(self, i): """ Determines the position of the ith pad in the width direction. Assumes equally spaced pads. :param i: ith number of pad in width direction (0-indexed) :return: """ if i >= self.n_pads_w: raise ModelError("pad index out-of-bounds") return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2
python
def pad_position_w(self, i): """ Determines the position of the ith pad in the width direction. Assumes equally spaced pads. :param i: ith number of pad in width direction (0-indexed) :return: """ if i >= self.n_pads_w: raise ModelError("pad index out-of-bounds") return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2
[ "def", "pad_position_w", "(", "self", ",", "i", ")", ":", "if", "i", ">=", "self", ".", "n_pads_w", ":", "raise", "ModelError", "(", "\"pad index out-of-bounds\"", ")", "return", "(", "self", ".", "width", "-", "self", ".", "pad_width", ")", "/", "(", "self", ".", "n_pads_w", "-", "1", ")", "*", "i", "+", "self", ".", "pad_width", "/", "2" ]
Determines the position of the ith pad in the width direction. Assumes equally spaced pads. :param i: ith number of pad in width direction (0-indexed) :return:
[ "Determines", "the", "position", "of", "the", "ith", "pad", "in", "the", "width", "direction", ".", "Assumes", "equally", "spaced", "pads", "." ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/foundations.py#L347-L357
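The pad_position_l and pad_position_w helpers in the two records above place the outer pad centres half a pad dimension in from each edge and space the remaining centres evenly between them. A small numeric illustration with hypothetical dimensions:

    length, pad_length, n_pads_l = 6.0, 1.0, 3   # made-up values
    centres = [(length - pad_length) / (n_pads_l - 1) * i + pad_length / 2
               for i in range(n_pads_l)]
    print(centres)  # [0.5, 3.0, 5.5] -> first, middle and last pad centres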
sci-bots/svg-model
svg_model/svgload/svg_parser.py
Svg.add_to_batch
def add_to_batch(self, batch): ''' Adds paths to the given batch object. They are all added as GL_TRIANGLES, so the batch will aggregate them all into a single OpenGL primitive. ''' for name in self.paths: svg_path = self.paths[name] svg_path.add_to_batch(batch)
python
def add_to_batch(self, batch): ''' Adds paths to the given batch object. They are all added as GL_TRIANGLES, so the batch will aggregate them all into a single OpenGL primitive. ''' for name in self.paths: svg_path = self.paths[name] svg_path.add_to_batch(batch)
[ "def", "add_to_batch", "(", "self", ",", "batch", ")", ":", "for", "name", "in", "self", ".", "paths", ":", "svg_path", "=", "self", ".", "paths", "[", "name", "]", "svg_path", ".", "add_to_batch", "(", "batch", ")" ]
Adds paths to the given batch object. They are all added as GL_TRIANGLES, so the batch will aggregate them all into a single OpenGL primitive.
[ "Adds", "paths", "to", "the", "given", "batch", "object", ".", "They", "are", "all", "added", "as", "GL_TRIANGLES", "so", "the", "batch", "will", "aggregate", "them", "all", "into", "a", "single", "OpenGL", "primitive", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/svgload/svg_parser.py#L55-L63
sci-bots/svg-model
svg_model/svgload/svg_parser.py
SvgParser.parse
def parse(self, xml_root, on_error=None): ''' Parse all <path> elements from xml_root. Optional on_error arg specifies a callback function to be run when an error occurs during parsing. The specified on_error function must accept 3 arguments: <svg filename>, <path_tag>, <error message> An example on_error handler is provided as svg_load.svg_parser.parse_warning(), where all SvgParseErrors are converted to warning messages. See usage below: >>> import re >>> svg_parser = SvgParser() >>> path_tag = etree.XML(""" ... <path ... xmlns="http://www.w3.org/2000/svg" ... xmlns:dc="http://purl.org/dc/elements/1.1/" ... xmlns:cc="http://creativecommons.org/ns#" ... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" ... xmlns:svg="http://www.w3.org/2000/svg" ... xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" ... xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" ... id="path13231" ... d="M8 4 l-4,4" ... linecap="square" ... stroke="#000000" ... stroke-width="0.25" ... />""") >>> with warnings.catch_warnings(record=True) as w: ... svg = svg_parser.parse(path_tag, on_error=parse_warning) >>> print w[-1].category <type 'exceptions.RuntimeWarning'> >>> match = re.search(r'^Error parsing None:\d+, unsupported svg path command: l', str(w[-1].message)) >>> print match is None False >>> path_tag = etree.XML(""" ... <path ... xmlns="http://www.w3.org/2000/svg" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" ... xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" ... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" ... xmlns:svg="http://www.w3.org/2000/svg" ... xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" ... style="fill:#0000ff;stroke:#ff0000;stroke-width:0.10000000000000001;stroke-miterlimit:4;stroke-dasharray:none" ... id="path18327" ... d="M 636.0331,256.9345 L 636.0331,256.9345" ... inkscape:connector-curvature="0"/>""") >>> with warnings.catch_warnings(record=True) as w: ... svg = svg_parser.parse(path_tag, on_error=parse_warning) >>> print w[-1].category <type 'exceptions.RuntimeWarning'> >>> match = re.search(r'^Error parsing None:\d+, loop needs 3 or more verts', str(w[-1].message)) >>> print match is None False ''' svg = Svg() svg_namespace = {'svg': 'http://www.w3.org/2000/svg'} path_tags = xml_root.xpath('(/svg:svg|/svg:svg/svg:g)/svg:path', namespaces=svg_namespace) parser = PathParser() for path_tag in path_tags: try: id, svg_path = parser.parse(path_tag) if svg_path.loops: svg.add_path(id, svg_path) except (ParseError, ) as why: filename = getattr(self, 'filename', None) args = (filename, path_tag, why.message) if on_error: on_error(*args) else: raise SvgParseError(*args) if svg.paths: x, y = svg.get_boundary().get_center() for svg_path in svg.paths.values(): svg_path.offset(-x, -y) return svg
python
def parse(self, xml_root, on_error=None): ''' Parse all <path> elements from xml_root. Optional on_error arg specifies a callback function to be run when an error occurs during parsing. The specified on_error function must accept 3 arguments: <svg filename>, <path_tag>, <error message> An example on_error handler is provided as svg_load.svg_parser.parse_warning(), where all SvgParseErrors are converted to warning messages. See usage below: >>> import re >>> svg_parser = SvgParser() >>> path_tag = etree.XML(""" ... <path ... xmlns="http://www.w3.org/2000/svg" ... xmlns:dc="http://purl.org/dc/elements/1.1/" ... xmlns:cc="http://creativecommons.org/ns#" ... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" ... xmlns:svg="http://www.w3.org/2000/svg" ... xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" ... xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" ... id="path13231" ... d="M8 4 l-4,4" ... linecap="square" ... stroke="#000000" ... stroke-width="0.25" ... />""") >>> with warnings.catch_warnings(record=True) as w: ... svg = svg_parser.parse(path_tag, on_error=parse_warning) >>> print w[-1].category <type 'exceptions.RuntimeWarning'> >>> match = re.search(r'^Error parsing None:\d+, unsupported svg path command: l', str(w[-1].message)) >>> print match is None False >>> path_tag = etree.XML(""" ... <path ... xmlns="http://www.w3.org/2000/svg" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" ... xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" ... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" ... xmlns:svg="http://www.w3.org/2000/svg" ... xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" ... style="fill:#0000ff;stroke:#ff0000;stroke-width:0.10000000000000001;stroke-miterlimit:4;stroke-dasharray:none" ... id="path18327" ... d="M 636.0331,256.9345 L 636.0331,256.9345" ... inkscape:connector-curvature="0"/>""") >>> with warnings.catch_warnings(record=True) as w: ... svg = svg_parser.parse(path_tag, on_error=parse_warning) >>> print w[-1].category <type 'exceptions.RuntimeWarning'> >>> match = re.search(r'^Error parsing None:\d+, loop needs 3 or more verts', str(w[-1].message)) >>> print match is None False ''' svg = Svg() svg_namespace = {'svg': 'http://www.w3.org/2000/svg'} path_tags = xml_root.xpath('(/svg:svg|/svg:svg/svg:g)/svg:path', namespaces=svg_namespace) parser = PathParser() for path_tag in path_tags: try: id, svg_path = parser.parse(path_tag) if svg_path.loops: svg.add_path(id, svg_path) except (ParseError, ) as why: filename = getattr(self, 'filename', None) args = (filename, path_tag, why.message) if on_error: on_error(*args) else: raise SvgParseError(*args) if svg.paths: x, y = svg.get_boundary().get_center() for svg_path in svg.paths.values(): svg_path.offset(-x, -y) return svg
[ "def", "parse", "(", "self", ",", "xml_root", ",", "on_error", "=", "None", ")", ":", "svg", "=", "Svg", "(", ")", "svg_namespace", "=", "{", "'svg'", ":", "'http://www.w3.org/2000/svg'", "}", "path_tags", "=", "xml_root", ".", "xpath", "(", "'(/svg:svg|/svg:svg/svg:g)/svg:path'", ",", "namespaces", "=", "svg_namespace", ")", "parser", "=", "PathParser", "(", ")", "for", "path_tag", "in", "path_tags", ":", "try", ":", "id", ",", "svg_path", "=", "parser", ".", "parse", "(", "path_tag", ")", "if", "svg_path", ".", "loops", ":", "svg", ".", "add_path", "(", "id", ",", "svg_path", ")", "except", "(", "ParseError", ",", ")", "as", "why", ":", "filename", "=", "getattr", "(", "self", ",", "'filename'", ",", "None", ")", "args", "=", "(", "filename", ",", "path_tag", ",", "why", ".", "message", ")", "if", "on_error", ":", "on_error", "(", "*", "args", ")", "else", ":", "raise", "SvgParseError", "(", "*", "args", ")", "if", "svg", ".", "paths", ":", "x", ",", "y", "=", "svg", ".", "get_boundary", "(", ")", ".", "get_center", "(", ")", "for", "svg_path", "in", "svg", ".", "paths", ".", "values", "(", ")", ":", "svg_path", ".", "offset", "(", "-", "x", ",", "-", "y", ")", "return", "svg" ]
Parse all <path> elements from xml_root. Optional on_error arg specifies a callback function to be run when an error occurs during parsing. The specified on_error function must accept 3 arguments: <svg filename>, <path_tag>, <error message> An example on_error handler is provided as svg_load.svg_parser.parse_warning(), where all SvgParseErrors are converted to warning messages. See usage below: >>> import re >>> svg_parser = SvgParser() >>> path_tag = etree.XML(""" ... <path ... xmlns="http://www.w3.org/2000/svg" ... xmlns:dc="http://purl.org/dc/elements/1.1/" ... xmlns:cc="http://creativecommons.org/ns#" ... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" ... xmlns:svg="http://www.w3.org/2000/svg" ... xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" ... xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" ... id="path13231" ... d="M8 4 l-4,4" ... linecap="square" ... stroke="#000000" ... stroke-width="0.25" ... />""") >>> with warnings.catch_warnings(record=True) as w: ... svg = svg_parser.parse(path_tag, on_error=parse_warning) >>> print w[-1].category <type 'exceptions.RuntimeWarning'> >>> match = re.search(r'^Error parsing None:\d+, unsupported svg path command: l', str(w[-1].message)) >>> print match is None False >>> path_tag = etree.XML(""" ... <path ... xmlns="http://www.w3.org/2000/svg" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" ... xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" ... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" ... xmlns:svg="http://www.w3.org/2000/svg" ... xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" ... style="fill:#0000ff;stroke:#ff0000;stroke-width:0.10000000000000001;stroke-miterlimit:4;stroke-dasharray:none" ... id="path18327" ... d="M 636.0331,256.9345 L 636.0331,256.9345" ... inkscape:connector-curvature="0"/>""") >>> with warnings.catch_warnings(record=True) as w: ... svg = svg_parser.parse(path_tag, on_error=parse_warning) >>> print w[-1].category <type 'exceptions.RuntimeWarning'> >>> match = re.search(r'^Error parsing None:\d+, loop needs 3 or more verts', str(w[-1].message)) >>> print match is None False
[ "Parse", "all", "<path", ">", "elements", "from", "xml_root", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/svgload/svg_parser.py#L99-L174
portfoliome/foil
foil/util.py
alphanum_key
def alphanum_key(s): """Turn a string into a list of string and number chunks. "z23a" -> ["z", 23, "a"] """ return [int(c) if c.isdigit() else c for c in _RE_INT.split(s)]
python
def alphanum_key(s): """Turn a string into a list of string and number chunks. "z23a" -> ["z", 23, "a"] """ return [int(c) if c.isdigit() else c for c in _RE_INT.split(s)]
[ "def", "alphanum_key", "(", "s", ")", ":", "return", "[", "int", "(", "c", ")", "if", "c", ".", "isdigit", "(", ")", "else", "c", "for", "c", "in", "_RE_INT", ".", "split", "(", "s", ")", "]" ]
Turn a string into a list of string and number chunks. "z23a" -> ["z", 23, "a"]
[ "Turn", "a", "string", "into", "a", "list", "of", "string", "and", "number", "chunks", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/util.py#L6-L12
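A short usage sketch of alphanum_key from the record above, typically passed as a sort key for natural ("human") ordering. The _RE_INT pattern is not shown in the record; re.compile(r'(\d+)') is an assumption that reproduces the documented behaviour.

    import re

    _RE_INT = re.compile(r'(\d+)')  # assumed definition of the module-level pattern

    def alphanum_key(s):
        return [int(c) if c.isdigit() else c for c in _RE_INT.split(s)]

    runs = ["run10", "run2", "run1"]
    print(sorted(runs))                    # ['run1', 'run10', 'run2'] (lexicographic)
    print(sorted(runs, key=alphanum_key))  # ['run1', 'run2', 'run10'] (natural order)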
PolyJIT/benchbuild
benchbuild/experiments/__init__.py
discover
def discover(): """ Import all experiments listed in PLUGINS_EXPERIMENTS. Tests: >>> from benchbuild.settings import CFG >>> from benchbuild.experiments import discover >>> import logging as lg >>> import sys >>> l = lg.getLogger('benchbuild') >>> lg.getLogger('benchbuild').setLevel(lg.DEBUG) >>> lg.getLogger('benchbuild').handlers = [lg.StreamHandler(stream=sys.stdout)] >>> CFG["plugins"]["experiments"] = ["benchbuild.non.existing", "benchbuild.experiments.raw"] >>> discover() Could not find 'benchbuild.non.existing' ImportError: No module named 'benchbuild.non' """ if CFG["plugins"]["autoload"]: experiment_plugins = CFG["plugins"]["experiments"].value for exp_plugin in experiment_plugins: try: importlib.import_module(exp_plugin) except ImportError as import_error: LOG.error("Could not find '%s'", exp_plugin) LOG.error("ImportError: %s", import_error.msg)
python
def discover(): """ Import all experiments listed in PLUGINS_EXPERIMENTS. Tests: >>> from benchbuild.settings import CFG >>> from benchbuild.experiments import discover >>> import logging as lg >>> import sys >>> l = lg.getLogger('benchbuild') >>> lg.getLogger('benchbuild').setLevel(lg.DEBUG) >>> lg.getLogger('benchbuild').handlers = [lg.StreamHandler(stream=sys.stdout)] >>> CFG["plugins"]["experiments"] = ["benchbuild.non.existing", "benchbuild.experiments.raw"] >>> discover() Could not find 'benchbuild.non.existing' ImportError: No module named 'benchbuild.non' """ if CFG["plugins"]["autoload"]: experiment_plugins = CFG["plugins"]["experiments"].value for exp_plugin in experiment_plugins: try: importlib.import_module(exp_plugin) except ImportError as import_error: LOG.error("Could not find '%s'", exp_plugin) LOG.error("ImportError: %s", import_error.msg)
[ "def", "discover", "(", ")", ":", "if", "CFG", "[", "\"plugins\"", "]", "[", "\"autoload\"", "]", ":", "experiment_plugins", "=", "CFG", "[", "\"plugins\"", "]", "[", "\"experiments\"", "]", ".", "value", "for", "exp_plugin", "in", "experiment_plugins", ":", "try", ":", "importlib", ".", "import_module", "(", "exp_plugin", ")", "except", "ImportError", "as", "import_error", ":", "LOG", ".", "error", "(", "\"Could not find '%s'\"", ",", "exp_plugin", ")", "LOG", ".", "error", "(", "\"ImportError: %s\"", ",", "import_error", ".", "msg", ")" ]
Import all experiments listed in PLUGINS_EXPERIMENTS. Tests: >>> from benchbuild.settings import CFG >>> from benchbuild.experiments import discover >>> import logging as lg >>> import sys >>> l = lg.getLogger('benchbuild') >>> lg.getLogger('benchbuild').setLevel(lg.DEBUG) >>> lg.getLogger('benchbuild').handlers = [lg.StreamHandler(stream=sys.stdout)] >>> CFG["plugins"]["experiments"] = ["benchbuild.non.existing", "benchbuild.experiments.raw"] >>> discover() Could not find 'benchbuild.non.existing' ImportError: No module named 'benchbuild.non'
[ "Import", "all", "experiments", "listed", "in", "PLUGINS_EXPERIMENTS", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/experiments/__init__.py#L20-L44
PolyJIT/benchbuild
benchbuild/cli/project.py
print_projects
def print_projects(projects=None): """ Print a list of projects registered for that experiment. Args: exp: The experiment to print all projects for. """ grouped_by = {} if not projects: print( "Your selection didn't include any projects for this experiment.") return for name in projects: prj = projects[name] if prj.GROUP not in grouped_by: grouped_by[prj.GROUP] = [] grouped_by[prj.GROUP].append("{name}/{group}".format( name=prj.NAME, group=prj.GROUP)) for name in grouped_by: print("group: {0}".format(name)) group_projects = sorted(grouped_by[name]) for prj in group_projects: prj_cls = projects[prj] version_str = None if hasattr(prj_cls, 'versions'): version_str = ", ".join(prj_cls.versions()) project_id = "{0}/{1}".format(prj_cls.NAME, prj_cls.GROUP) project_str = \ " name: {id:<32} version: {version:<24} source: {src}".format( id=str(project_id), version=str(prj_cls.VERSION), src=str(prj_cls.SRC_FILE)) print(project_str) if prj_cls.__doc__: docstr = prj_cls.__doc__.strip("\n ") print(" description: {desc}".format(desc=docstr)) if version_str: print(" versions: {versions}".format(versions=version_str)) print()
python
def print_projects(projects=None): """ Print a list of projects registered for that experiment. Args: exp: The experiment to print all projects for. """ grouped_by = {} if not projects: print( "Your selection didn't include any projects for this experiment.") return for name in projects: prj = projects[name] if prj.GROUP not in grouped_by: grouped_by[prj.GROUP] = [] grouped_by[prj.GROUP].append("{name}/{group}".format( name=prj.NAME, group=prj.GROUP)) for name in grouped_by: print("group: {0}".format(name)) group_projects = sorted(grouped_by[name]) for prj in group_projects: prj_cls = projects[prj] version_str = None if hasattr(prj_cls, 'versions'): version_str = ", ".join(prj_cls.versions()) project_id = "{0}/{1}".format(prj_cls.NAME, prj_cls.GROUP) project_str = \ " name: {id:<32} version: {version:<24} source: {src}".format( id=str(project_id), version=str(prj_cls.VERSION), src=str(prj_cls.SRC_FILE)) print(project_str) if prj_cls.__doc__: docstr = prj_cls.__doc__.strip("\n ") print(" description: {desc}".format(desc=docstr)) if version_str: print(" versions: {versions}".format(versions=version_str)) print()
[ "def", "print_projects", "(", "projects", "=", "None", ")", ":", "grouped_by", "=", "{", "}", "if", "not", "projects", ":", "print", "(", "\"Your selection didn't include any projects for this experiment.\"", ")", "return", "for", "name", "in", "projects", ":", "prj", "=", "projects", "[", "name", "]", "if", "prj", ".", "GROUP", "not", "in", "grouped_by", ":", "grouped_by", "[", "prj", ".", "GROUP", "]", "=", "[", "]", "grouped_by", "[", "prj", ".", "GROUP", "]", ".", "append", "(", "\"{name}/{group}\"", ".", "format", "(", "name", "=", "prj", ".", "NAME", ",", "group", "=", "prj", ".", "GROUP", ")", ")", "for", "name", "in", "grouped_by", ":", "print", "(", "\"group: {0}\"", ".", "format", "(", "name", ")", ")", "group_projects", "=", "sorted", "(", "grouped_by", "[", "name", "]", ")", "for", "prj", "in", "group_projects", ":", "prj_cls", "=", "projects", "[", "prj", "]", "version_str", "=", "None", "if", "hasattr", "(", "prj_cls", ",", "'versions'", ")", ":", "version_str", "=", "\", \"", ".", "join", "(", "prj_cls", ".", "versions", "(", ")", ")", "project_id", "=", "\"{0}/{1}\"", ".", "format", "(", "prj_cls", ".", "NAME", ",", "prj_cls", ".", "GROUP", ")", "project_str", "=", "\" name: {id:<32} version: {version:<24} source: {src}\"", ".", "format", "(", "id", "=", "str", "(", "project_id", ")", ",", "version", "=", "str", "(", "prj_cls", ".", "VERSION", ")", ",", "src", "=", "str", "(", "prj_cls", ".", "SRC_FILE", ")", ")", "print", "(", "project_str", ")", "if", "prj_cls", ".", "__doc__", ":", "docstr", "=", "prj_cls", ".", "__doc__", ".", "strip", "(", "\"\\n \"", ")", "print", "(", "\" description: {desc}\"", ".", "format", "(", "desc", "=", "docstr", ")", ")", "if", "version_str", ":", "print", "(", "\" versions: {versions}\"", ".", "format", "(", "versions", "=", "version_str", ")", ")", "print", "(", ")" ]
Print a list of projects registered for that experiment. Args: projects: The projects to print, keyed by project name.
[ "Print", "a", "list", "of", "projects", "registered", "for", "that", "experiment", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/cli/project.py#L35-L81
Capitains/Nautilus
capitains_nautilus/cmd.py
_commandline
def _commandline(repositories, port=8000, host="127.0.0.1", debug=False, cache=None, cache_path="./cache", redis=None): """ Run a CTS API from command line. .. warning:: This function should not be used in the production context :param repositories: :param port: :param ip: :param debug: :param cache: :param cache_path: :return: """ if cache == "redis": nautilus_cache = RedisCache(redis) cache_type = "redis" elif cache == "filesystem": nautilus_cache = FileSystemCache(cache_path) cache_type = "simple" else: nautilus_cache = NullCache() cache_type = "simple" app = Flask("Nautilus") if debug: app.logger.setLevel(logging.INFO) resolver = NautilusCtsResolver(resource=repositories) nautilus = FlaskNautilus( app=app, resolver=resolver #parser_cache=WerkzeugCacheWrapper(nautilus_cache), #logger=None ) nautilus.resolver.parse() app.run(debug=debug, port=port, host=host)
python
def _commandline(repositories, port=8000, host="127.0.0.1", debug=False, cache=None, cache_path="./cache", redis=None): """ Run a CTS API from command line. .. warning:: This function should not be used in the production context :param repositories: :param port: :param ip: :param debug: :param cache: :param cache_path: :return: """ if cache == "redis": nautilus_cache = RedisCache(redis) cache_type = "redis" elif cache == "filesystem": nautilus_cache = FileSystemCache(cache_path) cache_type = "simple" else: nautilus_cache = NullCache() cache_type = "simple" app = Flask("Nautilus") if debug: app.logger.setLevel(logging.INFO) resolver = NautilusCtsResolver(resource=repositories) nautilus = FlaskNautilus( app=app, resolver=resolver #parser_cache=WerkzeugCacheWrapper(nautilus_cache), #logger=None ) nautilus.resolver.parse() app.run(debug=debug, port=port, host=host)
[ "def", "_commandline", "(", "repositories", ",", "port", "=", "8000", ",", "host", "=", "\"127.0.0.1\"", ",", "debug", "=", "False", ",", "cache", "=", "None", ",", "cache_path", "=", "\"./cache\"", ",", "redis", "=", "None", ")", ":", "if", "cache", "==", "\"redis\"", ":", "nautilus_cache", "=", "RedisCache", "(", "redis", ")", "cache_type", "=", "\"redis\"", "elif", "cache", "==", "\"filesystem\"", ":", "nautilus_cache", "=", "FileSystemCache", "(", "cache_path", ")", "cache_type", "=", "\"simple\"", "else", ":", "nautilus_cache", "=", "NullCache", "(", ")", "cache_type", "=", "\"simple\"", "app", "=", "Flask", "(", "\"Nautilus\"", ")", "if", "debug", ":", "app", ".", "logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "resolver", "=", "NautilusCtsResolver", "(", "resource", "=", "repositories", ")", "nautilus", "=", "FlaskNautilus", "(", "app", "=", "app", ",", "resolver", "=", "resolver", "#parser_cache=WerkzeugCacheWrapper(nautilus_cache),", "#logger=None", ")", "nautilus", ".", "resolver", ".", "parse", "(", ")", "app", ".", "run", "(", "debug", "=", "debug", ",", "port", "=", "port", ",", "host", "=", "host", ")" ]
Run a CTS API from command line. .. warning:: This function should not be used in a production context :param repositories: :param port: :param host: :param debug: :param cache: :param cache_path: :return:
[ "Run", "a", "CTS", "API", "from", "command", "line", "." ]
train
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cmd.py#L11-L49
BlueBrain/hpcbench
hpcbench/report.py
tex_escape
def tex_escape(text): """Escape string for LaTeX usage :param text: a plain text message :return: the message escaped to appear correctly in LaTeX """ conv = { '&': r'\&', '%': r'\%', '$': r'\$', '#': r'\#', '_': r'\_', '{': r'\{', '}': r'\}', '~': r'\textasciitilde{}', '^': r'\^{}', '\\': r'\textbackslash{}', '<': r'\textless ', '>': r'\textgreater ', } regex = re.compile( '|'.join( re.escape(six.text_type(key)) for key in sorted(conv.keys(), key=lambda item: -len(item)) ) ) return regex.sub(lambda match: conv[match.group()], text)
python
def tex_escape(text): """Escape string for LaTeX usage :param text: a plain text message :return: the message escaped to appear correctly in LaTeX """ conv = { '&': r'\&', '%': r'\%', '$': r'\$', '#': r'\#', '_': r'\_', '{': r'\{', '}': r'\}', '~': r'\textasciitilde{}', '^': r'\^{}', '\\': r'\textbackslash{}', '<': r'\textless ', '>': r'\textgreater ', } regex = re.compile( '|'.join( re.escape(six.text_type(key)) for key in sorted(conv.keys(), key=lambda item: -len(item)) ) ) return regex.sub(lambda match: conv[match.group()], text)
[ "def", "tex_escape", "(", "text", ")", ":", "conv", "=", "{", "'&'", ":", "r'\\&'", ",", "'%'", ":", "r'\\%'", ",", "'$'", ":", "r'\\$'", ",", "'#'", ":", "r'\\#'", ",", "'_'", ":", "r'\\_'", ",", "'{'", ":", "r'\\{'", ",", "'}'", ":", "r'\\}'", ",", "'~'", ":", "r'\\textasciitilde{}'", ",", "'^'", ":", "r'\\^{}'", ",", "'\\\\'", ":", "r'\\textbackslash{}'", ",", "'<'", ":", "r'\\textless '", ",", "'>'", ":", "r'\\textgreater '", ",", "}", "regex", "=", "re", ".", "compile", "(", "'|'", ".", "join", "(", "re", ".", "escape", "(", "six", ".", "text_type", "(", "key", ")", ")", "for", "key", "in", "sorted", "(", "conv", ".", "keys", "(", ")", ",", "key", "=", "lambda", "item", ":", "-", "len", "(", "item", ")", ")", ")", ")", "return", "regex", ".", "sub", "(", "lambda", "match", ":", "conv", "[", "match", ".", "group", "(", ")", "]", ",", "text", ")" ]
Escape string for LaTeX usage :param text: a plain text message :return: the message escaped to appear correctly in LaTeX
[ "Escape", "string", "for", "LaTeX", "usage", ":", "param", "text", ":", "a", "plain", "text", "message", ":", "return", ":", "the", "message", "escaped", "to", "appear", "correctly", "in", "LaTeX" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/report.py#L13-L38
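A quick usage sketch of tex_escape from the record above; the import path is assumed from the record's file location (hpcbench/report.py).

    from hpcbench.report import tex_escape

    print(tex_escape("50% of $100 & #1"))
    # -> 50\% of \$100 \& \#1
    print(tex_escape("mean_value"))
    # -> mean\_value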
BlueBrain/hpcbench
hpcbench/report.py
render
def render(template=None, ostr=None, **kwargs): """Generate report from a campaign :param template: Jinja template to use, ``DEFAULT_TEMPLATE`` is used if not specified :param ostr: output file or filename. Default is standard output """ jinja_environment.filters['texscape'] = tex_escape template = template or DEFAULT_TEMPLATE ostr = ostr or sys.stdout jinja_template = jinja_environment.get_template(template) jinja_template.stream(**kwargs).dump(ostr)
python
def render(template=None, ostr=None, **kwargs): """Generate report from a campaign :param template: Jinja template to use, ``DEFAULT_TEMPLATE`` is used if not specified :param ostr: output file or filename. Default is standard output """ jinja_environment.filters['texscape'] = tex_escape template = template or DEFAULT_TEMPLATE ostr = ostr or sys.stdout jinja_template = jinja_environment.get_template(template) jinja_template.stream(**kwargs).dump(ostr)
[ "def", "render", "(", "template", "=", "None", ",", "ostr", "=", "None", ",", "*", "*", "kwargs", ")", ":", "jinja_environment", ".", "filters", "[", "'texscape'", "]", "=", "tex_escape", "template", "=", "template", "or", "DEFAULT_TEMPLATE", "ostr", "=", "ostr", "or", "sys", ".", "stdout", "jinja_template", "=", "jinja_environment", ".", "get_template", "(", "template", ")", "jinja_template", ".", "stream", "(", "*", "*", "kwargs", ")", ".", "dump", "(", "ostr", ")" ]
Generate report from a campaign :param template: Jinja template to use, ``DEFAULT_TEMPLATE`` is used if not specified :param ostr: output file or filename. Default is standard output
[ "Generate", "report", "from", "a", "campaign" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/report.py#L41-L52
sci-bots/svg-model
svg_model/point_query.py
get_shapes_pymunk_space
def get_shapes_pymunk_space(df_convex_shapes, shape_i_columns): ''' Return two-ple containing: - A `pymunk.Space` instance. - A `pandas.Series` mapping each `pymunk.Body` object in the `Space` to a shape index. The `Body` to shape index mapping makes it possible to, for example, look up the index of the convex shape associated with a `Body` returned by a `pymunk` point query in the `Space`. ''' if isinstance(shape_i_columns, bytes): shape_i_columns = [shape_i_columns] space = pm.Space() bodies = [] convex_groups = df_convex_shapes.groupby(shape_i_columns) for shape_i, df_i in convex_groups: if not isinstance(shape_i, (list, tuple)): shape_i = [shape_i] if hasattr(pm.Body, 'STATIC'): # Assume `pymunk>=5.0`, where static bodies must be declared # explicitly. body = pm.Body(body_type=pm.Body.STATIC) else: # Assume `pymunk<5.0`, where bodies are static unless otherwise # specified. body = pm.Body() # Using the code below is about 66% faster than: # `df_i[['x', 'y']].values`. points = [[x, y] for x, y in zip(df_i.x, df_i.y)] poly = pm.Poly(body, points) space.add(poly) bodies.append([body, shape_i[0]]) bodies = None if not bodies else bodies return space, (pd.DataFrame(bodies, columns=['body', shape_i_columns[0]]) .set_index('body')[shape_i_columns[0]])
python
def get_shapes_pymunk_space(df_convex_shapes, shape_i_columns): ''' Return two-ple containing: - A `pymunk.Space` instance. - A `pandas.Series` mapping each `pymunk.Body` object in the `Space` to a shape index. The `Body` to shape index mapping makes it possible to, for example, look up the index of the convex shape associated with a `Body` returned by a `pymunk` point query in the `Space`. ''' if isinstance(shape_i_columns, bytes): shape_i_columns = [shape_i_columns] space = pm.Space() bodies = [] convex_groups = df_convex_shapes.groupby(shape_i_columns) for shape_i, df_i in convex_groups: if not isinstance(shape_i, (list, tuple)): shape_i = [shape_i] if hasattr(pm.Body, 'STATIC'): # Assume `pymunk>=5.0`, where static bodies must be declared # explicitly. body = pm.Body(body_type=pm.Body.STATIC) else: # Assume `pymunk<5.0`, where bodies are static unless otherwise # specified. body = pm.Body() # Using the code below is about 66% faster than: # `df_i[['x', 'y']].values`. points = [[x, y] for x, y in zip(df_i.x, df_i.y)] poly = pm.Poly(body, points) space.add(poly) bodies.append([body, shape_i[0]]) bodies = None if not bodies else bodies return space, (pd.DataFrame(bodies, columns=['body', shape_i_columns[0]]) .set_index('body')[shape_i_columns[0]])
[ "def", "get_shapes_pymunk_space", "(", "df_convex_shapes", ",", "shape_i_columns", ")", ":", "if", "isinstance", "(", "shape_i_columns", ",", "bytes", ")", ":", "shape_i_columns", "=", "[", "shape_i_columns", "]", "space", "=", "pm", ".", "Space", "(", ")", "bodies", "=", "[", "]", "convex_groups", "=", "df_convex_shapes", ".", "groupby", "(", "shape_i_columns", ")", "for", "shape_i", ",", "df_i", "in", "convex_groups", ":", "if", "not", "isinstance", "(", "shape_i", ",", "(", "list", ",", "tuple", ")", ")", ":", "shape_i", "=", "[", "shape_i", "]", "if", "hasattr", "(", "pm", ".", "Body", ",", "'STATIC'", ")", ":", "# Assume `pymunk>=5.0`, where static bodies must be declared", "# explicitly.", "body", "=", "pm", ".", "Body", "(", "body_type", "=", "pm", ".", "Body", ".", "STATIC", ")", "else", ":", "# Assume `pymunk<5.0`, where bodies are static unless otherwise", "# specified.", "body", "=", "pm", ".", "Body", "(", ")", "# Using the code below is about 66% faster than:", "# `df_i[['x', 'y']].values`.", "points", "=", "[", "[", "x", ",", "y", "]", "for", "x", ",", "y", "in", "zip", "(", "df_i", ".", "x", ",", "df_i", ".", "y", ")", "]", "poly", "=", "pm", ".", "Poly", "(", "body", ",", "points", ")", "space", ".", "add", "(", "poly", ")", "bodies", ".", "append", "(", "[", "body", ",", "shape_i", "[", "0", "]", "]", ")", "bodies", "=", "None", "if", "not", "bodies", "else", "bodies", "return", "space", ",", "(", "pd", ".", "DataFrame", "(", "bodies", ",", "columns", "=", "[", "'body'", ",", "shape_i_columns", "[", "0", "]", "]", ")", ".", "set_index", "(", "'body'", ")", "[", "shape_i_columns", "[", "0", "]", "]", ")" ]
Return two-ple containing: - A `pymunk.Space` instance. - A `pandas.Series` mapping each `pymunk.Body` object in the `Space` to a shape index. The `Body` to shape index mapping makes it possible to, for example, look up the index of the convex shape associated with a `Body` returned by a `pymunk` point query in the `Space`.
[ "Return", "two", "-", "ple", "containing", ":" ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/point_query.py#L12-L54
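A minimal usage sketch of get_shapes_pymunk_space from the record above. The import path follows the record's file location, the DataFrame layout (one vertex per row plus an 'id' column) is inferred from the code, and it assumes a pymunk version compatible with that code.

    import pandas as pd
    from svg_model.point_query import get_shapes_pymunk_space

    # One convex shape (a unit square) described vertex-by-vertex.
    df_shapes = pd.DataFrame([['A', 0., 0.], ['A', 1., 0.], ['A', 1., 1.], ['A', 0., 1.]],
                             columns=['id', 'x', 'y'])

    space, bodies = get_shapes_pymunk_space(df_shapes, ['id'])
    # `space` holds one static pymunk body per shape; `bodies` maps each body
    # back to its shape id, e.g. to resolve pymunk point-query hits.
    print(bodies.iloc[0])  # -> 'A'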
eng-tools/sfsimodels
sfsimodels/files.py
load_json
def load_json(ffp, custom=None, verbose=0): """ Given a json file it creates a dictionary of sfsi objects :param ffp: str, Full file path to json file :param custom: dict, used to load custom objects, {model type: custom object} :param verbose: int, console output :return: dict """ data = json.load(open(ffp)) return ecp_dict_to_objects(data, custom, verbose=verbose)
python
def load_json(ffp, custom=None, verbose=0): """ Given a json file it creates a dictionary of sfsi objects :param ffp: str, Full file path to json file :param custom: dict, used to load custom objects, {model type: custom object} :param verbose: int, console output :return: dict """ data = json.load(open(ffp)) return ecp_dict_to_objects(data, custom, verbose=verbose)
[ "def", "load_json", "(", "ffp", ",", "custom", "=", "None", ",", "verbose", "=", "0", ")", ":", "data", "=", "json", ".", "load", "(", "open", "(", "ffp", ")", ")", "return", "ecp_dict_to_objects", "(", "data", ",", "custom", ",", "verbose", "=", "verbose", ")" ]
Given a json file it creates a dictionary of sfsi objects :param ffp: str, Full file path to json file :param custom: dict, used to load custom objects, {model type: custom object} :param verbose: int, console output :return: dict
[ "Given", "a", "json", "file", "it", "creates", "a", "dictionary", "of", "sfsi", "objects" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L11-L21
eng-tools/sfsimodels
sfsimodels/files.py
loads_json
def loads_json(p_str, custom=None, meta=False, verbose=0): """ Given a json string it creates a dictionary of sfsi objects :param ffp: str, Full file path to json file :param custom: dict, used to load custom objects, {model type: custom object} :param meta: bool, if true then also return all ecp meta data in separate dict :param verbose: int, console output :return: dict """ data = json.loads(p_str) if meta: md = {} for item in data: if item != "models": md[item] = data[item] return ecp_dict_to_objects(data, custom, verbose=verbose), md else: return ecp_dict_to_objects(data, custom, verbose=verbose)
python
def loads_json(p_str, custom=None, meta=False, verbose=0): """ Given a json string it creates a dictionary of sfsi objects :param ffp: str, Full file path to json file :param custom: dict, used to load custom objects, {model type: custom object} :param meta: bool, if true then also return all ecp meta data in separate dict :param verbose: int, console output :return: dict """ data = json.loads(p_str) if meta: md = {} for item in data: if item != "models": md[item] = data[item] return ecp_dict_to_objects(data, custom, verbose=verbose), md else: return ecp_dict_to_objects(data, custom, verbose=verbose)
[ "def", "loads_json", "(", "p_str", ",", "custom", "=", "None", ",", "meta", "=", "False", ",", "verbose", "=", "0", ")", ":", "data", "=", "json", ".", "loads", "(", "p_str", ")", "if", "meta", ":", "md", "=", "{", "}", "for", "item", "in", "data", ":", "if", "item", "!=", "\"models\"", ":", "md", "[", "item", "]", "=", "data", "[", "item", "]", "return", "ecp_dict_to_objects", "(", "data", ",", "custom", ",", "verbose", "=", "verbose", ")", ",", "md", "else", ":", "return", "ecp_dict_to_objects", "(", "data", ",", "custom", ",", "verbose", "=", "verbose", ")" ]
Given a json string it creates a dictionary of sfsi objects :param p_str: str, JSON string of the ecp data :param custom: dict, used to load custom objects, {model type: custom object} :param meta: bool, if true then also return all ecp meta data in separate dict :param verbose: int, console output :return: dict
[ "Given", "a", "json", "string", "it", "creates", "a", "dictionary", "of", "sfsi", "objects" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L33-L51
eng-tools/sfsimodels
sfsimodels/files.py
ecp_dict_to_objects
def ecp_dict_to_objects(ecp_dict, custom_map=None, verbose=0): """ Given an ecp dictionary, build a dictionary of sfsi objects :param ecp_dict: dict, engineering consistency project dictionary :param custom: dict, used to load custom objects, {model type: custom object} :param verbose: int, console output :return: dict """ if custom_map is None: custom_map = {} obj_map = { "soil-soil": soils.Soil, "soil-critical_soil": soils.CriticalSoil, "soil-soil_critical": soils.CriticalSoil, # deprecated type "soil-stress_dependent_soil": soils.StressDependentSoil, "soil-soil_stress_dependent": soils.StressDependentSoil, "soil_profile-soil_profile": soils.SoilProfile, "building-building": buildings.Building, "building-frame_building": buildings.FrameBuilding, "building-frame_building2D": buildings.FrameBuilding2D, "building-building_frame2D": buildings.FrameBuilding2D, # deprecated type "building-wall_building": buildings.WallBuilding, "building-building_wall": buildings.WallBuilding, # deprecated type "building-structure": buildings.SDOFBuilding, # Deprecated type, remove in v1 "building-sdof": buildings.SDOFBuilding, "foundation-foundation": foundations.Foundation, "foundation-foundation_raft": foundations.RaftFoundation, # deprecated type "foundation-raft_foundation": foundations.RaftFoundation, "foundation-raft": foundations.RaftFoundation, # Deprecated approach for type, remove in v1 "foundation-pad_foundation": foundations.PadFoundation, "foundation-foundation_pad": foundations.PadFoundation, # deprecated type "section-section": buildings.Section, "custom_object-custom_object": abstract_models.CustomObject, "system-system": systems.SoilStructureSystem, # deprecated type "system-sfs": systems.SoilStructureSystem } # merge and overwrite the object map with custom maps # for item in custom_map: # obj_map[item] = custom_map[item] obj_map = {**obj_map, **custom_map} data_models = ecp_dict["models"] exception_list = [] objs = OrderedDict() collected = set([]) # Set base type properly mtypes = list(data_models) for mtype in mtypes: base_type = mtype if base_type[:-1] in standard_types: # support the loading of old plural based ecp files base_type = base_type[:-1] data_models[base_type] = data_models[mtype] del data_models[mtype] for m_id in data_models[base_type]: data_models[base_type][m_id]["base_type"] = base_type for mtype in data_models: base_type = mtype if base_type in exception_list: continue collected.add(base_type) objs[base_type] = OrderedDict() for m_id in data_models[mtype]: obj = data_models[mtype][m_id] if "type" not in obj: obj["type"] = base_type try: obj_class = obj_map["%s-%s" % (base_type, obj["type"])] except KeyError: if obj["type"] in deprecated_types: try: obj_class = obj_map["%s-%s" % (base_type, deprecated_types[obj["type"]])] except KeyError: raise KeyError("Map for Model: '%s' index: '%s' and type: '%s' not available, " "add '%s-%s' to custom dict" % (base_type, m_id, base_type, base_type, obj["type"])) else: raise KeyError("Map for Model: '%s' index: '%s' and type: '%s' not available, " "add '%s-%s' to custom dict" % (base_type, m_id, base_type, base_type, obj["type"])) try: new_instance = obj_class() except TypeError as e: if "required positional argument:" in str(e): parameters = [str(e).split("argument: ")[-1]] elif "required positional arguments:" in str(e): p_str = str(e).split("arguments: ")[-1] if ", and " in p_str: # if more than 2 partial = p_str.split(", and ") parameters = partial[0].split(", ") + partial[-1:] else: # if one parameters = p_str.split(" and ") else: raise TypeError(e) params = [] for parameter in parameters: parameter = parameter[1:-1] try: params.append(data_models[mtype][m_id][parameter]) except KeyError as e2: # To be removed and just raise exception deprecation("Your file is out of date, " "run sfsimodels.migrate_ecp(<file-path>, <out-file-path>).") if mtype == "building": params = [len(data_models[mtype][m_id]["storey_masses"])] # n_storeys if "frame" in data_models[mtype][m_id]["type"]: params.append(len(data_models[mtype][m_id]["bay_lengths"])) else: raise KeyError("Can't find required positional argument: {0} for {1} id: {2}".format( parameter, mtype, m_id )) new_instance = obj_class(*params) add_to_obj(new_instance, data_models[mtype][m_id], objs=objs, verbose=verbose) # print(mtype, m_id) objs[base_type][int(data_models[mtype][m_id]["id"])] = new_instance # Deal with all the exceptions # for mtype in data_models: # base_type = mtype # # if base_type in collected: # continue # if base_type not in objs: # objs[base_type] = OrderedDict() all_bts = list(objs) for base_type in all_bts: # Support for old style ecp file if base_type in standard_types: objs[base_type + "s"] = objs[base_type] return objs
python
def ecp_dict_to_objects(ecp_dict, custom_map=None, verbose=0): """ Given an ecp dictionary, build a dictionary of sfsi objects :param ecp_dict: dict, engineering consistency project dictionary :param custom: dict, used to load custom objects, {model type: custom object} :param verbose: int, console output :return: dict """ if custom_map is None: custom_map = {} obj_map = { "soil-soil": soils.Soil, "soil-critical_soil": soils.CriticalSoil, "soil-soil_critical": soils.CriticalSoil, # deprecated type "soil-stress_dependent_soil": soils.StressDependentSoil, "soil-soil_stress_dependent": soils.StressDependentSoil, "soil_profile-soil_profile": soils.SoilProfile, "building-building": buildings.Building, "building-frame_building": buildings.FrameBuilding, "building-frame_building2D": buildings.FrameBuilding2D, "building-building_frame2D": buildings.FrameBuilding2D, # deprecated type "building-wall_building": buildings.WallBuilding, "building-building_wall": buildings.WallBuilding, # deprecated type "building-structure": buildings.SDOFBuilding, # Deprecated type, remove in v1 "building-sdof": buildings.SDOFBuilding, "foundation-foundation": foundations.Foundation, "foundation-foundation_raft": foundations.RaftFoundation, # deprecated type "foundation-raft_foundation": foundations.RaftFoundation, "foundation-raft": foundations.RaftFoundation, # Deprecated approach for type, remove in v1 "foundation-pad_foundation": foundations.PadFoundation, "foundation-foundation_pad": foundations.PadFoundation, # deprecated type "section-section": buildings.Section, "custom_object-custom_object": abstract_models.CustomObject, "system-system": systems.SoilStructureSystem, # deprecated type "system-sfs": systems.SoilStructureSystem } # merge and overwrite the object map with custom maps # for item in custom_map: # obj_map[item] = custom_map[item] obj_map = {**obj_map, **custom_map} data_models = ecp_dict["models"] exception_list = [] objs = OrderedDict() collected = set([]) # Set base type properly mtypes = list(data_models) for mtype in mtypes: base_type = mtype if base_type[:-1] in standard_types: # support the loading of old plural based ecp files base_type = base_type[:-1] data_models[base_type] = data_models[mtype] del data_models[mtype] for m_id in data_models[base_type]: data_models[base_type][m_id]["base_type"] = base_type for mtype in data_models: base_type = mtype if base_type in exception_list: continue collected.add(base_type) objs[base_type] = OrderedDict() for m_id in data_models[mtype]: obj = data_models[mtype][m_id] if "type" not in obj: obj["type"] = base_type try: obj_class = obj_map["%s-%s" % (base_type, obj["type"])] except KeyError: if obj["type"] in deprecated_types: try: obj_class = obj_map["%s-%s" % (base_type, deprecated_types[obj["type"]])] except KeyError: raise KeyError("Map for Model: '%s' index: '%s' and type: '%s' not available, " "add '%s-%s' to custom dict" % (base_type, m_id, base_type, base_type, obj["type"])) else: raise KeyError("Map for Model: '%s' index: '%s' and type: '%s' not available, " "add '%s-%s' to custom dict" % (base_type, m_id, base_type, base_type, obj["type"])) try: new_instance = obj_class() except TypeError as e: if "required positional argument:" in str(e): parameters = [str(e).split("argument: ")[-1]] elif "required positional arguments:" in str(e): p_str = str(e).split("arguments: ")[-1] if ", and " in p_str: # if more than 2 partial = p_str.split(", and ") parameters = partial[0].split(", ") + partial[-1:] else: # if one parameters = p_str.split(" and ") 
else: raise TypeError(e) params = [] for parameter in parameters: parameter = parameter[1:-1] try: params.append(data_models[mtype][m_id][parameter]) except KeyError as e2: # To be removed and just raise exception deprecation("Your file is out of date, " "run sfsimodels.migrate_ecp(<file-path>, <out-file-path>).") if mtype == "building": params = [len(data_models[mtype][m_id]["storey_masses"])] # n_storeys if "frame" in data_models[mtype][m_id]["type"]: params.append(len(data_models[mtype][m_id]["bay_lengths"])) else: raise KeyError("Can't find required positional argument: {0} for {1} id: {2}".format( parameter, mtype, m_id )) new_instance = obj_class(*params) add_to_obj(new_instance, data_models[mtype][m_id], objs=objs, verbose=verbose) # print(mtype, m_id) objs[base_type][int(data_models[mtype][m_id]["id"])] = new_instance # Deal with all the exceptions # for mtype in data_models: # base_type = mtype # # if base_type in collected: # continue # if base_type not in objs: # objs[base_type] = OrderedDict() all_bts = list(objs) for base_type in all_bts: # Support for old style ecp file if base_type in standard_types: objs[base_type + "s"] = objs[base_type] return objs
[ "def", "ecp_dict_to_objects", "(", "ecp_dict", ",", "custom_map", "=", "None", ",", "verbose", "=", "0", ")", ":", "if", "custom_map", "is", "None", ":", "custom_map", "=", "{", "}", "obj_map", "=", "{", "\"soil-soil\"", ":", "soils", ".", "Soil", ",", "\"soil-critical_soil\"", ":", "soils", ".", "CriticalSoil", ",", "\"soil-soil_critical\"", ":", "soils", ".", "CriticalSoil", ",", "# deprecated type", "\"soil-stress_dependent_soil\"", ":", "soils", ".", "StressDependentSoil", ",", "\"soil-soil_stress_dependent\"", ":", "soils", ".", "StressDependentSoil", ",", "\"soil_profile-soil_profile\"", ":", "soils", ".", "SoilProfile", ",", "\"building-building\"", ":", "buildings", ".", "Building", ",", "\"building-frame_building\"", ":", "buildings", ".", "FrameBuilding", ",", "\"building-frame_building2D\"", ":", "buildings", ".", "FrameBuilding2D", ",", "\"building-building_frame2D\"", ":", "buildings", ".", "FrameBuilding2D", ",", "# deprecated type", "\"building-wall_building\"", ":", "buildings", ".", "WallBuilding", ",", "\"building-building_wall\"", ":", "buildings", ".", "WallBuilding", ",", "# deprecated type", "\"building-structure\"", ":", "buildings", ".", "SDOFBuilding", ",", "# Deprecated type, remove in v1", "\"building-sdof\"", ":", "buildings", ".", "SDOFBuilding", ",", "\"foundation-foundation\"", ":", "foundations", ".", "Foundation", ",", "\"foundation-foundation_raft\"", ":", "foundations", ".", "RaftFoundation", ",", "# deprecated type", "\"foundation-raft_foundation\"", ":", "foundations", ".", "RaftFoundation", ",", "\"foundation-raft\"", ":", "foundations", ".", "RaftFoundation", ",", "# Deprecated approach for type, remove in v1", "\"foundation-pad_foundation\"", ":", "foundations", ".", "PadFoundation", ",", "\"foundation-foundation_pad\"", ":", "foundations", ".", "PadFoundation", ",", "# deprecated type", "\"section-section\"", ":", "buildings", ".", "Section", ",", "\"custom_object-custom_object\"", ":", "abstract_models", ".", "CustomObject", ",", "\"system-system\"", ":", "systems", ".", "SoilStructureSystem", ",", "# deprecated type", "\"system-sfs\"", ":", "systems", ".", "SoilStructureSystem", "}", "# merge and overwrite the object map with custom maps", "# for item in custom_map:", "# obj_map[item] = custom_map[item]", "obj_map", "=", "{", "*", "*", "obj_map", ",", "*", "*", "custom_map", "}", "data_models", "=", "ecp_dict", "[", "\"models\"", "]", "exception_list", "=", "[", "]", "objs", "=", "OrderedDict", "(", ")", "collected", "=", "set", "(", "[", "]", ")", "# Set base type properly", "mtypes", "=", "list", "(", "data_models", ")", "for", "mtype", "in", "mtypes", ":", "base_type", "=", "mtype", "if", "base_type", "[", ":", "-", "1", "]", "in", "standard_types", ":", "# support the loading of old plural based ecp files", "base_type", "=", "base_type", "[", ":", "-", "1", "]", "data_models", "[", "base_type", "]", "=", "data_models", "[", "mtype", "]", "del", "data_models", "[", "mtype", "]", "for", "m_id", "in", "data_models", "[", "base_type", "]", ":", "data_models", "[", "base_type", "]", "[", "m_id", "]", "[", "\"base_type\"", "]", "=", "base_type", "for", "mtype", "in", "data_models", ":", "base_type", "=", "mtype", "if", "base_type", "in", "exception_list", ":", "continue", "collected", ".", "add", "(", "base_type", ")", "objs", "[", "base_type", "]", "=", "OrderedDict", "(", ")", "for", "m_id", "in", "data_models", "[", "mtype", "]", ":", "obj", "=", "data_models", "[", "mtype", "]", "[", "m_id", "]", "if", "\"type\"", "not", "in", "obj", ":", 
"obj", "[", "\"type\"", "]", "=", "base_type", "try", ":", "obj_class", "=", "obj_map", "[", "\"%s-%s\"", "%", "(", "base_type", ",", "obj", "[", "\"type\"", "]", ")", "]", "except", "KeyError", ":", "if", "obj", "[", "\"type\"", "]", "in", "deprecated_types", ":", "try", ":", "obj_class", "=", "obj_map", "[", "\"%s-%s\"", "%", "(", "base_type", ",", "deprecated_types", "[", "obj", "[", "\"type\"", "]", "]", ")", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"Map for Model: '%s' index: '%s' and type: '%s' not available, \"", "\"add '%s-%s' to custom dict\"", "%", "(", "base_type", ",", "m_id", ",", "base_type", ",", "base_type", ",", "obj", "[", "\"type\"", "]", ")", ")", "else", ":", "raise", "KeyError", "(", "\"Map for Model: '%s' index: '%s' and type: '%s' not available, \"", "\"add '%s-%s' to custom dict\"", "%", "(", "base_type", ",", "m_id", ",", "base_type", ",", "base_type", ",", "obj", "[", "\"type\"", "]", ")", ")", "try", ":", "new_instance", "=", "obj_class", "(", ")", "except", "TypeError", "as", "e", ":", "if", "\"required positional argument:\"", "in", "str", "(", "e", ")", ":", "parameters", "=", "[", "str", "(", "e", ")", ".", "split", "(", "\"argument: \"", ")", "[", "-", "1", "]", "]", "elif", "\"required positional arguments:\"", "in", "str", "(", "e", ")", ":", "p_str", "=", "str", "(", "e", ")", ".", "split", "(", "\"arguments: \"", ")", "[", "-", "1", "]", "if", "\", and \"", "in", "p_str", ":", "# if more than 2", "partial", "=", "p_str", ".", "split", "(", "\", and \"", ")", "parameters", "=", "partial", "[", "0", "]", ".", "split", "(", "\", \"", ")", "+", "partial", "[", "-", "1", ":", "]", "else", ":", "# if one", "parameters", "=", "p_str", ".", "split", "(", "\" and \"", ")", "else", ":", "raise", "TypeError", "(", "e", ")", "params", "=", "[", "]", "for", "parameter", "in", "parameters", ":", "parameter", "=", "parameter", "[", "1", ":", "-", "1", "]", "try", ":", "params", ".", "append", "(", "data_models", "[", "mtype", "]", "[", "m_id", "]", "[", "parameter", "]", ")", "except", "KeyError", "as", "e2", ":", "# To be removed and just raise exception", "deprecation", "(", "\"Your file is out of date, \"", "\"run sfsimodels.migrate_ecp(<file-path>, <out-file-path>).\"", ")", "if", "mtype", "==", "\"building\"", ":", "params", "=", "[", "len", "(", "data_models", "[", "mtype", "]", "[", "m_id", "]", "[", "\"storey_masses\"", "]", ")", "]", "# n_storeys", "if", "\"frame\"", "in", "data_models", "[", "mtype", "]", "[", "m_id", "]", "[", "\"type\"", "]", ":", "params", ".", "append", "(", "len", "(", "data_models", "[", "mtype", "]", "[", "m_id", "]", "[", "\"bay_lengths\"", "]", ")", ")", "else", ":", "raise", "KeyError", "(", "\"Can't find required positional argument: {0} for {1} id: {2}\"", ".", "format", "(", "parameter", ",", "mtype", ",", "m_id", ")", ")", "new_instance", "=", "obj_class", "(", "*", "params", ")", "add_to_obj", "(", "new_instance", ",", "data_models", "[", "mtype", "]", "[", "m_id", "]", ",", "objs", "=", "objs", ",", "verbose", "=", "verbose", ")", "# print(mtype, m_id)", "objs", "[", "base_type", "]", "[", "int", "(", "data_models", "[", "mtype", "]", "[", "m_id", "]", "[", "\"id\"", "]", ")", "]", "=", "new_instance", "# Deal with all the exceptions", "# for mtype in data_models:", "# base_type = mtype", "#", "# if base_type in collected:", "# continue", "# if base_type not in objs:", "# objs[base_type] = OrderedDict()", "all_bts", "=", "list", "(", "objs", ")", "for", "base_type", "in", "all_bts", ":", "# Support for old 
style ecp file", "if", "base_type", "in", "standard_types", ":", "objs", "[", "base_type", "+", "\"s\"", "]", "=", "objs", "[", "base_type", "]", "return", "objs" ]
Given an ecp dictionary, build a dictionary of sfsi objects :param ecp_dict: dict, engineering consistency project dictionary :param custom_map: dict, used to load custom objects, {model type: custom object} :param verbose: int, console output :return: dict
[ "Given", "an", "ecp", "dictionary", "build", "a", "dictionary", "of", "sfsi", "objects" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L71-L202
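The record above shows that ecp_dict_to_objects resolves every model through a "base_type-type" keyed object map and, on an unknown type, raises a KeyError asking the caller to add a "base_type-type" entry to a custom dict. A minimal sketch of supplying such an entry follows; it assumes the sfsimodels package is installed, and CustomPileFoundation and ecp_dict are illustrative placeholders, not part of the record.

# Sketch only: register a user-defined class so an unrecognised type can be instantiated.
from sfsimodels.files import ecp_dict_to_objects  # module path taken from the record above

class CustomPileFoundation:            # hypothetical user model with a no-argument constructor
    def __init__(self):
        self.id = None

custom = {"foundation-pile_foundation": CustomPileFoundation}  # "base_type-type" key, as in the KeyError hint
# objs = ecp_dict_to_objects(ecp_dict, custom_map=custom)      # ecp_dict: a previously loaded ECP dictionary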
eng-tools/sfsimodels
sfsimodels/files.py
migrate_ecp
def migrate_ecp(in_ffp, out_ffp): """Migrates and ECP file to the current version of sfsimodels""" objs, meta_data = load_json_and_meta(in_ffp) ecp_output = Output() for m_type in objs: for instance in objs[m_type]: ecp_output.add_to_dict(objs[m_type][instance]) ecp_output.name = meta_data["name"] ecp_output.units = meta_data["units"] ecp_output.comments = meta_data["comments"] ecp_output.sfsimodels_version = meta_data["sfsimodels_version"] p_str = json.dumps(ecp_output.to_dict(), skipkeys=["__repr__"], indent=4) a = open(out_ffp, "w") a.write(p_str) a.close()
python
def migrate_ecp(in_ffp, out_ffp): """Migrates and ECP file to the current version of sfsimodels""" objs, meta_data = load_json_and_meta(in_ffp) ecp_output = Output() for m_type in objs: for instance in objs[m_type]: ecp_output.add_to_dict(objs[m_type][instance]) ecp_output.name = meta_data["name"] ecp_output.units = meta_data["units"] ecp_output.comments = meta_data["comments"] ecp_output.sfsimodels_version = meta_data["sfsimodels_version"] p_str = json.dumps(ecp_output.to_dict(), skipkeys=["__repr__"], indent=4) a = open(out_ffp, "w") a.write(p_str) a.close()
[ "def", "migrate_ecp", "(", "in_ffp", ",", "out_ffp", ")", ":", "objs", ",", "meta_data", "=", "load_json_and_meta", "(", "in_ffp", ")", "ecp_output", "=", "Output", "(", ")", "for", "m_type", "in", "objs", ":", "for", "instance", "in", "objs", "[", "m_type", "]", ":", "ecp_output", ".", "add_to_dict", "(", "objs", "[", "m_type", "]", "[", "instance", "]", ")", "ecp_output", ".", "name", "=", "meta_data", "[", "\"name\"", "]", "ecp_output", ".", "units", "=", "meta_data", "[", "\"units\"", "]", "ecp_output", ".", "comments", "=", "meta_data", "[", "\"comments\"", "]", "ecp_output", ".", "sfsimodels_version", "=", "meta_data", "[", "\"sfsimodels_version\"", "]", "p_str", "=", "json", ".", "dumps", "(", "ecp_output", ".", "to_dict", "(", ")", ",", "skipkeys", "=", "[", "\"__repr__\"", "]", ",", "indent", "=", "4", ")", "a", "=", "open", "(", "out_ffp", ",", "\"w\"", ")", "a", ".", "write", "(", "p_str", ")", "a", ".", "close", "(", ")" ]
Migrates an ECP file to the current version of sfsimodels
[ "Migrates", "and", "ECP", "file", "to", "the", "current", "version", "of", "sfsimodels" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L287-L301
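Since the deprecation message in an earlier record already points users at sfsimodels.migrate_ecp(<file-path>, <out-file-path>), a one-line usage sketch may help; the file names below are hypothetical and the sfsimodels package is assumed to be installed.

import sfsimodels

# Rewrites an old ECP JSON file using the current schema; paths are placeholders.
sfsimodels.migrate_ecp("old_project.json", "migrated_project.json")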
eng-tools/sfsimodels
sfsimodels/files.py
Output.add_to_dict
def add_to_dict(self, an_object, extras=None): """ Convert models to json serialisable output :param an_object: An instance of a model object :param extras: A dictionary of extra variables that should be :return: """ if an_object.id is None: raise ModelError("id must be set on object before adding to output.") if hasattr(an_object, "base_type"): mtype = an_object.base_type elif hasattr(an_object, "type"): if an_object.type in standard_types: mtype = an_object.type else: mtype = "custom_type" else: raise ModelError("Object does not have attribute 'base_type' or 'type', cannot add to output.") if mtype not in self.unordered_models: # Catch any custom objects self.unordered_models[mtype] = OrderedDict() if hasattr(an_object, "add_to_dict"): an_object.add_to_dict(self.unordered_models) elif hasattr(an_object, "to_dict"): self.unordered_models[mtype][an_object.unique_hash] = an_object.to_dict(compression=self.compression) else: raise ModelError("Object does not have method 'to_dict', cannot add to output.")
python
def add_to_dict(self, an_object, extras=None): """ Convert models to json serialisable output :param an_object: An instance of a model object :param extras: A dictionary of extra variables that should be :return: """ if an_object.id is None: raise ModelError("id must be set on object before adding to output.") if hasattr(an_object, "base_type"): mtype = an_object.base_type elif hasattr(an_object, "type"): if an_object.type in standard_types: mtype = an_object.type else: mtype = "custom_type" else: raise ModelError("Object does not have attribute 'base_type' or 'type', cannot add to output.") if mtype not in self.unordered_models: # Catch any custom objects self.unordered_models[mtype] = OrderedDict() if hasattr(an_object, "add_to_dict"): an_object.add_to_dict(self.unordered_models) elif hasattr(an_object, "to_dict"): self.unordered_models[mtype][an_object.unique_hash] = an_object.to_dict(compression=self.compression) else: raise ModelError("Object does not have method 'to_dict', cannot add to output.")
[ "def", "add_to_dict", "(", "self", ",", "an_object", ",", "extras", "=", "None", ")", ":", "if", "an_object", ".", "id", "is", "None", ":", "raise", "ModelError", "(", "\"id must be set on object before adding to output.\"", ")", "if", "hasattr", "(", "an_object", ",", "\"base_type\"", ")", ":", "mtype", "=", "an_object", ".", "base_type", "elif", "hasattr", "(", "an_object", ",", "\"type\"", ")", ":", "if", "an_object", ".", "type", "in", "standard_types", ":", "mtype", "=", "an_object", ".", "type", "else", ":", "mtype", "=", "\"custom_type\"", "else", ":", "raise", "ModelError", "(", "\"Object does not have attribute 'base_type' or 'type', cannot add to output.\"", ")", "if", "mtype", "not", "in", "self", ".", "unordered_models", ":", "# Catch any custom objects", "self", ".", "unordered_models", "[", "mtype", "]", "=", "OrderedDict", "(", ")", "if", "hasattr", "(", "an_object", ",", "\"add_to_dict\"", ")", ":", "an_object", ".", "add_to_dict", "(", "self", ".", "unordered_models", ")", "elif", "hasattr", "(", "an_object", ",", "\"to_dict\"", ")", ":", "self", ".", "unordered_models", "[", "mtype", "]", "[", "an_object", ".", "unique_hash", "]", "=", "an_object", ".", "to_dict", "(", "compression", "=", "self", ".", "compression", ")", "else", ":", "raise", "ModelError", "(", "\"Object does not have method 'to_dict', cannot add to output.\"", ")" ]
Convert models to json serialisable output :param an_object: An instance of a model object :param extras: A dictionary of extra variables that should be :return:
[ "Convert", "models", "to", "json", "serialisable", "output" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L216-L244
eng-tools/sfsimodels
sfsimodels/files.py
Output.add_to_output
def add_to_output(self, mtype, m_id, serialisable_dict): """ Can add additional objects or dictionaries to output file that don't conform to standard objects. :param mtype: :param m_id: :param serialisable_dict: :return: """ if mtype not in self.unordered_models: self.unordered_models[mtype] = OrderedDict() self.unordered_models[mtype][m_id] = serialisable_dict
python
def add_to_output(self, mtype, m_id, serialisable_dict): """ Can add additional objects or dictionaries to output file that don't conform to standard objects. :param mtype: :param m_id: :param serialisable_dict: :return: """ if mtype not in self.unordered_models: self.unordered_models[mtype] = OrderedDict() self.unordered_models[mtype][m_id] = serialisable_dict
[ "def", "add_to_output", "(", "self", ",", "mtype", ",", "m_id", ",", "serialisable_dict", ")", ":", "if", "mtype", "not", "in", "self", ".", "unordered_models", ":", "self", ".", "unordered_models", "[", "mtype", "]", "=", "OrderedDict", "(", ")", "self", ".", "unordered_models", "[", "mtype", "]", "[", "m_id", "]", "=", "serialisable_dict" ]
Can add additional objects or dictionaries to output file that don't conform to standard objects. :param mtype: :param m_id: :param serialisable_dict: :return:
[ "Can", "add", "additional", "objects", "or", "dictionaries", "to", "output", "file", "that", "don", "t", "conform", "to", "standard", "objects", "." ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L246-L257
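Output.add_to_output above only stores a serialisable dict under a model type and id, so a short hedged sketch of attaching non-standard data looks like this; the sfsimodels package is assumed to be installed and the type name and payload are made up for illustration.

from sfsimodels.files import Output

out = Output()
# Attach extra, non-standard data under its own model type and id.
out.add_to_output("analysis_settings", 1, {"solver": "explicit", "dt": 0.001})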
eng-tools/sfsimodels
sfsimodels/files.py
Output.models
def models(self): """Unhashed""" models_dict = OrderedDict() collected = [] for item in standard_types: if item in self.unordered_models: new_dict, replacement_dict = unhash_dict(self.unordered_models[item]) models_dict[item] = new_dict collected.append(item) for item in self.unordered_models: # print("item: ", item) if item not in collected: new_dict, replacement_dict = unhash_dict(self.unordered_models[item]) models_dict[item] = new_dict return models_dict
python
def models(self): """Unhashed""" models_dict = OrderedDict() collected = [] for item in standard_types: if item in self.unordered_models: new_dict, replacement_dict = unhash_dict(self.unordered_models[item]) models_dict[item] = new_dict collected.append(item) for item in self.unordered_models: # print("item: ", item) if item not in collected: new_dict, replacement_dict = unhash_dict(self.unordered_models[item]) models_dict[item] = new_dict return models_dict
[ "def", "models", "(", "self", ")", ":", "models_dict", "=", "OrderedDict", "(", ")", "collected", "=", "[", "]", "for", "item", "in", "standard_types", ":", "if", "item", "in", "self", ".", "unordered_models", ":", "new_dict", ",", "replacement_dict", "=", "unhash_dict", "(", "self", ".", "unordered_models", "[", "item", "]", ")", "models_dict", "[", "item", "]", "=", "new_dict", "collected", ".", "append", "(", "item", ")", "for", "item", "in", "self", ".", "unordered_models", ":", "# print(\"item: \", item)", "if", "item", "not", "in", "collected", ":", "new_dict", ",", "replacement_dict", "=", "unhash_dict", "(", "self", ".", "unordered_models", "[", "item", "]", ")", "models_dict", "[", "item", "]", "=", "new_dict", "return", "models_dict" ]
Unhashed
[ "Unhashed" ]
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L260-L274
PolyJIT/benchbuild
benchbuild/utils/settings.py
available_cpu_count
def available_cpu_count() -> int: """ Get the number of available CPUs. Number of available virtual or physical CPUs on this system, i.e. user/real as output by time(1) when called with an optimally scaling userspace-only program. Returns: Number of avaialable CPUs. """ # cpuset # cpuset may restrict the number of *available* processors try: match = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', open('/proc/self/status').read()) if match: res = bin(int(match.group(1).replace(',', ''), 16)).count('1') if res > 0: return res except IOError: LOG.debug("Could not get the number of allowed CPUs") # http://code.google.com/p/psutil/ try: import psutil return psutil.cpu_count() # psutil.NUM_CPUS on old versions except (ImportError, AttributeError): LOG.debug("Could not get the number of allowed CPUs") # POSIX try: res = int(os.sysconf('SC_NPROCESSORS_ONLN')) if res > 0: return res except (AttributeError, ValueError): LOG.debug("Could not get the number of allowed CPUs") # Linux try: res = open('/proc/cpuinfo').read().count('processor\t:') if res > 0: return res except IOError: LOG.debug("Could not get the number of allowed CPUs") raise Exception('Can not determine number of CPUs on this system')
python
def available_cpu_count() -> int: """ Get the number of available CPUs. Number of available virtual or physical CPUs on this system, i.e. user/real as output by time(1) when called with an optimally scaling userspace-only program. Returns: Number of avaialable CPUs. """ # cpuset # cpuset may restrict the number of *available* processors try: match = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', open('/proc/self/status').read()) if match: res = bin(int(match.group(1).replace(',', ''), 16)).count('1') if res > 0: return res except IOError: LOG.debug("Could not get the number of allowed CPUs") # http://code.google.com/p/psutil/ try: import psutil return psutil.cpu_count() # psutil.NUM_CPUS on old versions except (ImportError, AttributeError): LOG.debug("Could not get the number of allowed CPUs") # POSIX try: res = int(os.sysconf('SC_NPROCESSORS_ONLN')) if res > 0: return res except (AttributeError, ValueError): LOG.debug("Could not get the number of allowed CPUs") # Linux try: res = open('/proc/cpuinfo').read().count('processor\t:') if res > 0: return res except IOError: LOG.debug("Could not get the number of allowed CPUs") raise Exception('Can not determine number of CPUs on this system')
[ "def", "available_cpu_count", "(", ")", "->", "int", ":", "# cpuset", "# cpuset may restrict the number of *available* processors", "try", ":", "match", "=", "re", ".", "search", "(", "r'(?m)^Cpus_allowed:\\s*(.*)$'", ",", "open", "(", "'/proc/self/status'", ")", ".", "read", "(", ")", ")", "if", "match", ":", "res", "=", "bin", "(", "int", "(", "match", ".", "group", "(", "1", ")", ".", "replace", "(", "','", ",", "''", ")", ",", "16", ")", ")", ".", "count", "(", "'1'", ")", "if", "res", ">", "0", ":", "return", "res", "except", "IOError", ":", "LOG", ".", "debug", "(", "\"Could not get the number of allowed CPUs\"", ")", "# http://code.google.com/p/psutil/", "try", ":", "import", "psutil", "return", "psutil", ".", "cpu_count", "(", ")", "# psutil.NUM_CPUS on old versions", "except", "(", "ImportError", ",", "AttributeError", ")", ":", "LOG", ".", "debug", "(", "\"Could not get the number of allowed CPUs\"", ")", "# POSIX", "try", ":", "res", "=", "int", "(", "os", ".", "sysconf", "(", "'SC_NPROCESSORS_ONLN'", ")", ")", "if", "res", ">", "0", ":", "return", "res", "except", "(", "AttributeError", ",", "ValueError", ")", ":", "LOG", ".", "debug", "(", "\"Could not get the number of allowed CPUs\"", ")", "# Linux", "try", ":", "res", "=", "open", "(", "'/proc/cpuinfo'", ")", ".", "read", "(", ")", ".", "count", "(", "'processor\\t:'", ")", "if", "res", ">", "0", ":", "return", "res", "except", "IOError", ":", "LOG", ".", "debug", "(", "\"Could not get the number of allowed CPUs\"", ")", "raise", "Exception", "(", "'Can not determine number of CPUs on this system'", ")" ]
Get the number of available CPUs. Number of available virtual or physical CPUs on this system, i.e. user/real as output by time(1) when called with an optimally scaling userspace-only program. Returns: Number of available CPUs.
[ "Get", "the", "number", "of", "available", "CPUs", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L36-L85
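The least obvious step in available_cpu_count is turning the hexadecimal Cpus_allowed mask from /proc/self/status into a CPU count. The snippet below re-derives just that step as a standalone helper; the function name cpus_from_allowed_mask is mine, not part of benchbuild.

import re

def cpus_from_allowed_mask(status_text: str) -> int:
    """Count the set bits of the Cpus_allowed mask found in a /proc/<pid>/status dump."""
    match = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', status_text)
    if not match:
        return 0
    # Strip the comma group separators, parse as hex, count the 1-bits.
    return bin(int(match.group(1).replace(',', ''), 16)).count('1')

print(cpus_from_allowed_mask("Cpus_allowed:\tff"))  # -> 8 usable CPUs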
PolyJIT/benchbuild
benchbuild/utils/settings.py
escape_yaml
def escape_yaml(raw_str: str) -> str: """ Shell-Escape a yaml input string. Args: raw_str: The unescaped string. """ escape_list = [char for char in raw_str if char in ['!', '{', '[']] if len(escape_list) == 0: return raw_str str_quotes = '"' i_str_quotes = "'" if str_quotes in raw_str and str_quotes not in raw_str[1:-1]: return raw_str if str_quotes in raw_str[1:-1]: raw_str = i_str_quotes + raw_str + i_str_quotes else: raw_str = str_quotes + raw_str + str_quotes return raw_str
python
def escape_yaml(raw_str: str) -> str: """ Shell-Escape a yaml input string. Args: raw_str: The unescaped string. """ escape_list = [char for char in raw_str if char in ['!', '{', '[']] if len(escape_list) == 0: return raw_str str_quotes = '"' i_str_quotes = "'" if str_quotes in raw_str and str_quotes not in raw_str[1:-1]: return raw_str if str_quotes in raw_str[1:-1]: raw_str = i_str_quotes + raw_str + i_str_quotes else: raw_str = str_quotes + raw_str + str_quotes return raw_str
[ "def", "escape_yaml", "(", "raw_str", ":", "str", ")", "->", "str", ":", "escape_list", "=", "[", "char", "for", "char", "in", "raw_str", "if", "char", "in", "[", "'!'", ",", "'{'", ",", "'['", "]", "]", "if", "len", "(", "escape_list", ")", "==", "0", ":", "return", "raw_str", "str_quotes", "=", "'\"'", "i_str_quotes", "=", "\"'\"", "if", "str_quotes", "in", "raw_str", "and", "str_quotes", "not", "in", "raw_str", "[", "1", ":", "-", "1", "]", ":", "return", "raw_str", "if", "str_quotes", "in", "raw_str", "[", "1", ":", "-", "1", "]", ":", "raw_str", "=", "i_str_quotes", "+", "raw_str", "+", "i_str_quotes", "else", ":", "raw_str", "=", "str_quotes", "+", "raw_str", "+", "str_quotes", "return", "raw_str" ]
Shell-Escape a yaml input string. Args: raw_str: The unescaped string.
[ "Shell", "-", "Escape", "a", "yaml", "input", "string", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L92-L112
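Based on the escape_yaml code above, strings without YAML flow characters pass through unchanged, while strings containing '!', '{' or '[' get quoted. A small usage sketch, assuming benchbuild is installed:

from benchbuild.utils.settings import escape_yaml

print(escape_yaml("simple-value"))       # no '!', '{' or '[' -> returned unchanged
print(escape_yaml("[/opt/a, /opt/b]"))   # contains '[' -> wrapped in double quotes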
PolyJIT/benchbuild
benchbuild/utils/settings.py
to_yaml
def to_yaml(value) -> str: """Convert a given value to a YAML string.""" stream = yaml.io.StringIO() dumper = ConfigDumper(stream, default_flow_style=True, width=sys.maxsize) val = None try: dumper.open() dumper.represent(value) val = stream.getvalue().strip() dumper.close() finally: dumper.dispose() return val
python
def to_yaml(value) -> str: """Convert a given value to a YAML string.""" stream = yaml.io.StringIO() dumper = ConfigDumper(stream, default_flow_style=True, width=sys.maxsize) val = None try: dumper.open() dumper.represent(value) val = stream.getvalue().strip() dumper.close() finally: dumper.dispose() return val
[ "def", "to_yaml", "(", "value", ")", "->", "str", ":", "stream", "=", "yaml", ".", "io", ".", "StringIO", "(", ")", "dumper", "=", "ConfigDumper", "(", "stream", ",", "default_flow_style", "=", "True", ",", "width", "=", "sys", ".", "maxsize", ")", "val", "=", "None", "try", ":", "dumper", ".", "open", "(", ")", "dumper", ".", "represent", "(", "value", ")", "val", "=", "stream", ".", "getvalue", "(", ")", ".", "strip", "(", ")", "dumper", ".", "close", "(", ")", "finally", ":", "dumper", ".", "dispose", "(", ")", "return", "val" ]
Convert a given value to a YAML string.
[ "Convert", "a", "given", "value", "to", "a", "YAML", "string", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L130-L143
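to_yaml above drives a ConfigDumper directly to obtain a single-line, flow-style YAML rendering of a value. A minimal call, assuming benchbuild is installed; the exact output string is illustrative:

from benchbuild.utils.settings import to_yaml

rendered = to_yaml({"jobs": 4, "verbose": True})
print(rendered)  # expected flow-style output along the lines of: {jobs: 4, verbose: true}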
PolyJIT/benchbuild
benchbuild/utils/settings.py
to_env_var
def to_env_var(env_var: str, value) -> str: """ Create an environment variable from a name and a value. This generates a shell-compatible representation of an environment variable that is assigned a YAML representation of a value. Args: env_var (str): Name of the environment variable. value (Any): A value we convert from. """ val = to_yaml(value) ret_val = "%s=%s" % (env_var, escape_yaml(val)) return ret_val
python
def to_env_var(env_var: str, value) -> str: """ Create an environment variable from a name and a value. This generates a shell-compatible representation of an environment variable that is assigned a YAML representation of a value. Args: env_var (str): Name of the environment variable. value (Any): A value we convert from. """ val = to_yaml(value) ret_val = "%s=%s" % (env_var, escape_yaml(val)) return ret_val
[ "def", "to_env_var", "(", "env_var", ":", "str", ",", "value", ")", "->", "str", ":", "val", "=", "to_yaml", "(", "value", ")", "ret_val", "=", "\"%s=%s\"", "%", "(", "env_var", ",", "escape_yaml", "(", "val", ")", ")", "return", "ret_val" ]
Create an environment variable from a name and a value. This generates a shell-compatible representation of an environment variable that is assigned a YAML representation of a value. Args: env_var (str): Name of the environment variable. value (Any): A value we convert from.
[ "Create", "an", "environment", "variable", "from", "a", "name", "and", "a", "value", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L146-L160
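to_env_var combines to_yaml and escape_yaml into a shell-ready NAME=value assignment. A short hedged sketch, assuming benchbuild is installed; the variable names are made up:

from benchbuild.utils.settings import to_env_var

# The value part is the YAML rendering of the object, quoted by escape_yaml
# whenever it contains YAML flow characters such as '['.
print(to_env_var("BB_JOBS", 4))
print(to_env_var("BB_PATHS", ["/opt/a", "/opt/b"]))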
PolyJIT/benchbuild
benchbuild/utils/settings.py
find_config
def find_config(test_file=None, defaults=None, root=os.curdir): """ Find the path to the default config file. We look at :root: for the :default: config file. If we can't find it there we start looking at the parent directory recursively until we find a file named :default: and return the absolute path to it. If we can't find anything, we return None. Args: default: The name of the config file we look for. root: The directory to start looking for. Returns: Path to the default config file, None if we can't find anything. """ if defaults is None: defaults = [".benchbuild.yml", ".benchbuild.yaml"] def walk_rec(cur_path, root): cur_path = local.path(root) / test_file if cur_path.exists(): return cur_path new_root = local.path(root) / os.pardir return walk_rec(cur_path, new_root) if new_root != root else None if test_file is not None: return walk_rec(test_file, root) for test_file in defaults: ret = walk_rec(test_file, root) if ret is not None: return ret
python
def find_config(test_file=None, defaults=None, root=os.curdir): """ Find the path to the default config file. We look at :root: for the :default: config file. If we can't find it there we start looking at the parent directory recursively until we find a file named :default: and return the absolute path to it. If we can't find anything, we return None. Args: default: The name of the config file we look for. root: The directory to start looking for. Returns: Path to the default config file, None if we can't find anything. """ if defaults is None: defaults = [".benchbuild.yml", ".benchbuild.yaml"] def walk_rec(cur_path, root): cur_path = local.path(root) / test_file if cur_path.exists(): return cur_path new_root = local.path(root) / os.pardir return walk_rec(cur_path, new_root) if new_root != root else None if test_file is not None: return walk_rec(test_file, root) for test_file in defaults: ret = walk_rec(test_file, root) if ret is not None: return ret
[ "def", "find_config", "(", "test_file", "=", "None", ",", "defaults", "=", "None", ",", "root", "=", "os", ".", "curdir", ")", ":", "if", "defaults", "is", "None", ":", "defaults", "=", "[", "\".benchbuild.yml\"", ",", "\".benchbuild.yaml\"", "]", "def", "walk_rec", "(", "cur_path", ",", "root", ")", ":", "cur_path", "=", "local", ".", "path", "(", "root", ")", "/", "test_file", "if", "cur_path", ".", "exists", "(", ")", ":", "return", "cur_path", "new_root", "=", "local", ".", "path", "(", "root", ")", "/", "os", ".", "pardir", "return", "walk_rec", "(", "cur_path", ",", "new_root", ")", "if", "new_root", "!=", "root", "else", "None", "if", "test_file", "is", "not", "None", ":", "return", "walk_rec", "(", "test_file", ",", "root", ")", "for", "test_file", "in", "defaults", ":", "ret", "=", "walk_rec", "(", "test_file", ",", "root", ")", "if", "ret", "is", "not", "None", ":", "return", "ret" ]
Find the path to the default config file. We look at :root: for the :default: config file. If we can't find it there we start looking at the parent directory recursively until we find a file named :default: and return the absolute path to it. If we can't find anything, we return None. Args: default: The name of the config file we look for. root: The directory to start looking for. Returns: Path to the default config file, None if we can't find anything.
[ "Find", "the", "path", "to", "the", "default", "config", "file", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L552-L585
PolyJIT/benchbuild
benchbuild/utils/settings.py
setup_config
def setup_config(cfg, config_filenames=None, env_var_name=None): """ This will initialize the given configuration object. The following resources are available in the same order: 1) Default settings. 2) Config file. 3) Environment variables. WARNING: Environment variables do _not_ take precedence over the config file right now. (init_from_env will refuse to update the value, if there is already one.) Args: config_filenames: list of possible config filenames env_var_name: name of the environment variable holding the config path """ if env_var_name is None: env_var_name = "BB_CONFIG_FILE" config_path = os.getenv(env_var_name, None) if not config_path: config_path = find_config(defaults=config_filenames) if config_path: cfg.load(config_path) cfg["config_file"] = os.path.abspath(config_path) cfg.init_from_env()
python
def setup_config(cfg, config_filenames=None, env_var_name=None): """ This will initialize the given configuration object. The following resources are available in the same order: 1) Default settings. 2) Config file. 3) Environment variables. WARNING: Environment variables do _not_ take precedence over the config file right now. (init_from_env will refuse to update the value, if there is already one.) Args: config_filenames: list of possible config filenames env_var_name: name of the environment variable holding the config path """ if env_var_name is None: env_var_name = "BB_CONFIG_FILE" config_path = os.getenv(env_var_name, None) if not config_path: config_path = find_config(defaults=config_filenames) if config_path: cfg.load(config_path) cfg["config_file"] = os.path.abspath(config_path) cfg.init_from_env()
[ "def", "setup_config", "(", "cfg", ",", "config_filenames", "=", "None", ",", "env_var_name", "=", "None", ")", ":", "if", "env_var_name", "is", "None", ":", "env_var_name", "=", "\"BB_CONFIG_FILE\"", "config_path", "=", "os", ".", "getenv", "(", "env_var_name", ",", "None", ")", "if", "not", "config_path", ":", "config_path", "=", "find_config", "(", "defaults", "=", "config_filenames", ")", "if", "config_path", ":", "cfg", ".", "load", "(", "config_path", ")", "cfg", "[", "\"config_file\"", "]", "=", "os", ".", "path", ".", "abspath", "(", "config_path", ")", "cfg", ".", "init_from_env", "(", ")" ]
This will initialize the given configuration object. The following resources are available in the same order: 1) Default settings. 2) Config file. 3) Environment variables. WARNING: Environment variables do _not_ take precedence over the config file right now. (init_from_env will refuse to update the value, if there is already one.) Args: config_filenames: list of possible config filenames env_var_name: name of the environment variable holding the config path
[ "This", "will", "initialize", "the", "given", "configuration", "object", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L588-L615
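Putting the find_config and setup_config records together, a typical initialisation might look like the sketch below: BB_CONFIG_FILE wins, then a discovered .benchbuild.yml, then environment variables. The root name "bb" is an assumption, not taken from the records.

from benchbuild.utils.settings import Configuration, setup_config

cfg = Configuration("bb")                       # root name is illustrative
# Uses BB_CONFIG_FILE if set, otherwise searches for the default file names,
# and finally fills values from matching environment variables.
setup_config(cfg, config_filenames=[".benchbuild.yml", ".benchbuild.yaml"])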
PolyJIT/benchbuild
benchbuild/utils/settings.py
upgrade
def upgrade(cfg): """Provide forward migration for configuration files.""" db_node = cfg["db"] old_db_elems = ["host", "name", "port", "pass", "user", "dialect"] has_old_db_elems = [x in db_node for x in old_db_elems] if any(has_old_db_elems): print("Old database configuration found. " "Converting to new connect_string. " "This will *not* be stored in the configuration automatically.") cfg["db"]["connect_string"] = \ "{dialect}://{user}:{password}@{host}:{port}/{name}".format( dialect=cfg["db"]["dialect"]["value"], user=cfg["db"]["user"]["value"], password=cfg["db"]["pass"]["value"], host=cfg["db"]["host"]["value"], port=cfg["db"]["port"]["value"], name=cfg["db"]["name"]["value"])
python
def upgrade(cfg): """Provide forward migration for configuration files.""" db_node = cfg["db"] old_db_elems = ["host", "name", "port", "pass", "user", "dialect"] has_old_db_elems = [x in db_node for x in old_db_elems] if any(has_old_db_elems): print("Old database configuration found. " "Converting to new connect_string. " "This will *not* be stored in the configuration automatically.") cfg["db"]["connect_string"] = \ "{dialect}://{user}:{password}@{host}:{port}/{name}".format( dialect=cfg["db"]["dialect"]["value"], user=cfg["db"]["user"]["value"], password=cfg["db"]["pass"]["value"], host=cfg["db"]["host"]["value"], port=cfg["db"]["port"]["value"], name=cfg["db"]["name"]["value"])
[ "def", "upgrade", "(", "cfg", ")", ":", "db_node", "=", "cfg", "[", "\"db\"", "]", "old_db_elems", "=", "[", "\"host\"", ",", "\"name\"", ",", "\"port\"", ",", "\"pass\"", ",", "\"user\"", ",", "\"dialect\"", "]", "has_old_db_elems", "=", "[", "x", "in", "db_node", "for", "x", "in", "old_db_elems", "]", "if", "any", "(", "has_old_db_elems", ")", ":", "print", "(", "\"Old database configuration found. \"", "\"Converting to new connect_string. \"", "\"This will *not* be stored in the configuration automatically.\"", ")", "cfg", "[", "\"db\"", "]", "[", "\"connect_string\"", "]", "=", "\"{dialect}://{user}:{password}@{host}:{port}/{name}\"", ".", "format", "(", "dialect", "=", "cfg", "[", "\"db\"", "]", "[", "\"dialect\"", "]", "[", "\"value\"", "]", ",", "user", "=", "cfg", "[", "\"db\"", "]", "[", "\"user\"", "]", "[", "\"value\"", "]", ",", "password", "=", "cfg", "[", "\"db\"", "]", "[", "\"pass\"", "]", "[", "\"value\"", "]", ",", "host", "=", "cfg", "[", "\"db\"", "]", "[", "\"host\"", "]", "[", "\"value\"", "]", ",", "port", "=", "cfg", "[", "\"db\"", "]", "[", "\"port\"", "]", "[", "\"value\"", "]", ",", "name", "=", "cfg", "[", "\"db\"", "]", "[", "\"name\"", "]", "[", "\"value\"", "]", ")" ]
Provide forward migration for configuration files.
[ "Provide", "forward", "migration", "for", "configuration", "files", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L640-L657
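upgrade only reads nested ['value'] entries under cfg["db"], so a plain nested dict is enough to see the migration; the credentials below are dummies and the dict merely stands in for a real Configuration node.

from benchbuild.utils.settings import upgrade

cfg = {"db": {
    "dialect": {"value": "postgresql"},
    "user": {"value": "bb"},
    "pass": {"value": "secret"},
    "host": {"value": "localhost"},
    "port": {"value": 5432},
    "name": {"value": "benchbuild"},
}}
upgrade(cfg)   # prints a notice and builds the new connect_string
print(cfg["db"]["connect_string"])
# expected (from the format string above): postgresql://bb:secret@localhost:5432/benchbuild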
PolyJIT/benchbuild
benchbuild/utils/settings.py
uuid_constructor
def uuid_constructor(loader, node): """" Construct a uuid.UUID object form a scalar YAML node. Tests: >>> yaml.add_constructor("!uuid", uuid_constructor, Loader=yaml.SafeLoader) >>> yaml.safe_load("{'test': !uuid 'cc3702ca-699a-4aa6-8226-4c938f294d9b'}") {'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')} """ value = loader.construct_scalar(node) return uuid.UUID(value)
python
def uuid_constructor(loader, node): """" Construct a uuid.UUID object form a scalar YAML node. Tests: >>> yaml.add_constructor("!uuid", uuid_constructor, Loader=yaml.SafeLoader) >>> yaml.safe_load("{'test': !uuid 'cc3702ca-699a-4aa6-8226-4c938f294d9b'}") {'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')} """ value = loader.construct_scalar(node) return uuid.UUID(value)
[ "def", "uuid_constructor", "(", "loader", ",", "node", ")", ":", "value", "=", "loader", ".", "construct_scalar", "(", "node", ")", "return", "uuid", ".", "UUID", "(", "value", ")" ]
Construct a uuid.UUID object from a scalar YAML node. Tests: >>> yaml.add_constructor("!uuid", uuid_constructor, Loader=yaml.SafeLoader) >>> yaml.safe_load("{'test': !uuid 'cc3702ca-699a-4aa6-8226-4c938f294d9b'}") {'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')}
[ "Construct", "a", "uuid", ".", "UUID", "object", "form", "a", "scalar", "YAML", "node", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L672-L683
PolyJIT/benchbuild
benchbuild/utils/settings.py
uuid_add_implicit_resolver
def uuid_add_implicit_resolver(Loader=ConfigLoader, Dumper=ConfigDumper): """ Attach an implicit pattern resolver for UUID objects. Tests: >>> class TestDumper(yaml.SafeDumper): pass >>> class TestLoader(yaml.SafeLoader): pass >>> TUUID = 'cc3702ca-699a-4aa6-8226-4c938f294d9b' >>> IN = {'test': uuid.UUID(TUUID)} >>> OUT = '{test: cc3702ca-699a-4aa6-8226-4c938f294d9b}' >>> yaml.add_representer(uuid.UUID, uuid_representer, Dumper=TestDumper) >>> yaml.add_constructor('!uuid', uuid_constructor, Loader=TestLoader) >>> uuid_add_implicit_resolver(Loader=TestLoader, Dumper=TestDumper) >>> yaml.dump(IN, Dumper=TestDumper) 'test: cc3702ca-699a-4aa6-8226-4c938f294d9b\\n' >>> yaml.load(OUT, Loader=TestLoader) {'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')} """ uuid_regex = r'^\b[a-f0-9]{8}-\b[a-f0-9]{4}-\b[a-f0-9]{4}-\b[a-f0-9]{4}-\b[a-f0-9]{12}$' pattern = re.compile(uuid_regex) yaml.add_implicit_resolver('!uuid', pattern, Loader=Loader, Dumper=Dumper)
python
def uuid_add_implicit_resolver(Loader=ConfigLoader, Dumper=ConfigDumper): """ Attach an implicit pattern resolver for UUID objects. Tests: >>> class TestDumper(yaml.SafeDumper): pass >>> class TestLoader(yaml.SafeLoader): pass >>> TUUID = 'cc3702ca-699a-4aa6-8226-4c938f294d9b' >>> IN = {'test': uuid.UUID(TUUID)} >>> OUT = '{test: cc3702ca-699a-4aa6-8226-4c938f294d9b}' >>> yaml.add_representer(uuid.UUID, uuid_representer, Dumper=TestDumper) >>> yaml.add_constructor('!uuid', uuid_constructor, Loader=TestLoader) >>> uuid_add_implicit_resolver(Loader=TestLoader, Dumper=TestDumper) >>> yaml.dump(IN, Dumper=TestDumper) 'test: cc3702ca-699a-4aa6-8226-4c938f294d9b\\n' >>> yaml.load(OUT, Loader=TestLoader) {'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')} """ uuid_regex = r'^\b[a-f0-9]{8}-\b[a-f0-9]{4}-\b[a-f0-9]{4}-\b[a-f0-9]{4}-\b[a-f0-9]{12}$' pattern = re.compile(uuid_regex) yaml.add_implicit_resolver('!uuid', pattern, Loader=Loader, Dumper=Dumper)
[ "def", "uuid_add_implicit_resolver", "(", "Loader", "=", "ConfigLoader", ",", "Dumper", "=", "ConfigDumper", ")", ":", "uuid_regex", "=", "r'^\\b[a-f0-9]{8}-\\b[a-f0-9]{4}-\\b[a-f0-9]{4}-\\b[a-f0-9]{4}-\\b[a-f0-9]{12}$'", "pattern", "=", "re", ".", "compile", "(", "uuid_regex", ")", "yaml", ".", "add_implicit_resolver", "(", "'!uuid'", ",", "pattern", ",", "Loader", "=", "Loader", ",", "Dumper", "=", "Dumper", ")" ]
Attach an implicit pattern resolver for UUID objects. Tests: >>> class TestDumper(yaml.SafeDumper): pass >>> class TestLoader(yaml.SafeLoader): pass >>> TUUID = 'cc3702ca-699a-4aa6-8226-4c938f294d9b' >>> IN = {'test': uuid.UUID(TUUID)} >>> OUT = '{test: cc3702ca-699a-4aa6-8226-4c938f294d9b}' >>> yaml.add_representer(uuid.UUID, uuid_representer, Dumper=TestDumper) >>> yaml.add_constructor('!uuid', uuid_constructor, Loader=TestLoader) >>> uuid_add_implicit_resolver(Loader=TestLoader, Dumper=TestDumper) >>> yaml.dump(IN, Dumper=TestDumper) 'test: cc3702ca-699a-4aa6-8226-4c938f294d9b\\n' >>> yaml.load(OUT, Loader=TestLoader) {'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')}
[ "Attach", "an", "implicit", "pattern", "resolver", "for", "UUID", "objects", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L686-L708
PolyJIT/benchbuild
benchbuild/utils/settings.py
Configuration.store
def store(self, config_file): """ Store the configuration dictionary to a file.""" selfcopy = copy.deepcopy(self) selfcopy.filter_exports() with open(config_file, 'w') as outf: yaml.dump( selfcopy.node, outf, width=80, indent=4, default_flow_style=False, Dumper=ConfigDumper)
python
def store(self, config_file): """ Store the configuration dictionary to a file.""" selfcopy = copy.deepcopy(self) selfcopy.filter_exports() with open(config_file, 'w') as outf: yaml.dump( selfcopy.node, outf, width=80, indent=4, default_flow_style=False, Dumper=ConfigDumper)
[ "def", "store", "(", "self", ",", "config_file", ")", ":", "selfcopy", "=", "copy", ".", "deepcopy", "(", "self", ")", "selfcopy", ".", "filter_exports", "(", ")", "with", "open", "(", "config_file", ",", "'w'", ")", "as", "outf", ":", "yaml", ".", "dump", "(", "selfcopy", ".", "node", ",", "outf", ",", "width", "=", "80", ",", "indent", "=", "4", ",", "default_flow_style", "=", "False", ",", "Dumper", "=", "ConfigDumper", ")" ]
Store the configuration dictionary to a file.
[ "Store", "the", "configuration", "dictionary", "to", "a", "file", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L216-L229
PolyJIT/benchbuild
benchbuild/utils/settings.py
Configuration.load
def load(self, _from): """Load the configuration dictionary from file.""" def load_rec(inode, config): """Recursive part of loading.""" for k in config: if isinstance(config[k], dict) and \ k not in ['value', 'default']: if k in inode: load_rec(inode[k], config[k]) else: LOG.debug("+ config element: '%s'", k) else: inode[k] = config[k] with open(_from, 'r') as infile: obj = yaml.load(infile, Loader=ConfigLoader) upgrade(obj) load_rec(self.node, obj) self['config_file'] = os.path.abspath(_from)
python
def load(self, _from): """Load the configuration dictionary from file.""" def load_rec(inode, config): """Recursive part of loading.""" for k in config: if isinstance(config[k], dict) and \ k not in ['value', 'default']: if k in inode: load_rec(inode[k], config[k]) else: LOG.debug("+ config element: '%s'", k) else: inode[k] = config[k] with open(_from, 'r') as infile: obj = yaml.load(infile, Loader=ConfigLoader) upgrade(obj) load_rec(self.node, obj) self['config_file'] = os.path.abspath(_from)
[ "def", "load", "(", "self", ",", "_from", ")", ":", "def", "load_rec", "(", "inode", ",", "config", ")", ":", "\"\"\"Recursive part of loading.\"\"\"", "for", "k", "in", "config", ":", "if", "isinstance", "(", "config", "[", "k", "]", ",", "dict", ")", "and", "k", "not", "in", "[", "'value'", ",", "'default'", "]", ":", "if", "k", "in", "inode", ":", "load_rec", "(", "inode", "[", "k", "]", ",", "config", "[", "k", "]", ")", "else", ":", "LOG", ".", "debug", "(", "\"+ config element: '%s'\"", ",", "k", ")", "else", ":", "inode", "[", "k", "]", "=", "config", "[", "k", "]", "with", "open", "(", "_from", ",", "'r'", ")", "as", "infile", ":", "obj", "=", "yaml", ".", "load", "(", "infile", ",", "Loader", "=", "ConfigLoader", ")", "upgrade", "(", "obj", ")", "load_rec", "(", "self", ".", "node", ",", "obj", ")", "self", "[", "'config_file'", "]", "=", "os", ".", "path", ".", "abspath", "(", "_from", ")" ]
Load the configuration dictionary from file.
[ "Load", "the", "configuration", "dictionary", "from", "file", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L231-L250
PolyJIT/benchbuild
benchbuild/utils/settings.py
Configuration.init_from_env
def init_from_env(self): """ Initialize this node from environment. If we're a leaf node, i.e., a node containing a dictionary that consist of a 'default' key, compute our env variable and initialize our value from the environment. Otherwise, init our children. """ if 'default' in self.node: env_var = self.__to_env_var__().upper() if self.has_value(): env_val = self.node['value'] else: env_val = self.node['default'] env_val = os.getenv(env_var, to_yaml(env_val)) try: self.node['value'] = yaml.load( str(env_val), Loader=ConfigLoader) except ValueError: self.node['value'] = env_val else: if isinstance(self.node, dict): for k in self.node: self[k].init_from_env()
python
def init_from_env(self): """ Initialize this node from environment. If we're a leaf node, i.e., a node containing a dictionary that consist of a 'default' key, compute our env variable and initialize our value from the environment. Otherwise, init our children. """ if 'default' in self.node: env_var = self.__to_env_var__().upper() if self.has_value(): env_val = self.node['value'] else: env_val = self.node['default'] env_val = os.getenv(env_var, to_yaml(env_val)) try: self.node['value'] = yaml.load( str(env_val), Loader=ConfigLoader) except ValueError: self.node['value'] = env_val else: if isinstance(self.node, dict): for k in self.node: self[k].init_from_env()
[ "def", "init_from_env", "(", "self", ")", ":", "if", "'default'", "in", "self", ".", "node", ":", "env_var", "=", "self", ".", "__to_env_var__", "(", ")", ".", "upper", "(", ")", "if", "self", ".", "has_value", "(", ")", ":", "env_val", "=", "self", ".", "node", "[", "'value'", "]", "else", ":", "env_val", "=", "self", ".", "node", "[", "'default'", "]", "env_val", "=", "os", ".", "getenv", "(", "env_var", ",", "to_yaml", "(", "env_val", ")", ")", "try", ":", "self", ".", "node", "[", "'value'", "]", "=", "yaml", ".", "load", "(", "str", "(", "env_val", ")", ",", "Loader", "=", "ConfigLoader", ")", "except", "ValueError", ":", "self", ".", "node", "[", "'value'", "]", "=", "env_val", "else", ":", "if", "isinstance", "(", "self", ".", "node", ",", "dict", ")", ":", "for", "k", "in", "self", ".", "node", ":", "self", "[", "k", "]", ".", "init_from_env", "(", ")" ]
Initialize this node from environment. If we're a leaf node, i.e., a node containing a dictionary that consists of a 'default' key, compute our env variable and initialize our value from the environment. Otherwise, init our children.
[ "Initialize", "this", "node", "from", "environment", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L264-L289
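A leaf node's value can be overridden from the environment, as init_from_env above shows. The sketch below assumes benchbuild is installed and uses a throwaway root name "test", for which the derived variable name should be TEST_JOBS (following the TEST_X_Y pattern visible in the Configuration.value doctest in the next record).

import os
from benchbuild.utils.settings import Configuration

os.environ["TEST_JOBS"] = "8"            # hypothetical override
cfg = Configuration("test")
cfg['jobs'] = {"default": 4}
cfg.init_from_env()
print(cfg['jobs'].value)                 # expected: 8 (YAML-parsed from the environment)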
PolyJIT/benchbuild
benchbuild/utils/settings.py
Configuration.value
def value(self): """ Return the node value, if we're a leaf node. Examples: >>> c = Configuration("test") >>> c['x'] = { "y" : { "value" : None }, "z" : { "value" : 2 }} >>> c['x']['y'].value == None True >>> c['x']['z'].value 2 >>> c['x'].value TEST_X_Y=null TEST_X_Z=2 """ def validate(node_value): if hasattr(node_value, 'validate'): node_value.validate() return node_value if 'value' in self.node: return validate(self.node['value']) return self
python
def value(self): """ Return the node value, if we're a leaf node. Examples: >>> c = Configuration("test") >>> c['x'] = { "y" : { "value" : None }, "z" : { "value" : 2 }} >>> c['x']['y'].value == None True >>> c['x']['z'].value 2 >>> c['x'].value TEST_X_Y=null TEST_X_Z=2 """ def validate(node_value): if hasattr(node_value, 'validate'): node_value.validate() return node_value if 'value' in self.node: return validate(self.node['value']) return self
[ "def", "value", "(", "self", ")", ":", "def", "validate", "(", "node_value", ")", ":", "if", "hasattr", "(", "node_value", ",", "'validate'", ")", ":", "node_value", ".", "validate", "(", ")", "return", "node_value", "if", "'value'", "in", "self", ".", "node", ":", "return", "validate", "(", "self", ".", "node", "[", "'value'", "]", ")", "return", "self" ]
Return the node value, if we're a leaf node. Examples: >>> c = Configuration("test") >>> c['x'] = { "y" : { "value" : None }, "z" : { "value" : 2 }} >>> c['x']['y'].value == None True >>> c['x']['z'].value 2 >>> c['x'].value TEST_X_Y=null TEST_X_Z=2
[ "Return", "the", "node", "value", "if", "we", "re", "a", "leaf", "node", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L292-L316
PolyJIT/benchbuild
benchbuild/utils/settings.py
Configuration.to_env_dict
def to_env_dict(self): """Convert configuration object to a flat dictionary.""" entries = {} if self.has_value(): return {self.__to_env_var__(): self.node['value']} if self.has_default(): return {self.__to_env_var__(): self.node['default']} for k in self.node: entries.update(self[k].to_env_dict()) return entries
python
def to_env_dict(self): """Convert configuration object to a flat dictionary.""" entries = {} if self.has_value(): return {self.__to_env_var__(): self.node['value']} if self.has_default(): return {self.__to_env_var__(): self.node['default']} for k in self.node: entries.update(self[k].to_env_dict()) return entries
[ "def", "to_env_dict", "(", "self", ")", ":", "entries", "=", "{", "}", "if", "self", ".", "has_value", "(", ")", ":", "return", "{", "self", ".", "__to_env_var__", "(", ")", ":", "self", ".", "node", "[", "'value'", "]", "}", "if", "self", ".", "has_default", "(", ")", ":", "return", "{", "self", ".", "__to_env_var__", "(", ")", ":", "self", ".", "node", "[", "'default'", "]", "}", "for", "k", "in", "self", ".", "node", ":", "entries", ".", "update", "(", "self", "[", "k", "]", ".", "to_env_dict", "(", ")", ")", "return", "entries" ]
Convert configuration object to a flat dictionary.
[ "Convert", "configuration", "object", "to", "a", "flat", "dictionary", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L454-L465
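to_env_dict flattens the configuration tree into {ENV_VAR: value} pairs, falling back to defaults where no value is set. A small sketch, reusing the doctest-style setup from Configuration.value above; benchbuild is assumed to be installed.

from benchbuild.utils.settings import Configuration

cfg = Configuration("test")
cfg['x'] = {"y": {"value": 1}, "z": {"default": 2}}
print(cfg.to_env_dict())
# expected (based on the code above): {'TEST_X_Y': 1, 'TEST_X_Z': 2}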
portfoliome/foil
foil/records.py
rename_keys
def rename_keys(record: Mapping, key_map: Mapping) -> dict: """New record with same keys or renamed keys if key found in key_map.""" new_record = dict() for k, v in record.items(): key = key_map[k] if k in key_map else k new_record[key] = v return new_record
python
def rename_keys(record: Mapping, key_map: Mapping) -> dict: """New record with same keys or renamed keys if key found in key_map.""" new_record = dict() for k, v in record.items(): key = key_map[k] if k in key_map else k new_record[key] = v return new_record
[ "def", "rename_keys", "(", "record", ":", "Mapping", ",", "key_map", ":", "Mapping", ")", "->", "dict", ":", "new_record", "=", "dict", "(", ")", "for", "k", ",", "v", "in", "record", ".", "items", "(", ")", ":", "key", "=", "key_map", "[", "k", "]", "if", "k", "in", "key_map", "else", "k", "new_record", "[", "key", "]", "=", "v", "return", "new_record" ]
New record with same keys or renamed keys if key found in key_map.
[ "New", "record", "with", "same", "keys", "or", "renamed", "keys", "if", "key", "found", "in", "key_map", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/records.py#L6-L15
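rename_keys keeps every key and only renames those present in key_map; a tiny usage sketch, assuming the foil package is installed:

from foil.records import rename_keys

record = {'first': 'Ada', 'last': 'Lovelace'}
print(rename_keys(record, {'first': 'first_name'}))
# expected: {'first_name': 'Ada', 'last': 'Lovelace'}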
portfoliome/foil
foil/records.py
replace_keys
def replace_keys(record: Mapping, key_map: Mapping) -> dict: """New record with renamed keys including keys only found in key_map.""" return {key_map[k]: v for k, v in record.items() if k in key_map}
python
def replace_keys(record: Mapping, key_map: Mapping) -> dict: """New record with renamed keys including keys only found in key_map.""" return {key_map[k]: v for k, v in record.items() if k in key_map}
[ "def", "replace_keys", "(", "record", ":", "Mapping", ",", "key_map", ":", "Mapping", ")", "->", "dict", ":", "return", "{", "key_map", "[", "k", "]", ":", "v", "for", "k", ",", "v", "in", "record", ".", "items", "(", ")", "if", "k", "in", "key_map", "}" ]
New record with renamed keys including keys only found in key_map.
[ "New", "record", "with", "renamed", "keys", "including", "keys", "only", "found", "in", "key_map", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/records.py#L18-L21
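For contrast with rename_keys, replace_keys drops any key missing from key_map; a sketch under the same import assumption:

from foil.records import replace_keys

record = {'fname': 'Ada', 'lname': 'Lovelace', 'age': 36}
key_map = {'fname': 'first_name', 'lname': 'last_name'}
print(replace_keys(record, key_map))
# {'first_name': 'Ada', 'last_name': 'Lovelace'} -- 'age' is dropped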
portfoliome/foil
foil/records.py
inject_nulls
def inject_nulls(data: Mapping, field_names) -> dict: """Insert None as value for missing fields.""" record = dict() for field in field_names: record[field] = data.get(field, None) return record
python
def inject_nulls(data: Mapping, field_names) -> dict: """Insert None as value for missing fields.""" record = dict() for field in field_names: record[field] = data.get(field, None) return record
[ "def", "inject_nulls", "(", "data", ":", "Mapping", ",", "field_names", ")", "->", "dict", ":", "record", "=", "dict", "(", ")", "for", "field", "in", "field_names", ":", "record", "[", "field", "]", "=", "data", ".", "get", "(", "field", ",", "None", ")", "return", "record" ]
Insert None as value for missing fields.
[ "Insert", "None", "as", "value", "for", "missing", "fields", "." ]
train
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/records.py#L24-L32
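inject_nulls pads a record out to a fixed field list; a sketch under the same import assumption:

from foil.records import inject_nulls

data = {'first_name': 'Ada', 'last_name': 'Lovelace'}
fields = ('first_name', 'last_name', 'email')
print(inject_nulls(data, fields))
# {'first_name': 'Ada', 'last_name': 'Lovelace', 'email': None}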
PolyJIT/benchbuild
benchbuild/likwid.py
read_struct
def read_struct(fstream): """ Read a likwid struct from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing all likwid's struct info as key/value pairs. """ line = fstream.readline().strip() fragments = line.split(",") fragments = [x for x in fragments if x is not None] partition = dict() if not len(fragments) >= 3: return None partition["struct"] = fragments[0] partition["info"] = fragments[1] partition["num_lines"] = fragments[2] struct = None if partition is not None and partition["struct"] == "STRUCT": num_lines = int(partition["num_lines"].strip()) struct = {} for _ in range(num_lines): cols = fetch_cols(fstream) struct.update({cols[0]: cols[1:]}) return struct
python
def read_struct(fstream): """ Read a likwid struct from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing all likwid's struct info as key/value pairs. """ line = fstream.readline().strip() fragments = line.split(",") fragments = [x for x in fragments if x is not None] partition = dict() if not len(fragments) >= 3: return None partition["struct"] = fragments[0] partition["info"] = fragments[1] partition["num_lines"] = fragments[2] struct = None if partition is not None and partition["struct"] == "STRUCT": num_lines = int(partition["num_lines"].strip()) struct = {} for _ in range(num_lines): cols = fetch_cols(fstream) struct.update({cols[0]: cols[1:]}) return struct
[ "def", "read_struct", "(", "fstream", ")", ":", "line", "=", "fstream", ".", "readline", "(", ")", ".", "strip", "(", ")", "fragments", "=", "line", ".", "split", "(", "\",\"", ")", "fragments", "=", "[", "x", "for", "x", "in", "fragments", "if", "x", "is", "not", "None", "]", "partition", "=", "dict", "(", ")", "if", "not", "len", "(", "fragments", ")", ">=", "3", ":", "return", "None", "partition", "[", "\"struct\"", "]", "=", "fragments", "[", "0", "]", "partition", "[", "\"info\"", "]", "=", "fragments", "[", "1", "]", "partition", "[", "\"num_lines\"", "]", "=", "fragments", "[", "2", "]", "struct", "=", "None", "if", "partition", "is", "not", "None", "and", "partition", "[", "\"struct\"", "]", "==", "\"STRUCT\"", ":", "num_lines", "=", "int", "(", "partition", "[", "\"num_lines\"", "]", ".", "strip", "(", ")", ")", "struct", "=", "{", "}", "for", "_", "in", "range", "(", "num_lines", ")", ":", "cols", "=", "fetch_cols", "(", "fstream", ")", "struct", ".", "update", "(", "{", "cols", "[", "0", "]", ":", "cols", "[", "1", ":", "]", "}", ")", "return", "struct" ]
Read a likwid struct from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing all likwid's struct info as key/value pairs.
[ "Read", "a", "likwid", "struct", "from", "the", "text", "stream", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L23-L51
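read_struct consumes a "STRUCT,<info>,<num_lines>" header followed by num_lines data rows. The sketch below assumes fetch_cols (defined elsewhere in benchbuild/likwid.py) splits each line on commas, so both the sample input and the expected result are indicative only.

import io
from benchbuild.likwid import read_struct  # module path taken from the record above

text = io.StringIO(
    "STRUCT,Region Info,2\n"
    "Region Info,Core 0,Core 1\n"
    "RDTSC Runtime [s],1.5,1.6\n"
)
print(read_struct(text))
# Expected to resemble:
# {'Region Info': ['Core 0', 'Core 1'], 'RDTSC Runtime [s]': ['1.5', '1.6']}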
PolyJIT/benchbuild
benchbuild/likwid.py
read_table
def read_table(fstream): """ Read a likwid table info from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing likwid's table info as key/value pairs. """ pos = fstream.tell() line = fstream.readline().strip() fragments = line.split(",") fragments = [x for x in fragments if x is not None] partition = dict() if not len(fragments) >= 4: return None partition["table"] = fragments[0] partition["group"] = fragments[1] partition["set"] = fragments[2] partition["num_lines"] = fragments[3] struct = None if partition is not None and partition["table"] == "TABLE": num_lines = int(partition["num_lines"].strip()) struct = {} header = fetch_cols(fstream) struct.update({header[0]: header[1:]}) for _ in range(num_lines): cols = fetch_cols(fstream) struct.update({cols[0]: cols[1:]}) else: fstream.seek(pos) return struct
python
def read_table(fstream): """ Read a likwid table info from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing likwid's table info as key/value pairs. """ pos = fstream.tell() line = fstream.readline().strip() fragments = line.split(",") fragments = [x for x in fragments if x is not None] partition = dict() if not len(fragments) >= 4: return None partition["table"] = fragments[0] partition["group"] = fragments[1] partition["set"] = fragments[2] partition["num_lines"] = fragments[3] struct = None if partition is not None and partition["table"] == "TABLE": num_lines = int(partition["num_lines"].strip()) struct = {} header = fetch_cols(fstream) struct.update({header[0]: header[1:]}) for _ in range(num_lines): cols = fetch_cols(fstream) struct.update({cols[0]: cols[1:]}) else: fstream.seek(pos) return struct
[ "def", "read_table", "(", "fstream", ")", ":", "pos", "=", "fstream", ".", "tell", "(", ")", "line", "=", "fstream", ".", "readline", "(", ")", ".", "strip", "(", ")", "fragments", "=", "line", ".", "split", "(", "\",\"", ")", "fragments", "=", "[", "x", "for", "x", "in", "fragments", "if", "x", "is", "not", "None", "]", "partition", "=", "dict", "(", ")", "if", "not", "len", "(", "fragments", ")", ">=", "4", ":", "return", "None", "partition", "[", "\"table\"", "]", "=", "fragments", "[", "0", "]", "partition", "[", "\"group\"", "]", "=", "fragments", "[", "1", "]", "partition", "[", "\"set\"", "]", "=", "fragments", "[", "2", "]", "partition", "[", "\"num_lines\"", "]", "=", "fragments", "[", "3", "]", "struct", "=", "None", "if", "partition", "is", "not", "None", "and", "partition", "[", "\"table\"", "]", "==", "\"TABLE\"", ":", "num_lines", "=", "int", "(", "partition", "[", "\"num_lines\"", "]", ".", "strip", "(", ")", ")", "struct", "=", "{", "}", "header", "=", "fetch_cols", "(", "fstream", ")", "struct", ".", "update", "(", "{", "header", "[", "0", "]", ":", "header", "[", "1", ":", "]", "}", ")", "for", "_", "in", "range", "(", "num_lines", ")", ":", "cols", "=", "fetch_cols", "(", "fstream", ")", "struct", ".", "update", "(", "{", "cols", "[", "0", "]", ":", "cols", "[", "1", ":", "]", "}", ")", "else", ":", "fstream", ".", "seek", "(", "pos", ")", "return", "struct" ]
Read a likwid table info from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing likwid's table info as key/value pairs.
[ "Read", "a", "likwid", "table", "info", "from", "the", "text", "stream", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L54-L90
PolyJIT/benchbuild
benchbuild/likwid.py
read_structs
def read_structs(fstream): """ Read all structs from likwid's file stream. Args: fstream: Likwid's output file stream. Returns: A generator that can be used to iterate over all structs in the fstream. """ struct = read_struct(fstream) while struct is not None: yield struct struct = read_struct(fstream)
python
def read_structs(fstream): """ Read all structs from likwid's file stream. Args: fstream: Likwid's output file stream. Returns: A generator that can be used to iterate over all structs in the fstream. """ struct = read_struct(fstream) while struct is not None: yield struct struct = read_struct(fstream)
[ "def", "read_structs", "(", "fstream", ")", ":", "struct", "=", "read_struct", "(", "fstream", ")", "while", "struct", "is", "not", "None", ":", "yield", "struct", "struct", "=", "read_struct", "(", "fstream", ")" ]
Read all structs from likwid's file stream. Args: fstream: Likwid's output file stream. Returns: A generator that can be used to iterate over all structs in the fstream.
[ "Read", "all", "structs", "from", "likwid", "s", "file", "stream", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L93-L107
PolyJIT/benchbuild
benchbuild/likwid.py
read_tables
def read_tables(fstream): """ Read all tables from likwid's file stream. Args: fstream: Likwid's output file stream. Returns: A generator that can be used to iterate over all tables in the fstream. """ table = read_table(fstream) while table is not None: yield table table = read_table(fstream)
python
def read_tables(fstream): """ Read all tables from likwid's file stream. Args: fstream: Likwid's output file stream. Returns: A generator that can be used to iterate over all tables in the fstream. """ table = read_table(fstream) while table is not None: yield table table = read_table(fstream)
[ "def", "read_tables", "(", "fstream", ")", ":", "table", "=", "read_table", "(", "fstream", ")", "while", "table", "is", "not", "None", ":", "yield", "table", "table", "=", "read_table", "(", "fstream", ")" ]
Read all tables from likwid's file stream. Args: fstream: Likwid's output file stream. Returns: A generator that can be used to iterate over all tables in the fstream.
[ "Read", "all", "tables", "from", "likwid", "s", "file", "stream", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L110-L123
PolyJIT/benchbuild
benchbuild/likwid.py
get_measurements
def get_measurements(region, core_info, data, extra_offset=0): """ Get the complete measurement info from likwid's region info. Args: region: The region we took a measurement in. core_info: The core information. data: The raw data. extra_offset (int): default = 0 Returns (list((region, metric, core, value))): A list of measurement tuples, a tuple contains the information about the region, the metric, the core and the actual value. """ measurements = [] clean_core_info = [x for x in core_info if x] cores = len(clean_core_info) for k in data: if k not in ["1", "Region Info", "Event", "Metric", "CPU clock"]: slot = data[k] for i in range(cores): core = core_info[i] idx = extra_offset + i if core and slot[idx]: measurements.append((region, k, core, slot[idx])) return measurements
python
def get_measurements(region, core_info, data, extra_offset=0): """ Get the complete measurement info from likwid's region info. Args: region: The region we took a measurement in. core_info: The core information. data: The raw data. extra_offset (int): default = 0 Returns (list((region, metric, core, value))): A list of measurement tuples, a tuple contains the information about the region, the metric, the core and the actual value. """ measurements = [] clean_core_info = [x for x in core_info if x] cores = len(clean_core_info) for k in data: if k not in ["1", "Region Info", "Event", "Metric", "CPU clock"]: slot = data[k] for i in range(cores): core = core_info[i] idx = extra_offset + i if core and slot[idx]: measurements.append((region, k, core, slot[idx])) return measurements
[ "def", "get_measurements", "(", "region", ",", "core_info", ",", "data", ",", "extra_offset", "=", "0", ")", ":", "measurements", "=", "[", "]", "clean_core_info", "=", "[", "x", "for", "x", "in", "core_info", "if", "x", "]", "cores", "=", "len", "(", "clean_core_info", ")", "for", "k", "in", "data", ":", "if", "k", "not", "in", "[", "\"1\"", ",", "\"Region Info\"", ",", "\"Event\"", ",", "\"Metric\"", ",", "\"CPU clock\"", "]", ":", "slot", "=", "data", "[", "k", "]", "for", "i", "in", "range", "(", "cores", ")", ":", "core", "=", "core_info", "[", "i", "]", "idx", "=", "extra_offset", "+", "i", "if", "core", "and", "slot", "[", "idx", "]", ":", "measurements", ".", "append", "(", "(", "region", ",", "k", ",", "core", ",", "slot", "[", "idx", "]", ")", ")", "return", "measurements" ]
Get the complete measurement info from likwid's region info. Args: region: The region we took a measurement in. core_info: The core information. data: The raw data. extra_offset (int): default = 0 Returns (list((region, metric, core, value))): A list of measurement tuples, a tuple contains the information about the region, the metric, the core and the actual value.
[ "Get", "the", "complete", "measurement", "info", "from", "likwid", "s", "region", "info", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L126-L152
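get_measurements flattens one metric column per core into (region, metric, core, value) tuples; a small sketch with synthetic data (the metric and core names are made up for the example):

from benchbuild.likwid import get_measurements

core_info = ['Core 0', 'Core 1']
data = {
    'Region Info': core_info,             # skipped: header key
    'RDTSC Runtime [s]': ['1.5', '1.6'],
}
print(get_measurements('main', core_info, data))
# [('main', 'RDTSC Runtime [s]', 'Core 0', '1.5'),
#  ('main', 'RDTSC Runtime [s]', 'Core 1', '1.6')]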
PolyJIT/benchbuild
benchbuild/likwid.py
perfcounters
def perfcounters(infile): """ Get a complete list of all measurements. Args: infile: The filestream containing all likwid output. Returns: A list of all measurements extracted from likwid's file stream. """ measurements = [] with open(infile, 'r') as in_file: read_struct(in_file) for region_struct in read_structs(in_file): region = region_struct["1"][1] core_info = region_struct["Region Info"] measurements += \ get_measurements(region, core_info, region_struct) for table_struct in read_tables(in_file): core_info = None if "Event" in table_struct: offset = 1 core_info = table_struct["Event"][offset:] measurements += get_measurements(region, core_info, table_struct, offset) elif "Metric" in table_struct: core_info = table_struct["Metric"] measurements += get_measurements(region, core_info, table_struct) return measurements
python
def perfcounters(infile): """ Get a complete list of all measurements. Args: infile: The filestream containing all likwid output. Returns: A list of all measurements extracted from likwid's file stream. """ measurements = [] with open(infile, 'r') as in_file: read_struct(in_file) for region_struct in read_structs(in_file): region = region_struct["1"][1] core_info = region_struct["Region Info"] measurements += \ get_measurements(region, core_info, region_struct) for table_struct in read_tables(in_file): core_info = None if "Event" in table_struct: offset = 1 core_info = table_struct["Event"][offset:] measurements += get_measurements(region, core_info, table_struct, offset) elif "Metric" in table_struct: core_info = table_struct["Metric"] measurements += get_measurements(region, core_info, table_struct) return measurements
[ "def", "perfcounters", "(", "infile", ")", ":", "measurements", "=", "[", "]", "with", "open", "(", "infile", ",", "'r'", ")", "as", "in_file", ":", "read_struct", "(", "in_file", ")", "for", "region_struct", "in", "read_structs", "(", "in_file", ")", ":", "region", "=", "region_struct", "[", "\"1\"", "]", "[", "1", "]", "core_info", "=", "region_struct", "[", "\"Region Info\"", "]", "measurements", "+=", "get_measurements", "(", "region", ",", "core_info", ",", "region_struct", ")", "for", "table_struct", "in", "read_tables", "(", "in_file", ")", ":", "core_info", "=", "None", "if", "\"Event\"", "in", "table_struct", ":", "offset", "=", "1", "core_info", "=", "table_struct", "[", "\"Event\"", "]", "[", "offset", ":", "]", "measurements", "+=", "get_measurements", "(", "region", ",", "core_info", ",", "table_struct", ",", "offset", ")", "elif", "\"Metric\"", "in", "table_struct", ":", "core_info", "=", "table_struct", "[", "\"Metric\"", "]", "measurements", "+=", "get_measurements", "(", "region", ",", "core_info", ",", "table_struct", ")", "return", "measurements" ]
Get a complete list of all measurements. Args: infile: The filestream containing all likwid output. Returns: A list of all measurements extracted from likwid's file stream.
[ "Get", "a", "complete", "list", "of", "all", "measurements", "." ]
train
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L155-L185
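Typical use of perfcounters on a likwid output file; the file name below is a placeholder, not something shipped with the record.

from benchbuild.likwid import perfcounters

# 'likwid.csv' is a placeholder for the CSV file produced by a filtered
# likwid-perfctr run; each entry is a (region, metric, core, value) tuple.
for region, metric, core, value in perfcounters('likwid.csv'):
    print(region, metric, core, value)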
BlueBrain/hpcbench
hpcbench/cli/__init__.py
setup_logger
def setup_logger(verbose, logfile): """Prepare root logger :param verbose: integer greater than 0 to indicate verbosity level """ level = logging.WARNING if verbose == 1: level = logging.INFO elif verbose > 1: level = logging.DEBUG if logfile: logging.basicConfig(filename=logfile, level=level, format=LOGGING_FORMAT) else: logging.basicConfig(level=level, format=LOGGING_FORMAT)
python
def setup_logger(verbose, logfile): """Prepare root logger :param verbose: integer greater than 0 to indicate verbosity level """ level = logging.WARNING if verbose == 1: level = logging.INFO elif verbose > 1: level = logging.DEBUG if logfile: logging.basicConfig(filename=logfile, level=level, format=LOGGING_FORMAT) else: logging.basicConfig(level=level, format=LOGGING_FORMAT)
[ "def", "setup_logger", "(", "verbose", ",", "logfile", ")", ":", "level", "=", "logging", ".", "WARNING", "if", "verbose", "==", "1", ":", "level", "=", "logging", ".", "INFO", "elif", "verbose", ">", "1", ":", "level", "=", "logging", ".", "DEBUG", "if", "logfile", ":", "logging", ".", "basicConfig", "(", "filename", "=", "logfile", ",", "level", "=", "level", ",", "format", "=", "LOGGING_FORMAT", ")", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "level", ",", "format", "=", "LOGGING_FORMAT", ")" ]
Prepare root logger :param verbose: integer greater than 0 to indicate verbosity level
[ "Prepare", "root", "logger", ":", "param", "verbose", ":", "integer", "greater", "than", "0", "to", "indicate", "verbosity", "level" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/__init__.py#L13-L25
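setup_logger maps the verbosity count onto logging levels on the root logger; a short sketch:

from hpcbench.cli import setup_logger

# verbose: 0 -> WARNING, 1 -> INFO, >1 -> DEBUG; logfile=None keeps the
# default stream handler instead of writing to a file.
setup_logger(verbose=2, logfile=None)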
BlueBrain/hpcbench
hpcbench/cli/__init__.py
cli_common
def cli_common(doc, **kwargs): """Program initialization for all provided executables """ arguments = docopt(doc, version='hpcbench ' + __version__, **kwargs) setup_logger(arguments['-v'], arguments['--log']) load_components() try: import matplotlib except ImportError: pass else: matplotlib.use('PS') return arguments
python
def cli_common(doc, **kwargs): """Program initialization for all provided executables """ arguments = docopt(doc, version='hpcbench ' + __version__, **kwargs) setup_logger(arguments['-v'], arguments['--log']) load_components() try: import matplotlib except ImportError: pass else: matplotlib.use('PS') return arguments
[ "def", "cli_common", "(", "doc", ",", "*", "*", "kwargs", ")", ":", "arguments", "=", "docopt", "(", "doc", ",", "version", "=", "'hpcbench '", "+", "__version__", ",", "*", "*", "kwargs", ")", "setup_logger", "(", "arguments", "[", "'-v'", "]", ",", "arguments", "[", "'--log'", "]", ")", "load_components", "(", ")", "try", ":", "import", "matplotlib", "except", "ImportError", ":", "pass", "else", ":", "matplotlib", ".", "use", "(", "'PS'", ")", "return", "arguments" ]
Program initialization for all provided executables
[ "Program", "initialization", "for", "all", "provided", "executables" ]
train
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/__init__.py#L28-L40
sci-bots/svg-model
svg_model/__init__.py
shape_path_points
def shape_path_points(svg_path_d): ''' Parameters ---------- svg_path_d : str ``"d"`` attribute of SVG ``path`` element. Returns ------- list List of coordinates of points found in SVG path. Each point is represented by a dictionary with keys ``x`` and ``y``. ''' # TODO Add support for relative commands, e.g., `l, h, v`. def _update_path_state(path_state, match): if match.group('xy_command'): for dim_j in 'xy': path_state[dim_j] = float(match.group(dim_j)) if path_state.get('x0') is None: for dim_j in 'xy': path_state['%s0' % dim_j] = path_state[dim_j] elif match.group('x_command'): path_state['x'] = float(match.group('hx')) elif match.group('y_command'): path_state['y'] = float(match.group('vy')) elif match.group('command') == 'Z': for dim_j in 'xy': path_state[dim_j] = path_state['%s0' % dim_j] return path_state # Some commands in a SVG path element `"d"` attribute require previous state. # # For example, the `"H"` command is a horizontal move, so the previous # ``y`` position is required to resolve the new `(x, y)` position. # # Iterate through the commands in the `"d"` attribute in order and maintain # the current path position in the `path_state` dictionary. path_state = {'x': None, 'y': None} return [{k: v for k, v in six.iteritems(_update_path_state(path_state, match_i)) if k in 'xy'} for match_i in cre_path_command .finditer(svg_path_d)]
python
def shape_path_points(svg_path_d): ''' Parameters ---------- svg_path_d : str ``"d"`` attribute of SVG ``path`` element. Returns ------- list List of coordinates of points found in SVG path. Each point is represented by a dictionary with keys ``x`` and ``y``. ''' # TODO Add support for relative commands, e.g., `l, h, v`. def _update_path_state(path_state, match): if match.group('xy_command'): for dim_j in 'xy': path_state[dim_j] = float(match.group(dim_j)) if path_state.get('x0') is None: for dim_j in 'xy': path_state['%s0' % dim_j] = path_state[dim_j] elif match.group('x_command'): path_state['x'] = float(match.group('hx')) elif match.group('y_command'): path_state['y'] = float(match.group('vy')) elif match.group('command') == 'Z': for dim_j in 'xy': path_state[dim_j] = path_state['%s0' % dim_j] return path_state # Some commands in a SVG path element `"d"` attribute require previous state. # # For example, the `"H"` command is a horizontal move, so the previous # ``y`` position is required to resolve the new `(x, y)` position. # # Iterate through the commands in the `"d"` attribute in order and maintain # the current path position in the `path_state` dictionary. path_state = {'x': None, 'y': None} return [{k: v for k, v in six.iteritems(_update_path_state(path_state, match_i)) if k in 'xy'} for match_i in cre_path_command .finditer(svg_path_d)]
[ "def", "shape_path_points", "(", "svg_path_d", ")", ":", "# TODO Add support for relative commands, e.g., `l, h, v`.", "def", "_update_path_state", "(", "path_state", ",", "match", ")", ":", "if", "match", ".", "group", "(", "'xy_command'", ")", ":", "for", "dim_j", "in", "'xy'", ":", "path_state", "[", "dim_j", "]", "=", "float", "(", "match", ".", "group", "(", "dim_j", ")", ")", "if", "path_state", ".", "get", "(", "'x0'", ")", "is", "None", ":", "for", "dim_j", "in", "'xy'", ":", "path_state", "[", "'%s0'", "%", "dim_j", "]", "=", "path_state", "[", "dim_j", "]", "elif", "match", ".", "group", "(", "'x_command'", ")", ":", "path_state", "[", "'x'", "]", "=", "float", "(", "match", ".", "group", "(", "'hx'", ")", ")", "elif", "match", ".", "group", "(", "'y_command'", ")", ":", "path_state", "[", "'y'", "]", "=", "float", "(", "match", ".", "group", "(", "'vy'", ")", ")", "elif", "match", ".", "group", "(", "'command'", ")", "==", "'Z'", ":", "for", "dim_j", "in", "'xy'", ":", "path_state", "[", "dim_j", "]", "=", "path_state", "[", "'%s0'", "%", "dim_j", "]", "return", "path_state", "# Some commands in a SVG path element `\"d\"` attribute require previous state.", "#", "# For example, the `\"H\"` command is a horizontal move, so the previous", "# ``y`` position is required to resolve the new `(x, y)` position.", "#", "# Iterate through the commands in the `\"d\"` attribute in order and maintain", "# the current path position in the `path_state` dictionary.", "path_state", "=", "{", "'x'", ":", "None", ",", "'y'", ":", "None", "}", "return", "[", "{", "k", ":", "v", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "_update_path_state", "(", "path_state", ",", "match_i", ")", ")", "if", "k", "in", "'xy'", "}", "for", "match_i", "in", "cre_path_command", ".", "finditer", "(", "svg_path_d", ")", "]" ]
Parameters ---------- svg_path_d : str ``"d"`` attribute of SVG ``path`` element. Returns ------- list List of coordinates of points found in SVG path. Each point is represented by a dictionary with keys ``x`` and ``y``.
[ "Parameters", "----------", "svg_path_d", ":", "str", "d", "attribute", "of", "SVG", "path", "element", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/__init__.py#L39-L79
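An indicative call to shape_path_points; the exact path syntax accepted depends on cre_path_command (defined elsewhere in svg_model), so the expected output below is a guess at the shape of the result rather than a verified value.

from svg_model import shape_path_points  # package __init__ per the record above

d = 'M 0,0 L 10,0 L 10,10 Z'
print(shape_path_points(d))
# Expected to resemble (one dict per command, Z closing back to the start):
# [{'x': 0.0, 'y': 0.0}, {'x': 10.0, 'y': 0.0},
#  {'x': 10.0, 'y': 10.0}, {'x': 0.0, 'y': 0.0}]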
sci-bots/svg-model
svg_model/__init__.py
svg_shapes_to_df
def svg_shapes_to_df(svg_source, xpath='//svg:path | //svg:polygon', namespaces=INKSCAPE_NSMAP): ''' Construct a data frame with one row per vertex for all shapes in :data:`svg_source``. Arguments --------- svg_source : str or file-like A file path, URI, or file-like object. xpath : str, optional XPath path expression to select shape nodes. By default, all ``svg:path`` and ``svg:polygon`` elements are selected. namespaces : dict, optional Key/value mapping of XML namespaces. Returns ------- pandas.DataFrame Frame with one row per vertex for all shapes in :data:`svg_source`, with the following columns: - ``vertex_i``: The index of the vertex within the corresponding shape. - ``x``: The x-coordinate of the vertex. - ``y``: The y-coordinate of the vertex. - other: attributes of the SVG shape element (e.g., ``id``, ``fill``, etc.) ''' from lxml import etree e_root = etree.parse(svg_source) frames = [] attribs_set = set() # Get list of attributes that are set in any of the shapes (not including # the `svg:path` `"d"` attribute or the `svg:polygon` `"points"` # attribute). # # This, for example, collects attributes such as: # # - `fill`, `stroke` (as part of `"style"` attribute) # - `"transform"`: matrix, scale, etc. for shape_i in e_root.xpath(xpath, namespaces=namespaces): attribs_set.update(list(shape_i.attrib.keys())) for k in ('d', 'points'): if k in attribs_set: attribs_set.remove(k) attribs = list(sorted(attribs_set)) # Always add 'id' attribute as first attribute. if 'id' in attribs: attribs.remove('id') attribs.insert(0, 'id') for shape_i in e_root.xpath(xpath, namespaces=namespaces): # Gather shape attributes from SVG element. base_fields = [shape_i.attrib.get(k, None) for k in attribs] if shape_i.tag == '{http://www.w3.org/2000/svg}path': # Decode `svg:path` vertices from [`"d"`][1] attribute. # # [1]: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/d points_i = [base_fields + [i] + [point_i.get(k) for k in 'xy'] for i, point_i in enumerate(shape_path_points(shape_i.attrib['d']))] elif shape_i.tag == '{http://www.w3.org/2000/svg}polygon': # Decode `svg:polygon` vertices from [`"points"`][2] attribute. # # [2]: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/points points_i = [base_fields + [i] + list(map(float, v.split(','))) for i, v in enumerate(shape_i.attrib['points'] .strip().split(' '))] else: warnings.warning('Unsupported shape tag type: %s' % shape_i.tag) continue frames.extend(points_i) if not frames: # There were no shapes found, so set `frames` list to `None` to allow # an empty data frame to be created. frames = None return pd.DataFrame(frames, columns=attribs + ['vertex_i', 'x', 'y'])
python
def svg_shapes_to_df(svg_source, xpath='//svg:path | //svg:polygon', namespaces=INKSCAPE_NSMAP): ''' Construct a data frame with one row per vertex for all shapes in :data:`svg_source``. Arguments --------- svg_source : str or file-like A file path, URI, or file-like object. xpath : str, optional XPath path expression to select shape nodes. By default, all ``svg:path`` and ``svg:polygon`` elements are selected. namespaces : dict, optional Key/value mapping of XML namespaces. Returns ------- pandas.DataFrame Frame with one row per vertex for all shapes in :data:`svg_source`, with the following columns: - ``vertex_i``: The index of the vertex within the corresponding shape. - ``x``: The x-coordinate of the vertex. - ``y``: The y-coordinate of the vertex. - other: attributes of the SVG shape element (e.g., ``id``, ``fill``, etc.) ''' from lxml import etree e_root = etree.parse(svg_source) frames = [] attribs_set = set() # Get list of attributes that are set in any of the shapes (not including # the `svg:path` `"d"` attribute or the `svg:polygon` `"points"` # attribute). # # This, for example, collects attributes such as: # # - `fill`, `stroke` (as part of `"style"` attribute) # - `"transform"`: matrix, scale, etc. for shape_i in e_root.xpath(xpath, namespaces=namespaces): attribs_set.update(list(shape_i.attrib.keys())) for k in ('d', 'points'): if k in attribs_set: attribs_set.remove(k) attribs = list(sorted(attribs_set)) # Always add 'id' attribute as first attribute. if 'id' in attribs: attribs.remove('id') attribs.insert(0, 'id') for shape_i in e_root.xpath(xpath, namespaces=namespaces): # Gather shape attributes from SVG element. base_fields = [shape_i.attrib.get(k, None) for k in attribs] if shape_i.tag == '{http://www.w3.org/2000/svg}path': # Decode `svg:path` vertices from [`"d"`][1] attribute. # # [1]: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/d points_i = [base_fields + [i] + [point_i.get(k) for k in 'xy'] for i, point_i in enumerate(shape_path_points(shape_i.attrib['d']))] elif shape_i.tag == '{http://www.w3.org/2000/svg}polygon': # Decode `svg:polygon` vertices from [`"points"`][2] attribute. # # [2]: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/points points_i = [base_fields + [i] + list(map(float, v.split(','))) for i, v in enumerate(shape_i.attrib['points'] .strip().split(' '))] else: warnings.warning('Unsupported shape tag type: %s' % shape_i.tag) continue frames.extend(points_i) if not frames: # There were no shapes found, so set `frames` list to `None` to allow # an empty data frame to be created. frames = None return pd.DataFrame(frames, columns=attribs + ['vertex_i', 'x', 'y'])
[ "def", "svg_shapes_to_df", "(", "svg_source", ",", "xpath", "=", "'//svg:path | //svg:polygon'", ",", "namespaces", "=", "INKSCAPE_NSMAP", ")", ":", "from", "lxml", "import", "etree", "e_root", "=", "etree", ".", "parse", "(", "svg_source", ")", "frames", "=", "[", "]", "attribs_set", "=", "set", "(", ")", "# Get list of attributes that are set in any of the shapes (not including", "# the `svg:path` `\"d\"` attribute or the `svg:polygon` `\"points\"`", "# attribute).", "#", "# This, for example, collects attributes such as:", "#", "# - `fill`, `stroke` (as part of `\"style\"` attribute)", "# - `\"transform\"`: matrix, scale, etc.", "for", "shape_i", "in", "e_root", ".", "xpath", "(", "xpath", ",", "namespaces", "=", "namespaces", ")", ":", "attribs_set", ".", "update", "(", "list", "(", "shape_i", ".", "attrib", ".", "keys", "(", ")", ")", ")", "for", "k", "in", "(", "'d'", ",", "'points'", ")", ":", "if", "k", "in", "attribs_set", ":", "attribs_set", ".", "remove", "(", "k", ")", "attribs", "=", "list", "(", "sorted", "(", "attribs_set", ")", ")", "# Always add 'id' attribute as first attribute.", "if", "'id'", "in", "attribs", ":", "attribs", ".", "remove", "(", "'id'", ")", "attribs", ".", "insert", "(", "0", ",", "'id'", ")", "for", "shape_i", "in", "e_root", ".", "xpath", "(", "xpath", ",", "namespaces", "=", "namespaces", ")", ":", "# Gather shape attributes from SVG element.", "base_fields", "=", "[", "shape_i", ".", "attrib", ".", "get", "(", "k", ",", "None", ")", "for", "k", "in", "attribs", "]", "if", "shape_i", ".", "tag", "==", "'{http://www.w3.org/2000/svg}path'", ":", "# Decode `svg:path` vertices from [`\"d\"`][1] attribute.", "#", "# [1]: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/d", "points_i", "=", "[", "base_fields", "+", "[", "i", "]", "+", "[", "point_i", ".", "get", "(", "k", ")", "for", "k", "in", "'xy'", "]", "for", "i", ",", "point_i", "in", "enumerate", "(", "shape_path_points", "(", "shape_i", ".", "attrib", "[", "'d'", "]", ")", ")", "]", "elif", "shape_i", ".", "tag", "==", "'{http://www.w3.org/2000/svg}polygon'", ":", "# Decode `svg:polygon` vertices from [`\"points\"`][2] attribute.", "#", "# [2]: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/points", "points_i", "=", "[", "base_fields", "+", "[", "i", "]", "+", "list", "(", "map", "(", "float", ",", "v", ".", "split", "(", "','", ")", ")", ")", "for", "i", ",", "v", "in", "enumerate", "(", "shape_i", ".", "attrib", "[", "'points'", "]", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", ")", "]", "else", ":", "warnings", ".", "warning", "(", "'Unsupported shape tag type: %s'", "%", "shape_i", ".", "tag", ")", "continue", "frames", ".", "extend", "(", "points_i", ")", "if", "not", "frames", ":", "# There were no shapes found, so set `frames` list to `None` to allow", "# an empty data frame to be created.", "frames", "=", "None", "return", "pd", ".", "DataFrame", "(", "frames", ",", "columns", "=", "attribs", "+", "[", "'vertex_i'", ",", "'x'", ",", "'y'", "]", ")" ]
Construct a data frame with one row per vertex for all shapes in :data:`svg_source``. Arguments --------- svg_source : str or file-like A file path, URI, or file-like object. xpath : str, optional XPath path expression to select shape nodes. By default, all ``svg:path`` and ``svg:polygon`` elements are selected. namespaces : dict, optional Key/value mapping of XML namespaces. Returns ------- pandas.DataFrame Frame with one row per vertex for all shapes in :data:`svg_source`, with the following columns: - ``vertex_i``: The index of the vertex within the corresponding shape. - ``x``: The x-coordinate of the vertex. - ``y``: The y-coordinate of the vertex. - other: attributes of the SVG shape element (e.g., ``id``, ``fill``, etc.)
[ "Construct", "a", "data", "frame", "with", "one", "row", "per", "vertex", "for", "all", "shapes", "in", ":", "data", ":", "svg_source", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/__init__.py#L82-L165
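A usage sketch for svg_shapes_to_df; 'device.svg' is a placeholder for any SVG containing svg:path or svg:polygon elements. (Note that the warnings.warning call in the unsupported-shape branch above would fail at runtime; the standard-library function is warnings.warn.)

from svg_model import svg_shapes_to_df

df_shapes = svg_shapes_to_df('device.svg')  # placeholder file name
# One row per vertex; columns include the shapes' SVG attributes plus
# 'vertex_i', 'x' and 'y'.
print(df_shapes.head())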
sci-bots/svg-model
svg_model/__init__.py
svg_polygons_to_df
def svg_polygons_to_df(svg_source, xpath='//svg:polygon', namespaces=INKSCAPE_NSMAP): ''' Construct a data frame with one row per vertex for all shapes (e.g., ``svg:path``, ``svg:polygon``) in :data:`svg_source``. Arguments --------- svg_source : str or file-like A file path, URI, or file-like object. xpath : str, optional XPath path expression to select shape nodes. namespaces : dict, optional Key/value mapping of XML namespaces. Returns ------- pandas.DataFrame Frame with one row per vertex for all shapes in :data:`svg_source`, with the following columns: - ``path_id``: The ``id`` attribute of the corresponding shape. - ``vertex_i``: The index of the vertex within the corresponding shape. - ``x``: The x-coordinate of the vertex. - ``y``: The y-coordinate of the vertex. .. note:: Deprecated in :mod:`svg_model` 0.5.post10 :func:`svg_polygons_to_df` will be removed in :mod:`svg_model` 1.0, it is replaced by :func:`svg_shapes_to_df` because the latter is more general and works with ``svg:path`` and ``svg:polygon`` elements. ''' warnings.warn("The `svg_polygons_to_df` function is deprecated. Use " "`svg_shapes_to_df` instead.") result = svg_shapes_to_df(svg_source, xpath=xpath, namespaces=namespaces) return result[['id', 'vertex_i', 'x', 'y']].rename(columns={'id': 'path_id'})
python
def svg_polygons_to_df(svg_source, xpath='//svg:polygon', namespaces=INKSCAPE_NSMAP): ''' Construct a data frame with one row per vertex for all shapes (e.g., ``svg:path``, ``svg:polygon``) in :data:`svg_source``. Arguments --------- svg_source : str or file-like A file path, URI, or file-like object. xpath : str, optional XPath path expression to select shape nodes. namespaces : dict, optional Key/value mapping of XML namespaces. Returns ------- pandas.DataFrame Frame with one row per vertex for all shapes in :data:`svg_source`, with the following columns: - ``path_id``: The ``id`` attribute of the corresponding shape. - ``vertex_i``: The index of the vertex within the corresponding shape. - ``x``: The x-coordinate of the vertex. - ``y``: The y-coordinate of the vertex. .. note:: Deprecated in :mod:`svg_model` 0.5.post10 :func:`svg_polygons_to_df` will be removed in :mod:`svg_model` 1.0, it is replaced by :func:`svg_shapes_to_df` because the latter is more general and works with ``svg:path`` and ``svg:polygon`` elements. ''' warnings.warn("The `svg_polygons_to_df` function is deprecated. Use " "`svg_shapes_to_df` instead.") result = svg_shapes_to_df(svg_source, xpath=xpath, namespaces=namespaces) return result[['id', 'vertex_i', 'x', 'y']].rename(columns={'id': 'path_id'})
[ "def", "svg_polygons_to_df", "(", "svg_source", ",", "xpath", "=", "'//svg:polygon'", ",", "namespaces", "=", "INKSCAPE_NSMAP", ")", ":", "warnings", ".", "warn", "(", "\"The `svg_polygons_to_df` function is deprecated. Use \"", "\"`svg_shapes_to_df` instead.\"", ")", "result", "=", "svg_shapes_to_df", "(", "svg_source", ",", "xpath", "=", "xpath", ",", "namespaces", "=", "namespaces", ")", "return", "result", "[", "[", "'id'", ",", "'vertex_i'", ",", "'x'", ",", "'y'", "]", "]", ".", "rename", "(", "columns", "=", "{", "'id'", ":", "'path_id'", "}", ")" ]
Construct a data frame with one row per vertex for all shapes (e.g., ``svg:path``, ``svg:polygon``) in :data:`svg_source``. Arguments --------- svg_source : str or file-like A file path, URI, or file-like object. xpath : str, optional XPath path expression to select shape nodes. namespaces : dict, optional Key/value mapping of XML namespaces. Returns ------- pandas.DataFrame Frame with one row per vertex for all shapes in :data:`svg_source`, with the following columns: - ``path_id``: The ``id`` attribute of the corresponding shape. - ``vertex_i``: The index of the vertex within the corresponding shape. - ``x``: The x-coordinate of the vertex. - ``y``: The y-coordinate of the vertex. .. note:: Deprecated in :mod:`svg_model` 0.5.post10 :func:`svg_polygons_to_df` will be removed in :mod:`svg_model` 1.0, it is replaced by :func:`svg_shapes_to_df` because the latter is more general and works with ``svg:path`` and ``svg:polygon`` elements.
[ "Construct", "a", "data", "frame", "with", "one", "row", "per", "vertex", "for", "all", "shapes", "(", "e", ".", "g", ".", "svg", ":", "path", "svg", ":", "polygon", ")", "in", ":", "data", ":", "svg_source", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/__init__.py#L168-L204
sci-bots/svg-model
svg_model/__init__.py
compute_shape_centers
def compute_shape_centers(df_shapes, shape_i_column, inplace=False): ''' Compute the center point of each polygon shape, and the offset of each vertex to the corresponding polygon center point. Parameters ---------- df_shapes : pandas.DataFrame Table of polygon shape vertices (one row per vertex). Must have at least the following columns: - ``vertex_i``: The index of the vertex within the corresponding shape. - ``x``: The x-coordinate of the vertex. - ``y``: The y-coordinate of the vertex. shape_i_column : str or list, optional Table rows with the same value in the :data:`shape_i_column` column are grouped together as a shape. in_place : bool, optional If ``True``, center coordinate columns are added directly to the input frame. Otherwise, center coordinate columns are added to copy of the input frame. Returns ------- pandas.DataFrame Input frame with the following additional columns: - ``x_center``/``y_center``: Absolute coordinates of shape center. - ``x_center_offset``/``y_center_offset``: * Coordinates of each vertex coordinate relative to shape center. ''' if not isinstance(shape_i_column, bytes): raise KeyError('Shape index must be a single column.') if not inplace: df_shapes = df_shapes.copy() # Get coordinates of center of each path. df_bounding_boxes = get_bounding_boxes(df_shapes, shape_i_column) path_centers = (df_bounding_boxes[['x', 'y']] + .5 * df_bounding_boxes[['width', 'height']].values) df_shapes['x_center'] = path_centers.x[df_shapes[shape_i_column]].values df_shapes['y_center'] = path_centers.y[df_shapes[shape_i_column]].values # Calculate coordinate of each path vertex relative to center point of # path. center_offset = (df_shapes[['x', 'y']] - df_shapes[['x_center', 'y_center']].values) return df_shapes.join(center_offset, rsuffix='_center_offset')
python
def compute_shape_centers(df_shapes, shape_i_column, inplace=False): ''' Compute the center point of each polygon shape, and the offset of each vertex to the corresponding polygon center point. Parameters ---------- df_shapes : pandas.DataFrame Table of polygon shape vertices (one row per vertex). Must have at least the following columns: - ``vertex_i``: The index of the vertex within the corresponding shape. - ``x``: The x-coordinate of the vertex. - ``y``: The y-coordinate of the vertex. shape_i_column : str or list, optional Table rows with the same value in the :data:`shape_i_column` column are grouped together as a shape. in_place : bool, optional If ``True``, center coordinate columns are added directly to the input frame. Otherwise, center coordinate columns are added to copy of the input frame. Returns ------- pandas.DataFrame Input frame with the following additional columns: - ``x_center``/``y_center``: Absolute coordinates of shape center. - ``x_center_offset``/``y_center_offset``: * Coordinates of each vertex coordinate relative to shape center. ''' if not isinstance(shape_i_column, bytes): raise KeyError('Shape index must be a single column.') if not inplace: df_shapes = df_shapes.copy() # Get coordinates of center of each path. df_bounding_boxes = get_bounding_boxes(df_shapes, shape_i_column) path_centers = (df_bounding_boxes[['x', 'y']] + .5 * df_bounding_boxes[['width', 'height']].values) df_shapes['x_center'] = path_centers.x[df_shapes[shape_i_column]].values df_shapes['y_center'] = path_centers.y[df_shapes[shape_i_column]].values # Calculate coordinate of each path vertex relative to center point of # path. center_offset = (df_shapes[['x', 'y']] - df_shapes[['x_center', 'y_center']].values) return df_shapes.join(center_offset, rsuffix='_center_offset')
[ "def", "compute_shape_centers", "(", "df_shapes", ",", "shape_i_column", ",", "inplace", "=", "False", ")", ":", "if", "not", "isinstance", "(", "shape_i_column", ",", "bytes", ")", ":", "raise", "KeyError", "(", "'Shape index must be a single column.'", ")", "if", "not", "inplace", ":", "df_shapes", "=", "df_shapes", ".", "copy", "(", ")", "# Get coordinates of center of each path.", "df_bounding_boxes", "=", "get_bounding_boxes", "(", "df_shapes", ",", "shape_i_column", ")", "path_centers", "=", "(", "df_bounding_boxes", "[", "[", "'x'", ",", "'y'", "]", "]", "+", ".5", "*", "df_bounding_boxes", "[", "[", "'width'", ",", "'height'", "]", "]", ".", "values", ")", "df_shapes", "[", "'x_center'", "]", "=", "path_centers", ".", "x", "[", "df_shapes", "[", "shape_i_column", "]", "]", ".", "values", "df_shapes", "[", "'y_center'", "]", "=", "path_centers", ".", "y", "[", "df_shapes", "[", "shape_i_column", "]", "]", ".", "values", "# Calculate coordinate of each path vertex relative to center point of", "# path.", "center_offset", "=", "(", "df_shapes", "[", "[", "'x'", ",", "'y'", "]", "]", "-", "df_shapes", "[", "[", "'x_center'", ",", "'y_center'", "]", "]", ".", "values", ")", "return", "df_shapes", ".", "join", "(", "center_offset", ",", "rsuffix", "=", "'_center_offset'", ")" ]
Compute the center point of each polygon shape, and the offset of each vertex to the corresponding polygon center point. Parameters ---------- df_shapes : pandas.DataFrame Table of polygon shape vertices (one row per vertex). Must have at least the following columns: - ``vertex_i``: The index of the vertex within the corresponding shape. - ``x``: The x-coordinate of the vertex. - ``y``: The y-coordinate of the vertex. shape_i_column : str or list, optional Table rows with the same value in the :data:`shape_i_column` column are grouped together as a shape. in_place : bool, optional If ``True``, center coordinate columns are added directly to the input frame. Otherwise, center coordinate columns are added to copy of the input frame. Returns ------- pandas.DataFrame Input frame with the following additional columns: - ``x_center``/``y_center``: Absolute coordinates of shape center. - ``x_center_offset``/``y_center_offset``: * Coordinates of each vertex coordinate relative to shape center.
[ "Compute", "the", "center", "point", "of", "each", "polygon", "shape", "and", "the", "offset", "of", "each", "vertex", "to", "the", "corresponding", "polygon", "center", "point", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/__init__.py#L207-L257
sci-bots/svg-model
svg_model/__init__.py
scale_points
def scale_points(df_points, scale=INKSCAPE_PPmm.magnitude, inplace=False): ''' Translate points such that bounding box is anchored at (0, 0) and scale ``x`` and ``y`` columns of input frame by specified :data:`scale`. Parameters ---------- df_points : pandas.DataFrame Table of ``x``/``y`` point positions. Must have at least the following columns: - ``x``: x-coordinate - ``y``: y-coordinate scale : float, optional Factor to scale points by. By default, scale to millimeters based on Inkscape default of 90 pixels-per-inch. scale : float, optional Factor to scale points by. in_place : bool, optional If ``True``, input frame will be modified. Otherwise, the scaled points are written to a new frame, leaving the input frame unmodified. Returns ------- pandas.DataFrame Input frame with the points translated such that bounding box is anchored at (0, 0) and ``x`` and ``y`` values scaled by specified :data:`scale`. ''' if not inplace: df_points = df_points.copy() # Offset device, such that all coordinates are >= 0. df_points.x -= df_points.x.min() df_points.y -= df_points.y.min() # Scale path coordinates. df_points.x /= scale df_points.y /= scale return df_points
python
def scale_points(df_points, scale=INKSCAPE_PPmm.magnitude, inplace=False): ''' Translate points such that bounding box is anchored at (0, 0) and scale ``x`` and ``y`` columns of input frame by specified :data:`scale`. Parameters ---------- df_points : pandas.DataFrame Table of ``x``/``y`` point positions. Must have at least the following columns: - ``x``: x-coordinate - ``y``: y-coordinate scale : float, optional Factor to scale points by. By default, scale to millimeters based on Inkscape default of 90 pixels-per-inch. scale : float, optional Factor to scale points by. in_place : bool, optional If ``True``, input frame will be modified. Otherwise, the scaled points are written to a new frame, leaving the input frame unmodified. Returns ------- pandas.DataFrame Input frame with the points translated such that bounding box is anchored at (0, 0) and ``x`` and ``y`` values scaled by specified :data:`scale`. ''' if not inplace: df_points = df_points.copy() # Offset device, such that all coordinates are >= 0. df_points.x -= df_points.x.min() df_points.y -= df_points.y.min() # Scale path coordinates. df_points.x /= scale df_points.y /= scale return df_points
[ "def", "scale_points", "(", "df_points", ",", "scale", "=", "INKSCAPE_PPmm", ".", "magnitude", ",", "inplace", "=", "False", ")", ":", "if", "not", "inplace", ":", "df_points", "=", "df_points", ".", "copy", "(", ")", "# Offset device, such that all coordinates are >= 0.", "df_points", ".", "x", "-=", "df_points", ".", "x", ".", "min", "(", ")", "df_points", ".", "y", "-=", "df_points", ".", "y", ".", "min", "(", ")", "# Scale path coordinates.", "df_points", ".", "x", "/=", "scale", "df_points", ".", "y", "/=", "scale", "return", "df_points" ]
Translate points such that bounding box is anchored at (0, 0) and scale ``x`` and ``y`` columns of input frame by specified :data:`scale`. Parameters ---------- df_points : pandas.DataFrame Table of ``x``/``y`` point positions. Must have at least the following columns: - ``x``: x-coordinate - ``y``: y-coordinate scale : float, optional Factor to scale points by. By default, scale to millimeters based on Inkscape default of 90 pixels-per-inch. scale : float, optional Factor to scale points by. in_place : bool, optional If ``True``, input frame will be modified. Otherwise, the scaled points are written to a new frame, leaving the input frame unmodified. Returns ------- pandas.DataFrame Input frame with the points translated such that bounding box is anchored at (0, 0) and ``x`` and ``y`` values scaled by specified :data:`scale`.
[ "Translate", "points", "such", "that", "bounding", "box", "is", "anchored", "at", "(", "0", "0", ")", "and", "scale", "x", "and", "y", "columns", "of", "input", "frame", "by", "specified", ":", "data", ":", "scale", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/__init__.py#L260-L304
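A sketch of scale_points on a small frame; the numeric comments assume INKSCAPE_PPmm reflects the 90 pixels-per-inch default mentioned in the docstring.

import pandas as pd
from svg_model import scale_points

df_points = pd.DataFrame({'x': [90.0, 180.0], 'y': [90.0, 270.0]})
# Anchors the bounding box at (0, 0) and converts pixels to millimetres:
# x becomes roughly [0.0, 25.4] and y roughly [0.0, 50.8].
print(scale_points(df_points))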
sci-bots/svg-model
svg_model/__init__.py
scale_to_fit_a_in_b
def scale_to_fit_a_in_b(a_shape, b_shape): ''' Return scale factor (scalar float) to fit `a_shape` into `b_shape` while maintaining aspect ratio. Arguments --------- a_shape, b_shape : pandas.Series Input shapes containing numeric `width` and `height` values. Returns ------- float Scale factor to fit :data:`a_shape` into :data:`b_shape` while maintaining aspect ratio. ''' # Normalize the shapes to allow comparison. a_shape_normal = a_shape / a_shape.max() b_shape_normal = b_shape / b_shape.max() if a_shape_normal.width > b_shape_normal.width: a_shape_normal *= b_shape_normal.width / a_shape_normal.width if a_shape_normal.height > b_shape_normal.height: a_shape_normal *= b_shape_normal.height / a_shape_normal.height return a_shape_normal.max() * b_shape.max() / a_shape.max()
python
def scale_to_fit_a_in_b(a_shape, b_shape): ''' Return scale factor (scalar float) to fit `a_shape` into `b_shape` while maintaining aspect ratio. Arguments --------- a_shape, b_shape : pandas.Series Input shapes containing numeric `width` and `height` values. Returns ------- float Scale factor to fit :data:`a_shape` into :data:`b_shape` while maintaining aspect ratio. ''' # Normalize the shapes to allow comparison. a_shape_normal = a_shape / a_shape.max() b_shape_normal = b_shape / b_shape.max() if a_shape_normal.width > b_shape_normal.width: a_shape_normal *= b_shape_normal.width / a_shape_normal.width if a_shape_normal.height > b_shape_normal.height: a_shape_normal *= b_shape_normal.height / a_shape_normal.height return a_shape_normal.max() * b_shape.max() / a_shape.max()
[ "def", "scale_to_fit_a_in_b", "(", "a_shape", ",", "b_shape", ")", ":", "# Normalize the shapes to allow comparison.", "a_shape_normal", "=", "a_shape", "/", "a_shape", ".", "max", "(", ")", "b_shape_normal", "=", "b_shape", "/", "b_shape", ".", "max", "(", ")", "if", "a_shape_normal", ".", "width", ">", "b_shape_normal", ".", "width", ":", "a_shape_normal", "*=", "b_shape_normal", ".", "width", "/", "a_shape_normal", ".", "width", "if", "a_shape_normal", ".", "height", ">", "b_shape_normal", ".", "height", ":", "a_shape_normal", "*=", "b_shape_normal", ".", "height", "/", "a_shape_normal", ".", "height", "return", "a_shape_normal", ".", "max", "(", ")", "*", "b_shape", ".", "max", "(", ")", "/", "a_shape", ".", "max", "(", ")" ]
Return scale factor (scalar float) to fit `a_shape` into `b_shape` while maintaining aspect ratio. Arguments --------- a_shape, b_shape : pandas.Series Input shapes containing numeric `width` and `height` values. Returns ------- float Scale factor to fit :data:`a_shape` into :data:`b_shape` while maintaining aspect ratio.
[ "Return", "scale", "factor", "(", "scalar", "float", ")", "to", "fit", "a_shape", "into", "b_shape", "while", "maintaining", "aspect", "ratio", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/__init__.py#L307-L333
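A worked example for scale_to_fit_a_in_b: a 200x100 shape fits into a 50x50 box at scale 0.25 (200 * 0.25 = 50, 100 * 0.25 = 25).

import pandas as pd
from svg_model import scale_to_fit_a_in_b

a = pd.Series({'width': 200.0, 'height': 100.0})
b = pd.Series({'width': 50.0, 'height': 50.0})
print(scale_to_fit_a_in_b(a, b))   # 0.25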
sci-bots/svg-model
svg_model/__init__.py
fit_points_in_bounding_box
def fit_points_in_bounding_box(df_points, bounding_box, padding_fraction=0): ''' Return data frame with ``x``, ``y`` columns scaled to fit points from :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. Arguments --------- df_points : pandas.DataFrame A frame with at least the columns ``x`` and ``y``, containing one row per point. bounding_box: pandas.Series A `pandas.Series` containing numeric `width` and `height` values. padding_fraction : float Fraction of padding to add around points. Returns ------- pandas.DataFrame Input frame with the points with ``x`` and ``y`` values scaled to fill :data:`bounding_box` while maintaining aspect ratio. ''' df_scaled_points = df_points.copy() offset, padded_scale = fit_points_in_bounding_box_params(df_points, bounding_box, padding_fraction) df_scaled_points[['x', 'y']] *= padded_scale df_scaled_points[['x', 'y']] += offset return df_scaled_points
python
def fit_points_in_bounding_box(df_points, bounding_box, padding_fraction=0): ''' Return data frame with ``x``, ``y`` columns scaled to fit points from :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. Arguments --------- df_points : pandas.DataFrame A frame with at least the columns ``x`` and ``y``, containing one row per point. bounding_box: pandas.Series A `pandas.Series` containing numeric `width` and `height` values. padding_fraction : float Fraction of padding to add around points. Returns ------- pandas.DataFrame Input frame with the points with ``x`` and ``y`` values scaled to fill :data:`bounding_box` while maintaining aspect ratio. ''' df_scaled_points = df_points.copy() offset, padded_scale = fit_points_in_bounding_box_params(df_points, bounding_box, padding_fraction) df_scaled_points[['x', 'y']] *= padded_scale df_scaled_points[['x', 'y']] += offset return df_scaled_points
[ "def", "fit_points_in_bounding_box", "(", "df_points", ",", "bounding_box", ",", "padding_fraction", "=", "0", ")", ":", "df_scaled_points", "=", "df_points", ".", "copy", "(", ")", "offset", ",", "padded_scale", "=", "fit_points_in_bounding_box_params", "(", "df_points", ",", "bounding_box", ",", "padding_fraction", ")", "df_scaled_points", "[", "[", "'x'", ",", "'y'", "]", "]", "*=", "padded_scale", "df_scaled_points", "[", "[", "'x'", ",", "'y'", "]", "]", "+=", "offset", "return", "df_scaled_points" ]
Return data frame with ``x``, ``y`` columns scaled to fit points from :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. Arguments --------- df_points : pandas.DataFrame A frame with at least the columns ``x`` and ``y``, containing one row per point. bounding_box: pandas.Series A `pandas.Series` containing numeric `width` and `height` values. padding_fraction : float Fraction of padding to add around points. Returns ------- pandas.DataFrame Input frame with the points with ``x`` and ``y`` values scaled to fill :data:`bounding_box` while maintaining aspect ratio.
[ "Return", "data", "frame", "with", "x", "y", "columns", "scaled", "to", "fit", "points", "from", ":", "data", ":", "df_points", "to", "fill", ":", "data", ":", "bounding_box", "while", "maintaining", "aspect", "ratio", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/__init__.py#L336-L364
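fit_points_in_bounding_box applies the offset and scale computed by fit_points_in_bounding_box_params (next record); a small sketch:

import pandas as pd
from svg_model import fit_points_in_bounding_box

df_points = pd.DataFrame({'x': [0.0, 4.0, 4.0, 0.0], 'y': [0.0, 0.0, 2.0, 2.0]})
bbox = pd.Series({'width': 100.0, 'height': 100.0})
# The 4x2 rectangle is scaled by 25 to 100x50 and centred vertically:
# x spans 0..100 and y spans 25..75 in the result.
print(fit_points_in_bounding_box(df_points, bbox))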
sci-bots/svg-model
svg_model/__init__.py
fit_points_in_bounding_box_params
def fit_points_in_bounding_box_params(df_points, bounding_box, padding_fraction=0): ''' Return offset and scale factor to scale ``x``, ``y`` columns of :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. Arguments --------- df_points : pandas.DataFrame A frame with at least the columns ``x`` and ``y``, containing one row per point. bounding_box: pandas.Series A `pandas.Series` containing numeric `width` and `height` values. padding_fraction : float Fraction of padding to add around points. Returns ------- (offset, scale) : (pandas.Series, float) Offset translation and scale required to fit all points in :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. :data:`offset` contains ``x`` and ``y`` values for the offset. ''' width = df_points.x.max() height = df_points.y.max() points_bbox = pd.Series([width, height], index=['width', 'height']) fill_scale = 1 - 2 * padding_fraction assert(fill_scale > 0) scale = scale_to_fit_a_in_b(points_bbox, bounding_box) padded_scale = scale * fill_scale offset = .5 * (bounding_box - points_bbox * padded_scale) offset.index = ['x', 'y'] return offset, padded_scale
python
def fit_points_in_bounding_box_params(df_points, bounding_box, padding_fraction=0): ''' Return offset and scale factor to scale ``x``, ``y`` columns of :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. Arguments --------- df_points : pandas.DataFrame A frame with at least the columns ``x`` and ``y``, containing one row per point. bounding_box: pandas.Series A `pandas.Series` containing numeric `width` and `height` values. padding_fraction : float Fraction of padding to add around points. Returns ------- (offset, scale) : (pandas.Series, float) Offset translation and scale required to fit all points in :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. :data:`offset` contains ``x`` and ``y`` values for the offset. ''' width = df_points.x.max() height = df_points.y.max() points_bbox = pd.Series([width, height], index=['width', 'height']) fill_scale = 1 - 2 * padding_fraction assert(fill_scale > 0) scale = scale_to_fit_a_in_b(points_bbox, bounding_box) padded_scale = scale * fill_scale offset = .5 * (bounding_box - points_bbox * padded_scale) offset.index = ['x', 'y'] return offset, padded_scale
[ "def", "fit_points_in_bounding_box_params", "(", "df_points", ",", "bounding_box", ",", "padding_fraction", "=", "0", ")", ":", "width", "=", "df_points", ".", "x", ".", "max", "(", ")", "height", "=", "df_points", ".", "y", ".", "max", "(", ")", "points_bbox", "=", "pd", ".", "Series", "(", "[", "width", ",", "height", "]", ",", "index", "=", "[", "'width'", ",", "'height'", "]", ")", "fill_scale", "=", "1", "-", "2", "*", "padding_fraction", "assert", "(", "fill_scale", ">", "0", ")", "scale", "=", "scale_to_fit_a_in_b", "(", "points_bbox", ",", "bounding_box", ")", "padded_scale", "=", "scale", "*", "fill_scale", "offset", "=", ".5", "*", "(", "bounding_box", "-", "points_bbox", "*", "padded_scale", ")", "offset", ".", "index", "=", "[", "'x'", ",", "'y'", "]", "return", "offset", ",", "padded_scale" ]
Return offset and scale factor to scale ``x``, ``y`` columns of :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. Arguments --------- df_points : pandas.DataFrame A frame with at least the columns ``x`` and ``y``, containing one row per point. bounding_box: pandas.Series A `pandas.Series` containing numeric `width` and `height` values. padding_fraction : float Fraction of padding to add around points. Returns ------- (offset, scale) : (pandas.Series, float) Offset translation and scale required to fit all points in :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. :data:`offset` contains ``x`` and ``y`` values for the offset.
[ "Return", "offset", "and", "scale", "factor", "to", "scale", "x", "y", "columns", "of", ":", "data", ":", "df_points", "to", "fill", ":", "data", ":", "bounding_box", "while", "maintaining", "aspect", "ratio", "." ]
train
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/__init__.py#L367-L405
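The same inputs passed to fit_points_in_bounding_box_params expose the intermediate offset and scale directly:

import pandas as pd
from svg_model import fit_points_in_bounding_box_params

df_points = pd.DataFrame({'x': [0.0, 4.0], 'y': [0.0, 2.0]})
bbox = pd.Series({'width': 100.0, 'height': 100.0})
offset, scale = fit_points_in_bounding_box_params(df_points, bbox)
print(scale)    # 25.0
print(offset)   # x: 0.0, y: 25.0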