body (stringlengths 26 to 98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (stringlengths 1 to 16.8k) | path (stringlengths 5 to 230) | name (stringlengths 1 to 96) | repository_name (stringlengths 7 to 89) | lang (stringclasses: 1 value) | body_without_docstring (stringlengths 20 to 98.2k)
---|---|---|---|---|---|---|---|
def map_items(func, v):
'A helper to apply `func` to all elements (keys and values) within dict\n\n No type checking of values passed to func is done, so `func`\n should be resilient to values which it should not handle\n\n Initial usecase - apply_recursive(url_fragment, ensure_unicode)\n '
return v.__class__((item.__class__(map(func, item)) for item in v.items())) | -4,214,522,138,324,589,600 | A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode) | datalad/utils.py | map_items | AKSoo/datalad | python | def map_items(func, v):
'A helper to apply `func` to all elements (keys and values) within dict\n\n No type checking of values passed to func is done, so `func`\n should be resilient to values which it should not handle\n\n Initial usecase - apply_recursive(url_fragment, ensure_unicode)\n '
return v.__class__((item.__class__(map(func, item)) for item in v.items())) |
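A minimal usage sketch for `map_items`, assuming DataLad is installed so the import resolves; the sample dict is hypothetical:

```python
from datalad.utils import map_items

# func is applied to keys and values alike; the dict class of the input is preserved
assert map_items(str, {1: 2, 3: 4}) == {'1': '2', '3': '4'}
```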
def partition(items, predicate=bool):
"Partition `items` by `predicate`.\n\n Parameters\n ----------\n items : iterable\n predicate : callable\n A function that will be mapped over each element in `items`. The\n elements will partitioned based on whether the return value is false or\n true.\n\n Returns\n -------\n A tuple with two generators, the first for 'false' items and the second for\n 'true' ones.\n\n Notes\n -----\n Taken from Peter Otten's snippet posted at\n https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html\n "
(a, b) = tee(((predicate(item), item) for item in items))
return ((item for (pred, item) in a if (not pred)), (item for (pred, item) in b if pred)) | 5,726,742,929,948,577,000 | Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html | datalad/utils.py | partition | AKSoo/datalad | python | def partition(items, predicate=bool):
"Partition `items` by `predicate`.\n\n Parameters\n ----------\n items : iterable\n predicate : callable\n A function that will be mapped over each element in `items`. The\n elements will partitioned based on whether the return value is false or\n true.\n\n Returns\n -------\n A tuple with two generators, the first for 'false' items and the second for\n 'true' ones.\n\n Notes\n -----\n Taken from Peter Otten's snippet posted at\n https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html\n "
(a, b) = tee(((predicate(item), item) for item in items))
return ((item for (pred, item) in a if (not pred)), (item for (pred, item) in b if pred)) |
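A short sketch of `partition`, assuming DataLad is installed; the input list and predicate are illustrative:

```python
from datalad.utils import partition

# two lazy generators: first the items where the predicate was false, then the true ones
falses, trues = partition([1, 2, 3, 4, 5], lambda x: x % 2 == 0)
assert list(falses) == [1, 3, 5]
assert list(trues) == [2, 4]
```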
def generate_chunks(container, size):
'Given a container, generate chunks from it with size up to `size`\n '
assert (size > 0), 'Size should be non-0 positive'
while container:
(yield container[:size])
container = container[size:] | 4,757,401,932,832,806,000 | Given a container, generate chunks from it with size up to `size` | datalad/utils.py | generate_chunks | AKSoo/datalad | python | def generate_chunks(container, size):
'\n '
assert (size > 0), 'Size should be non-0 positive'
while container:
(yield container[:size])
container = container[size:] |
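A tiny illustration of `generate_chunks` under the same assumption that the import resolves:

```python
from datalad.utils import generate_chunks

# slices of at most `size` items, with a shorter trailing chunk if needed
assert list(generate_chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
```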
def generate_file_chunks(files, cmd=None):
'Given a list of files, generate chunks of them to avoid exceeding cmdline length\n\n Parameters\n ----------\n files: list of str\n cmd: str or list of str, optional\n Command to account for as well\n '
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = (max(map(len, files)) if files else 0)
chunk_size = max(1, (((CMD_MAX_ARG - sum(((len(x) + 3) for x in cmd))) - 4) // (maxl + 3)))
file_chunks = generate_chunks(files, chunk_size)
return file_chunks | -910,834,174,419,088,900 | Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well | datalad/utils.py | generate_file_chunks | AKSoo/datalad | python | def generate_file_chunks(files, cmd=None):
'Given a list of files, generate chunks of them to avoid exceeding cmdline length\n\n Parameters\n ----------\n files: list of str\n cmd: str or list of str, optional\n Command to account for as well\n '
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = (max(map(len, files)) if files else 0)
chunk_size = max(1, (((CMD_MAX_ARG - sum(((len(x) + 3) for x in cmd))) - 4) // (maxl + 3)))
file_chunks = generate_chunks(files, chunk_size)
return file_chunks |
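A hedged sketch of `generate_file_chunks`; the file names and command are hypothetical, and the actual chunk sizes depend on the platform's `CMD_MAX_ARG`:

```python
from datalad.utils import generate_file_chunks

files = ['file%05d.txt' % i for i in range(10000)]
# chunk size is derived from CMD_MAX_ARG and the length of the accompanying command
chunks = list(generate_file_chunks(files, cmd=['git', 'annex', 'add']))
assert sum(len(chunk) for chunk in chunks) == len(files)  # nothing dropped, only split
```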
def saved_generator(gen):
'Given a generator returns two generators, where 2nd one just replays\n\n So the first one would be going through the generated items and 2nd one\n would be yielding saved items\n '
saved = []
def gen1():
for x in gen:
saved.append(x)
(yield x)
def gen2():
for x in saved:
(yield x)
return (gen1(), gen2()) | 6,482,172,339,794,419,000 | Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items | datalad/utils.py | saved_generator | AKSoo/datalad | python | def saved_generator(gen):
'Given a generator returns two generators, where 2nd one just replays\n\n So the first one would be going through the generated items and 2nd one\n would be yielding saved items\n '
saved = []
def gen1():
for x in gen:
saved.append(x)
(yield x)
def gen2():
for x in saved:
(yield x)
return (gen1(), gen2()) |
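A minimal sketch of `saved_generator`, assuming the import resolves:

```python
from datalad.utils import saved_generator

live, replay = saved_generator(iter(range(3)))
assert list(live) == [0, 1, 2]     # consuming the first generator records each item
assert list(replay) == [0, 1, 2]   # the second generator replays the recorded items
```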
def optional_args(decorator):
'allows a decorator to take optional positional and keyword arguments.\n Assumes that taking a single, callable, positional argument means that\n it is decorating a function, i.e. something like this::\n\n @my_decorator\n def function(): pass\n\n Calls decorator with decorator(f, `*args`, `**kwargs`)'
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = ((not kwargs) and (len(args) == 1) and isinstance(args[0], Callable))
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper | 4,930,945,743,788,969,000 | allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`) | datalad/utils.py | optional_args | AKSoo/datalad | python | def optional_args(decorator):
'allows a decorator to take optional positional and keyword arguments.\n Assumes that taking a single, callable, positional argument means that\n it is decorating a function, i.e. something like this::\n\n @my_decorator\n def function(): pass\n\n Calls decorator with decorator(f, `*args`, `**kwargs`)'
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = ((not kwargs) and (len(args) == 1) and isinstance(args[0], Callable))
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper |
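A sketch of how a decorator built with `optional_args` can be applied both bare and with keyword arguments; the `tag` decorator below is a hypothetical example, not part of DataLad:

```python
from datalad.utils import optional_args

@optional_args
def tag(f, label='default'):
    def wrapped(*args, **kwargs):
        return label, f(*args, **kwargs)
    return wrapped

@tag                  # bare use: the function itself is the single callable argument
def one():
    return 1

@tag(label='custom')  # parametrized use: arguments are applied first, then the function
def two():
    return 2

assert one() == ('default', 1)
assert two() == ('custom', 2)
```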
def get_tempfile_kwargs(tkwargs=None, prefix='', wrapped=None):
'Updates kwargs to be passed to tempfile. calls depending on env vars\n '
if (tkwargs is None):
tkwargs_ = {}
else:
tkwargs_ = tkwargs.copy()
if ('prefix' not in tkwargs_):
tkwargs_['prefix'] = '_'.join(((['datalad_temp'] + ([prefix] if prefix else [])) + ([''] if (on_windows or (not wrapped)) else [wrapped.__name__])))
directory = os.environ.get('TMPDIR')
if (directory and ('dir' not in tkwargs_)):
tkwargs_['dir'] = directory
return tkwargs_ | 3,268,730,254,755,503,000 | Updates kwargs to be passed to tempfile. calls depending on env vars | datalad/utils.py | get_tempfile_kwargs | AKSoo/datalad | python | def get_tempfile_kwargs(tkwargs=None, prefix='', wrapped=None):

'\n '
if (tkwargs is None):
tkwargs_ = {}
else:
tkwargs_ = tkwargs.copy()
if ('prefix' not in tkwargs_):
tkwargs_['prefix'] = '_'.join(((['datalad_temp'] + ([prefix] if prefix else [])) + ([''] if (on_windows or (not wrapped)) else [wrapped.__name__])))
directory = os.environ.get('TMPDIR')
if (directory and ('dir' not in tkwargs_)):
tkwargs_['dir'] = directory
return tkwargs_ |
@optional_args
def line_profile(func):
'Q&D helper to line profile the function and spit out stats\n '
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile | 7,644,648,597,135,648,000 | Q&D helper to line profile the function and spit out stats | datalad/utils.py | line_profile | AKSoo/datalad | python | @optional_args
def line_profile(func):
'\n '
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile |
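A hedged sketch of `line_profile`; it assumes the optional `line_profiler` package is installed in addition to DataLad, and the profiled function is illustrative:

```python
# requires the optional line_profiler package
from datalad.utils import line_profile

@line_profile
def busy(n):
    total = 0
    for i in range(n):
        total += i * i
    return total

busy(10000)  # per-line timing statistics are printed once the call finishes
```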
@optional_args
def collect_method_callstats(func):
'Figure out methods which call the method repeatedly on the same instance\n\n Use case(s):\n - .repo is expensive since does all kinds of checks.\n - .config is expensive transitively since it calls .repo each time\n\n TODO:\n - fancy one could look through the stack for the same id(self) to see if\n that location is already in memo. That would hint to the cases where object\n is not passed into underlying functions, causing them to redo the same work\n over and over again\n - ATM might flood with all "1 lines" calls which are not that informative.\n The underlying possibly suboptimal use might be coming from their callers.\n It might or not relate to the previous TODO\n '
from collections import defaultdict
import traceback
from time import time
memo = defaultdict((lambda : defaultdict(int)))
times = []
toppath = (dirname(__file__) + sep)
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[(- 2)]
stack_sig = '{relpath}:{s.name}'.format(s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append((time() - t0))
pass
def print_stats():
print('The cost of property {}:'.format(func.__name__))
if (not memo):
print('None since no calls')
return
counts = {k: sum(v.values()) for (k, v) in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(' Total: {} calls from {} objects with {} contexts taking {:.2f} sec'.format(total, len(ids), len(memo), sum(times)))
for ((self_id, caller), count) in sorted(counts.items(), key=(lambda x: x[1]), reverse=True):
print(' {} {}: {} from {} lines'.format(self_id, caller, count, len(memo[(self_id, caller)])))
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats | -9,041,022,728,436,863,000 | Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO | datalad/utils.py | collect_method_callstats | AKSoo/datalad | python | @optional_args
def collect_method_callstats(func):
'Figure out methods which call the method repeatedly on the same instance\n\n Use case(s):\n - .repo is expensive since does all kinds of checks.\n - .config is expensive transitively since it calls .repo each time\n\n TODO:\n - fancy one could look through the stack for the same id(self) to see if\n that location is already in memo. That would hint to the cases where object\n is not passed into underlying functions, causing them to redo the same work\n over and over again\n - ATM might flood with all "1 lines" calls which are not that informative.\n The underlying possibly suboptimal use might be coming from their callers.\n It might or not relate to the previous TODO\n '
from collections import defaultdict
import traceback
from time import time
memo = defaultdict((lambda : defaultdict(int)))
times = []
toppath = (dirname(__file__) + sep)
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[(- 2)]
stack_sig = '{relpath}:{s.name}'.format(s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append((time() - t0))
pass
def print_stats():
print('The cost of property {}:'.format(func.__name__))
if (not memo):
print('None since no calls')
return
counts = {k: sum(v.values()) for (k, v) in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(' Total: {} calls from {} objects with {} contexts taking {:.2f} sec'.format(total, len(ids), len(memo), sum(times)))
for ((self_id, caller), count) in sorted(counts.items(), key=(lambda x: x[1]), reverse=True):
print(' {} {}: {} from {} lines'.format(self_id, caller, count, len(memo[(self_id, caller)])))
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats |
def never_fail(f):
'Assure that function never fails -- all exceptions are caught\n\n Returns `None` if function fails internally.\n '
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(('DataLad internal failure while running %s: %r. Please report at https://github.com/datalad/datalad/issues' % (f, e)))
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func | -3,585,793,547,426,326,000 | Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally. | datalad/utils.py | never_fail | AKSoo/datalad | python | def never_fail(f):
'Assure that function never fails -- all exceptions are caught\n\n Returns `None` if function fails internally.\n '
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(('DataLad internal failure while running %s: %r. Please report at https://github.com/datalad/datalad/issues' % (f, e)))
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func |
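A minimal sketch of `never_fail`, assuming the `DATALAD_ALLOW_FAIL` environment variable is not set (otherwise the function is returned undecorated):

```python
from datalad.utils import never_fail

@never_fail
def fragile():
    raise RuntimeError('boom')

# the exception is swallowed (and logged as a warning); the call yields None
assert fragile() is None
```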
@contextmanager
def nothing_cm():
'Just a dummy cm to programmically switch context managers'
(yield) | -6,187,207,211,022,128,000 | Just a dummy cm to programmically switch context managers | datalad/utils.py | nothing_cm | AKSoo/datalad | python | @contextmanager
def nothing_cm():
(yield) |
@contextmanager
def swallow_outputs():
'Context manager to help consuming both stdout and stderr, and print()\n\n stdout is available as cm.out and stderr as cm.err whenever cm is the\n yielded context manager.\n Internally uses temporary files to guarantee absent side-effects of swallowing\n into StringIO which lacks .fileno.\n\n print mocking is necessary for some uses where sys.stdout was already bound\n to original sys.stdout, thus mocking it later had no effect. Overriding\n print function had desired effect\n '
class StringIOAdapter(object):
'Little adapter to help getting out/err values\n '
def __init__(self):
kw = get_tempfile_kwargs({}, prefix='outputs')
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if (not self._out.closed):
self._out.flush()
return self._read(self._out)
@property
def err(self):
if (not self._err.closed):
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return (self._out, self._err)
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if (cfg.getbool('datalad.log', 'outputs', default=False) and (lgr.getEffectiveLevel() <= logging.DEBUG)):
for (s, sname) in ((self.out, 'stdout'), (self.err, 'stderr')):
if s:
pref = (os.linesep + '| ')
lgr.debug('Swallowed %s:%s%s', sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug('Nothing was swallowed for %s', sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if (file in (oldout, olderr, sys.stdout, sys.stderr)):
try:
sys.stdout.write((sep.join(args) + end))
except UnicodeEncodeError as exc:
lgr.error("Failed to write to mocked stdout, got %s, continue as it didn't happen", exc)
else:
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
oldprint = getattr(builtins, 'print')
(oldout, olderr) = (sys.stdout, sys.stderr)
olduiout = ui.out
adapter = StringIOAdapter()
try:
(sys.stdout, sys.stderr) = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
(yield adapter)
finally:
(sys.stdout, sys.stderr, ui.out) = (oldout, olderr, olduiout)
setattr(builtins, 'print', oldprint)
adapter.cleanup() | 2,491,819,345,219,235,300 | Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to original sys.stdout, thus mocking it later had no effect. Overriding
print function had desired effect | datalad/utils.py | swallow_outputs | AKSoo/datalad | python | @contextmanager
def swallow_outputs():
'Context manager to help consuming both stdout and stderr, and print()\n\n stdout is available as cm.out and stderr as cm.err whenever cm is the\n yielded context manager.\n Internally uses temporary files to guarantee absent side-effects of swallowing\n into StringIO which lacks .fileno.\n\n print mocking is necessary for some uses where sys.stdout was already bound\n to original sys.stdout, thus mocking it later had no effect. Overriding\n print function had desired effect\n '
class StringIOAdapter(object):
'Little adapter to help getting out/err values\n '
def __init__(self):
kw = get_tempfile_kwargs({}, prefix='outputs')
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if (not self._out.closed):
self._out.flush()
return self._read(self._out)
@property
def err(self):
if (not self._err.closed):
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return (self._out, self._err)
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if (cfg.getbool('datalad.log', 'outputs', default=False) and (lgr.getEffectiveLevel() <= logging.DEBUG)):
for (s, sname) in ((self.out, 'stdout'), (self.err, 'stderr')):
if s:
pref = (os.linesep + '| ')
lgr.debug('Swallowed %s:%s%s', sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug('Nothing was swallowed for %s', sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if (file in (oldout, olderr, sys.stdout, sys.stderr)):
try:
sys.stdout.write((sep.join(args) + end))
except UnicodeEncodeError as exc:
lgr.error("Failed to write to mocked stdout, got %s, continue as it didn't happen", exc)
else:
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
oldprint = getattr(builtins, 'print')
(oldout, olderr) = (sys.stdout, sys.stderr)
olduiout = ui.out
adapter = StringIOAdapter()
try:
(sys.stdout, sys.stderr) = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
(yield adapter)
finally:
(sys.stdout, sys.stderr, ui.out) = (oldout, olderr, olduiout)
setattr(builtins, 'print', oldprint)
adapter.cleanup() |
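A short usage sketch of `swallow_outputs`, assuming the import resolves:

```python
from datalad.utils import swallow_outputs

with swallow_outputs() as cmo:
    print('hello')                 # print() is mocked and diverted into the adapter
    assert cmo.out == 'hello\n'
    assert cmo.err == ''
```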
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
'Context manager to consume all logs.\n\n '
lgr = logging.getLogger(name)
old_level = lgr.level
old_handlers = lgr.handlers
class StringIOAdapter(object):
'Little adapter to help getting out values\n\n And to stay consistent with how swallow_outputs behaves\n '
def __init__(self):
if (file_ is None):
kw = get_tempfile_kwargs({}, prefix='logs')
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if (self._final_out is not None):
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if (not file_):
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
'Provide assertion on whether a msg was logged at a given level\n\n If neither `msg` nor `level` provided, checks if anything was logged\n at all.\n\n Parameters\n ----------\n msg: str, optional\n Message (as a regular expression, if `regex`) to be searched.\n If no msg provided, checks if anything was logged at a given level.\n level: str, optional\n String representing the level to be logged\n regex: bool, optional\n If False, regular `assert_in` is used\n **kwargs: str, optional\n Passed to `assert_re_in` or `assert_in`\n '
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = (('\\[%s\\] ' % level) if level else '\\[\\S+\\] ')
else:
match = (('[%s] ' % level) if level else '')
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert (not kwargs), 'no kwargs to be passed anywhere'
assert self.out, 'Nothing was logged!?'
adapter = StringIOAdapter()
swallow_handler = logging.StreamHandler(adapter.handle)
swallow_handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers], [])
lgr.handlers = [swallow_handler]
if (old_level < logging.DEBUG):
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if (new_level is not None):
lgr.setLevel(new_level)
try:
(yield adapter)
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup() | 1,683,860,999,491,172,400 | Context manager to consume all logs. | datalad/utils.py | swallow_logs | AKSoo/datalad | python | @contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
'\n\n '
lgr = logging.getLogger(name)
old_level = lgr.level
old_handlers = lgr.handlers
class StringIOAdapter(object):
'Little adapter to help getting out values\n\n And to stay consistent with how swallow_outputs behaves\n '
def __init__(self):
if (file_ is None):
kw = get_tempfile_kwargs({}, prefix='logs')
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if (self._final_out is not None):
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if (not file_):
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
'Provide assertion on whether a msg was logged at a given level\n\n If neither `msg` nor `level` provided, checks if anything was logged\n at all.\n\n Parameters\n ----------\n msg: str, optional\n Message (as a regular expression, if `regex`) to be searched.\n If no msg provided, checks if anything was logged at a given level.\n level: str, optional\n String representing the level to be logged\n regex: bool, optional\n If False, regular `assert_in` is used\n **kwargs: str, optional\n Passed to `assert_re_in` or `assert_in`\n '
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = (('\\[%s\\] ' % level) if level else '\\[\\S+\\] ')
else:
match = (('[%s] ' % level) if level else '')
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert (not kwargs), 'no kwargs to be passed anywhere'
assert self.out, 'Nothing was logged!?'
adapter = StringIOAdapter()
swallow_handler = logging.StreamHandler(adapter.handle)
swallow_handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers], [])
lgr.handlers = [swallow_handler]
if (old_level < logging.DEBUG):
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if (new_level is not None):
lgr.setLevel(new_level)
try:
(yield adapter)
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup() |
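A hedged sketch of `swallow_logs`; the child logger name is hypothetical, and the assertion assumes no pre-existing handler filters reject the record:

```python
import logging
from datalad.utils import swallow_logs

with swallow_logs(new_level=logging.DEBUG) as cml:
    logging.getLogger('datalad.demo').debug('probe message')
    # records propagate to the swallowed 'datalad' handler and land in cml.out
    assert 'probe message' in cml.out
```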
@contextmanager
def disable_logger(logger=None):
"context manager to temporarily disable logging\n\n This is to provide one of swallow_logs' purposes without unnecessarily\n creating temp files (see gh-1865)\n\n Parameters\n ----------\n logger: Logger\n Logger whose handlers will be ordered to not log anything.\n Default: datalad's topmost Logger ('datalad')\n "
class NullFilter(logging.Filter):
'Filter class to reject all records\n '
def filter(self, record):
return 0
if (logger is None):
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
(yield logger)
finally:
[h.removeFilter(filter_) for h in logger.handlers] | -2,598,924,556,852,235,300 | context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad') | datalad/utils.py | disable_logger | AKSoo/datalad | python | @contextmanager
def disable_logger(logger=None):
"context manager to temporarily disable logging\n\n This is to provide one of swallow_logs' purposes without unnecessarily\n creating temp files (see gh-1865)\n\n Parameters\n ----------\n logger: Logger\n Logger whose handlers will be ordered to not log anything.\n Default: datalad's topmost Logger ('datalad')\n "
class NullFilter(logging.Filter):
'Filter class to reject all records\n '
def filter(self, record):
return 0
if (logger is None):
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
(yield logger)
finally:
[h.removeFilter(filter_) for h in logger.handlers] |
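A sketch combining `disable_logger` with `swallow_logs` to show that records are rejected while the context is active; the message text is illustrative:

```python
import logging
from datalad.utils import disable_logger, swallow_logs

with swallow_logs(new_level=logging.DEBUG) as cml:
    with disable_logger():   # all handlers of the 'datalad' logger reject records
        logging.getLogger('datalad').debug('should not appear')
    assert 'should not appear' not in cml.out
```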
def setup_exceptionhook(ipython=False):
'Overloads default sys.excepthook with our exceptionhook handler.\n\n If interactive, our exceptionhook handler will invoke\n pdb.post_mortem; if not interactive, then invokes default handler.\n '
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose', call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook | 2,870,071,984,776,959,500 | Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler. | datalad/utils.py | setup_exceptionhook | AKSoo/datalad | python | def setup_exceptionhook(ipython=False):
'Overloads default sys.excepthook with our exceptionhook handler.\n\n If interactive, our exceptionhook handler will invoke\n pdb.post_mortem; if not interactive, then invokes default handler.\n '
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose', call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook |
def ensure_dir(*args):
'Make sure directory exists.\n\n Joins the list of arguments to an os-specific path to the desired\n directory and creates it, if it not exists yet.\n '
dirname = op.join(*args)
if (not exists(dirname)):
os.makedirs(dirname)
return dirname | 2,885,700,046,755,640,300 | Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet. | datalad/utils.py | ensure_dir | AKSoo/datalad | python | def ensure_dir(*args):
'Make sure directory exists.\n\n Joins the list of arguments to an os-specific path to the desired\n directory and creates it, if it not exists yet.\n '
dirname = op.join(*args)
if (not exists(dirname)):
os.makedirs(dirname)
return dirname |
def updated(d, update):
"Return a copy of the input with the 'update'\n\n Primarily for updating dictionaries\n "
d = d.copy()
d.update(update)
return d | -8,583,097,218,153,299,000 | Return a copy of the input with the 'update'
Primarily for updating dictionaries | datalad/utils.py | updated | AKSoo/datalad | python | def updated(d, update):
"Return a copy of the input with the 'update'\n\n Primarily for updating dictionaries\n "
d = d.copy()
d.update(update)
return d |
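A tiny illustration of `updated`; the dictionaries are hypothetical:

```python
from datalad.utils import updated

defaults = {'recursive': False, 'jobs': 1}
assert updated(defaults, {'jobs': 4}) == {'recursive': False, 'jobs': 4}
assert defaults == {'recursive': False, 'jobs': 1}  # the input is not mutated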
def getpwd():
'Try to return a CWD without dereferencing possible symlinks\n\n This function will try to use PWD environment variable to provide a current\n working directory, possibly with some directories along the path being\n symlinks to other directories. Unfortunately, PWD is used/set only by the\n shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify\n it, thus `os.getcwd()` returns path with links dereferenced.\n\n While returning current working directory based on PWD env variable we\n verify that the directory is the same as `os.getcwd()` after resolving all\n symlinks. If that verification fails, we fall back to always use\n `os.getcwd()`.\n\n Initial decision to either use PWD env variable or os.getcwd() is done upon\n the first call of this function.\n '
global _pwd_mode
if (_pwd_mode is None):
try:
pwd = os.environ['PWD']
if (on_windows and pwd and pwd.startswith('/')):
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if (_pwd_mode == 'cwd'):
return os.getcwd()
elif (_pwd_mode == 'PWD'):
try:
cwd = os.getcwd()
except OSError as exc:
if ('o such file' in str(exc)):
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
pwd_real = str(Path(pwd).resolve().absolute())
if ((cwd is not None) and (pwd_real != cwd)):
_switch_to_getcwd('realpath of PWD=%s is %s whenever os.getcwd()=%s', pwd, pwd_real, cwd)
return cwd
return pwd
except KeyError:
_switch_to_getcwd('PWD env variable is no longer available')
return cwd
else:
raise RuntimeError(('Must have not got here. pwd_mode must be either cwd or PWD. And it is now %r' % (_pwd_mode,))) | -2,341,153,836,494,955,000 | Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify
it, thus `os.getcwd()` returns path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function. | datalad/utils.py | getpwd | AKSoo/datalad | python | def getpwd():
'Try to return a CWD without dereferencing possible symlinks\n\n This function will try to use PWD environment variable to provide a current\n working directory, possibly with some directories along the path being\n symlinks to other directories. Unfortunately, PWD is used/set only by the\n shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify\n it, thus `os.getcwd()` returns path with links dereferenced.\n\n While returning current working directory based on PWD env variable we\n verify that the directory is the same as `os.getcwd()` after resolving all\n symlinks. If that verification fails, we fall back to always use\n `os.getcwd()`.\n\n Initial decision to either use PWD env variable or os.getcwd() is done upon\n the first call of this function.\n '
global _pwd_mode
if (_pwd_mode is None):
try:
pwd = os.environ['PWD']
if (on_windows and pwd and pwd.startswith('/')):
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if (_pwd_mode == 'cwd'):
return os.getcwd()
elif (_pwd_mode == 'PWD'):
try:
cwd = os.getcwd()
except OSError as exc:
if ('o such file' in str(exc)):
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
pwd_real = str(Path(pwd).resolve().absolute())
if ((cwd is not None) and (pwd_real != cwd)):
_switch_to_getcwd('realpath of PWD=%s is %s whenever os.getcwd()=%s', pwd, pwd_real, cwd)
return cwd
return pwd
except KeyError:
_switch_to_getcwd('PWD env variable is no longer available')
return cwd
else:
raise RuntimeError(('Must have not got here. pwd_mode must be either cwd or PWD. And it is now %r' % (_pwd_mode,))) |
def dlabspath(path, norm=False):
'Symlinks-in-the-cwd aware abspath\n\n os.path.abspath relies on os.getcwd() which would not know about symlinks\n in the path\n\n TODO: we might want to norm=True by default to match behavior of\n os .path.abspath?\n '
if (not isabs(path)):
path = op.join(getpwd(), path)
return (normpath(path) if norm else path) | -8,477,254,246,258,512,000 | Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
os .path.abspath? | datalad/utils.py | dlabspath | AKSoo/datalad | python | def dlabspath(path, norm=False):
'Symlinks-in-the-cwd aware abspath\n\n os.path.abspath relies on os.getcwd() which would not know about symlinks\n in the path\n\n TODO: we might want to norm=True by default to match behavior of\n os .path.abspath?\n '
if (not isabs(path)):
path = op.join(getpwd(), path)
return (normpath(path) if norm else path) |
def with_pathsep(path):
'Little helper to guarantee that path ends with /'
return ((path + sep) if (not path.endswith(sep)) else path) | -6,586,447,538,217,079,000 | Little helper to guarantee that path ends with / | datalad/utils.py | with_pathsep | AKSoo/datalad | python | def with_pathsep(path):
return ((path + sep) if (not path.endswith(sep)) else path) |
def get_path_prefix(path, pwd=None):
'Get path prefix (for current directory)\n\n Returns relative path to the topdir, if we are under topdir, and if not\n absolute path to topdir. If `pwd` is not specified - current directory\n assumed\n '
pwd = (pwd or getpwd())
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if (common.endswith(sep) and (common in {path_, pwd_})):
location_prefix = relpath(path, pwd)
if (location_prefix in (curdir, (curdir + sep))):
location_prefix = ''
return location_prefix
else:
return path | -7,963,500,683,572,217,000 | Get path prefix (for current directory)
Returns relative path to the topdir, if we are under topdir, and if not
absolute path to topdir. If `pwd` is not specified - current directory
assumed | datalad/utils.py | get_path_prefix | AKSoo/datalad | python | def get_path_prefix(path, pwd=None):
'Get path prefix (for current directory)\n\n Returns relative path to the topdir, if we are under topdir, and if not\n absolute path to topdir. If `pwd` is not specified - current directory\n assumed\n '
pwd = (pwd or getpwd())
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if (common.endswith(sep) and (common in {path_, pwd_})):
location_prefix = relpath(path, pwd)
if (location_prefix in (curdir, (curdir + sep))):
location_prefix = ''
return location_prefix
else:
return path |
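A hedged sketch of `get_path_prefix` using hypothetical POSIX paths (the literal results differ on Windows):

```python
from datalad.utils import get_path_prefix

# under the given pwd: a relative prefix is returned
assert get_path_prefix('/data/ds/sub', pwd='/data/ds') == 'sub'
# elsewhere: the absolute path comes back unchanged
assert get_path_prefix('/other/place', pwd='/data/ds') == '/other/place'
```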
def path_startswith(path, prefix):
'Return True if path starts with prefix path\n\n Parameters\n ----------\n path: str\n prefix: str\n '
(path, prefix) = _get_normalized_paths(path, prefix)
return path.startswith(prefix) | -6,795,381,623,888,553,000 | Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str | datalad/utils.py | path_startswith | AKSoo/datalad | python | def path_startswith(path, prefix):
'Return True if path starts with prefix path\n\n Parameters\n ----------\n path: str\n prefix: str\n '
(path, prefix) = _get_normalized_paths(path, prefix)
return path.startswith(prefix) |
def path_is_subpath(path, prefix):
'Return True if path is a subpath of prefix\n\n It will return False if path == prefix.\n\n Parameters\n ----------\n path: str\n prefix: str\n '
(path, prefix) = _get_normalized_paths(path, prefix)
return ((len(prefix) < len(path)) and path.startswith(prefix)) | -3,600,815,073,040,493,600 | Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str | datalad/utils.py | path_is_subpath | AKSoo/datalad | python | def path_is_subpath(path, prefix):
'Return True if path is a subpath of prefix\n\n It will return False if path == prefix.\n\n Parameters\n ----------\n path: str\n prefix: str\n '
(path, prefix) = _get_normalized_paths(path, prefix)
return ((len(prefix) < len(path)) and path.startswith(prefix)) |
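A sketch contrasting `path_startswith` and `path_is_subpath`, again with hypothetical POSIX paths:

```python
from datalad.utils import path_is_subpath, path_startswith

assert path_startswith('/data/ds/file', '/data/ds')
assert path_startswith('/data/ds', '/data/ds')      # identical paths still "start with"
assert path_is_subpath('/data/ds/file', '/data/ds')
assert not path_is_subpath('/data/ds', '/data/ds')  # equality is explicitly excluded
```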
def knows_annex(path):
'Returns whether at a given path there is information about an annex\n\n It is just a thin wrapper around GitRepo.is_with_annex() classmethod\n which also checks for `path` to exist first.\n\n This includes actually present annexes, but also uninitialized ones, or\n even the presence of a remote annex branch.\n '
from os.path import exists
if (not exists(path)):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex() | -567,862,675,604,367,170 | Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch. | datalad/utils.py | knows_annex | AKSoo/datalad | python | def knows_annex(path):
'Returns whether at a given path there is information about an annex\n\n It is just a thin wrapper around GitRepo.is_with_annex() classmethod\n which also checks for `path` to exist first.\n\n This includes actually present annexes, but also uninitialized ones, or\n even the presence of a remote annex branch.\n '
from os.path import exists
if (not exists(path)):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex() |
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
'Helper class to provide a temporary file name and remove it at the end (context manager)\n\n Parameters\n ----------\n mkdir : bool, optional (default: False)\n If True, temporary directory created using tempfile.mkdtemp()\n content : str or bytes, optional\n Content to be stored in the file created\n wrapped : function, optional\n If set, function name used to prefix temporary file name\n `**tkwargs`:\n All other arguments are passed into the call to tempfile.mk{,d}temp(),\n and resultant temporary filename is passed as the first argument into\n the function t. If no \'prefix\' argument is provided, it will be\n constructed using module and function names (\'.\' replaced with\n \'_\').\n\n To change the used directory without providing keyword argument \'dir\' set\n DATALAD_TESTS_TEMP_DIR.\n\n Examples\n --------\n >>> from os.path import exists\n >>> from datalad.utils import make_tempfile\n >>> with make_tempfile() as fname:\n ... k = open(fname, \'w\').write(\'silly test\')\n >>> assert not exists(fname) # was removed\n\n >>> with make_tempfile(content="blah") as fname:\n ... assert open(fname).read() == "blah"\n '
if (tkwargs.get('mkdir', None) and (content is not None)):
raise ValueError('mkdir=True while providing content makes no sense')
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_)
filename = Path(filename).resolve()
if content:
(filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content)
filename = str(filename)
if __debug__:
lgr.debug('Created temporary %s named %s', ('directory' if mkdir else 'file'), filename)
try:
(yield filename)
finally:
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = ((lsuffix and filename[:(- lsuffix)]) or filename)
filenames = glob.glob((filename_ + '*'))
if ((len(filename_) < 3) or (len(filenames) > 5)):
lgr.warning(('It is unlikely that it was intended to remove all files matching %r. Skipping' % filename_))
return
for f in filenames:
try:
rmtemp(f)
except OSError:
pass | 7,536,303,112,862,519,000 | Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
and resultant temporary filename is passed as the first argument into
the function t. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah" | datalad/utils.py | make_tempfile | AKSoo/datalad | python | @contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
'Helper class to provide a temporary file name and remove it at the end (context manager)\n\n Parameters\n ----------\n mkdir : bool, optional (default: False)\n If True, temporary directory created using tempfile.mkdtemp()\n content : str or bytes, optional\n Content to be stored in the file created\n wrapped : function, optional\n If set, function name used to prefix temporary file name\n `**tkwargs`:\n All other arguments are passed into the call to tempfile.mk{,d}temp(),\n and resultant temporary filename is passed as the first argument into\n the function t. If no \'prefix\' argument is provided, it will be\n constructed using module and function names (\'.\' replaced with\n \'_\').\n\n To change the used directory without providing keyword argument \'dir\' set\n DATALAD_TESTS_TEMP_DIR.\n\n Examples\n --------\n >>> from os.path import exists\n >>> from datalad.utils import make_tempfile\n >>> with make_tempfile() as fname:\n ... k = open(fname, \'w\').write(\'silly test\')\n >>> assert not exists(fname) # was removed\n\n >>> with make_tempfile(content="blah") as fname:\n ... assert open(fname).read() == "blah"\n '
if (tkwargs.get('mkdir', None) and (content is not None)):
raise ValueError('mkdir=True while providing content makes no sense')
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_)
filename = Path(filename).resolve()
if content:
(filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content)
filename = str(filename)
if __debug__:
lgr.debug('Created temporary %s named %s', ('directory' if mkdir else 'file'), filename)
try:
(yield filename)
finally:
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = ((lsuffix and filename[:(- lsuffix)]) or filename)
filenames = glob.glob((filename_ + '*'))
if ((len(filename_) < 3) or (len(filenames) > 5)):
lgr.warning(('It is unlikely that it was intended to remove all files matching %r. Skipping' % filename_))
return
for f in filenames:
try:
rmtemp(f)
except OSError:
pass |
def _path_(*p):
'Given a path in POSIX" notation, regenerate one in native to the env one'
if on_windows:
return op.join(*map((lambda x: op.join(*x.split('/'))), p))
else:
return op.join(*p) | -5,201,050,113,459,553,000 | Given a path in POSIX" notation, regenerate one in native to the env one | datalad/utils.py | _path_ | AKSoo/datalad | python | def _path_(*p):
if on_windows:
return op.join(*map((lambda x: op.join(*x.split('/'))), p))
else:
return op.join(*p) |
def get_timestamp_suffix(time_=None, prefix='-'):
'Return a time stamp (full date and time up to second)\n\n primarily to be used for generation of log files names\n '
args = []
if (time_ is not None):
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime((prefix + TIMESTAMP_FMT), *args) | -1,093,651,240,819,786,200 | Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names | datalad/utils.py | get_timestamp_suffix | AKSoo/datalad | python | def get_timestamp_suffix(time_=None, prefix='-'):
'Return a time stamp (full date and time up to second)\n\n primarily to be used for generation of log files names\n '
args = []
if (time_ is not None):
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime((prefix + TIMESTAMP_FMT), *args) |
def get_logfilename(dspath, cmd='datalad'):
"Return a filename to use for logging under a dataset/repository\n\n directory would be created if doesn't exist, but dspath must exist\n and be a directory\n "
assert exists(dspath)
assert isdir(dspath)
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs')
return op.join(ds_logdir, ('crawl-%s.log' % get_timestamp_suffix())) | -7,525,460,786,256,228,000 | Return a filename to use for logging under a dataset/repository
directory would be created if doesn't exist, but dspath must exist
and be a directory | datalad/utils.py | get_logfilename | AKSoo/datalad | python | def get_logfilename(dspath, cmd='datalad'):
"Return a filename to use for logging under a dataset/repository\n\n directory would be created if doesn't exist, but dspath must exist\n and be a directory\n "
assert exists(dspath)
assert isdir(dspath)
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs')
return op.join(ds_logdir, ('crawl-%s.log' % get_timestamp_suffix())) |
def get_trace(edges, start, end, trace=None):
"Return the trace/path to reach a node in a tree.\n\n Parameters\n ----------\n edges : sequence(2-tuple)\n The tree given by a sequence of edges (parent, child) tuples. The\n nodes can be identified by any value and data type that supports\n the '==' operation.\n start :\n Identifier of the start node. Must be present as a value in the parent\n location of an edge tuple in order to be found.\n end :\n Identifier of the target/end node. Must be present as a value in the child\n location of an edge tuple in order to be found.\n trace : list\n Mostly useful for recursive calls, and used internally.\n\n Returns\n -------\n None or list\n Returns a list with the trace to the target (the starts and the target\n are not included in the trace, hence if start and end are directly connected\n an empty list is returned), or None when no trace to the target can be found,\n or start and end are identical.\n "
if (trace is None):
trace = []
if (not edges):
raise ValueError('no edges given')
for cand in edges:
(cand_super, cand_sub) = cand
if (cand_sub in trace):
continue
if (trace and (cand_super != trace[(- 1)])):
continue
if ((not trace) and (cand_super != start)):
continue
if (cand_sub == end):
return trace
cand_trace = get_trace(edges, start, end, (trace + [cand_sub]))
if cand_trace:
return cand_trace
return None | 2,135,985,338,423,239,000 | Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
Returns a list with the trace to the target (the starts and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical. | datalad/utils.py | get_trace | AKSoo/datalad | python | def get_trace(edges, start, end, trace=None):
"Return the trace/path to reach a node in a tree.\n\n Parameters\n ----------\n edges : sequence(2-tuple)\n The tree given by a sequence of edges (parent, child) tuples. The\n nodes can be identified by any value and data type that supports\n the '==' operation.\n start :\n Identifier of the start node. Must be present as a value in the parent\n location of an edge tuple in order to be found.\n end :\n Identifier of the target/end node. Must be present as a value in the child\n location of an edge tuple in order to be found.\n trace : list\n Mostly useful for recursive calls, and used internally.\n\n Returns\n -------\n None or list\n Returns a list with the trace to the target (the starts and the target\n are not included in the trace, hence if start and end are directly connected\n an empty list is returned), or None when no trace to the target can be found,\n or start and end are identical.\n "
if (trace is None):
trace = []
if (not edges):
raise ValueError('no edges given')
for cand in edges:
(cand_super, cand_sub) = cand
if (cand_sub in trace):
continue
if (trace and (cand_super != trace[(- 1)])):
continue
if ((not trace) and (cand_super != start)):
continue
if (cand_sub == end):
return trace
cand_trace = get_trace(edges, start, end, (trace + [cand_sub]))
if cand_trace:
return cand_trace
return None |
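A minimal sketch of `get_trace` on a hypothetical edge list:

```python
from datalad.utils import get_trace

edges = [('root', 'a'), ('a', 'b'), ('b', 'target'), ('a', 'dead-end')]
assert get_trace(edges, 'root', 'target') == ['a', 'b']  # intermediate nodes only
assert get_trace(edges, 'root', 'a') == []               # directly connected
assert get_trace(edges, 'dead-end', 'target') is None    # unreachable
```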
def get_dataset_root(path):
"Return the root of an existent dataset containing a given path\n\n The root path is returned in the same absolute or relative form\n as the input argument. If no associated dataset exists, or the\n input path doesn't exist, None is returned.\n\n If `path` is a symlink or something other than a directory, its\n the root dataset containing its parent directory will be reported.\n If none can be found, at a symlink at `path` is pointing to a\n dataset, `path` itself will be reported as the root.\n\n Parameters\n ----------\n path : Path-like\n\n Returns\n -------\n str or None\n "
path = str(path)
suffix = '.git'
altered = None
if (islink(path) or (not isdir(path))):
altered = path
path = dirname(path)
apath = abspath(path)
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
path = normpath(op.join(path, os.pardir))
apath = abspath(path)
if (altered and exists(op.join(altered, suffix))):
return altered
return None | 1,666,258,663,008,669,700 | Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
If `path` is a symlink or something other than a directory, its
the root dataset containing its parent directory will be reported.
If none can be found, at a symlink at `path` is pointing to a
dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None | datalad/utils.py | get_dataset_root | AKSoo/datalad | python | def get_dataset_root(path):
"Return the root of an existent dataset containing a given path\n\n The root path is returned in the same absolute or relative form\n as the input argument. If no associated dataset exists, or the\n input path doesn't exist, None is returned.\n\n If `path` is a symlink or something other than a directory, its\n the root dataset containing its parent directory will be reported.\n If none can be found, at a symlink at `path` is pointing to a\n dataset, `path` itself will be reported as the root.\n\n Parameters\n ----------\n path : Path-like\n\n Returns\n -------\n str or None\n "
path = str(path)
suffix = '.git'
altered = None
if (islink(path) or (not isdir(path))):
altered = path
path = dirname(path)
apath = abspath(path)
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
path = normpath(op.join(path, os.pardir))
apath = abspath(path)
if (altered and exists(op.join(altered, suffix))):
return altered
return None |
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
'Call f multiple times making exponentially growing delay between the calls'
for trial in range(1, (ntrials + 1)):
try:
return f(*args, **kwargs)
except exception as exc:
if (trial == ntrials):
raise
t = (base ** trial)
lgr.warning('Caught %s on trial #%d. Sleeping %f and retrying', CapturedException(exc), trial, t)
sleep(t) | -2,131,526,919,984,439,300 | Call f multiple times making exponentially growing delay between the calls | datalad/utils.py | try_multiple | AKSoo/datalad | python | def try_multiple(ntrials, exception, base, f, *args, **kwargs):
for trial in range(1, (ntrials + 1)):
try:
return f(*args, **kwargs)
except exception as exc:
if (trial == ntrials):
raise
t = (base ** trial)
lgr.warning('Caught %s on trial #%d. Sleeping %f and retrying', CapturedException(exc), trial, t)
sleep(t) |
@optional_args
def try_multiple_dec(f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None):
"Decorator to try function multiple times.\n\n Main purpose is to decorate functions dealing with removal of files/directories\n and which might need a few seconds to work correctly on Windows which takes\n its time to release files/directories.\n\n Parameters\n ----------\n ntrials: int, optional\n duration: float, optional\n Seconds to sleep before retrying.\n increment_type: {None, 'exponential'}\n Note that if it is exponential, duration should typically be > 1.0\n so it grows with higher power\n exceptions: Exception or tuple of Exceptions, optional\n Exception or a tuple of multiple exceptions, on which to retry\n exceptions_filter: callable, optional\n If provided, this function will be called with a caught exception\n instance. If function returns True - we will re-try, if False - exception\n will be re-raised without retrying.\n logger: callable, optional\n Logger to log upon failure. If not provided, will use stock logger\n at the level of 5 (heavy debug).\n "
if (not exceptions):
exceptions = ((OSError, WindowsError, PermissionError) if on_windows else OSError)
if (not ntrials):
ntrials = (100 if on_windows else 10)
if (logger is None):
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert (increment_type in {None, 'exponential'})
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if (exceptions_filter and (not exceptions_filter(exc))):
raise
if (trial < (ntrials - 1)):
if (increment_type == 'exponential'):
t = (duration ** (trial + 1))
logger('Caught %s on trial #%d. Sleeping %f and retrying', CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec | -414,031,308,378,374,340 | Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so that the delay grows with each retry
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If the function returns True, we will retry; if False, the exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug). | datalad/utils.py | try_multiple_dec | AKSoo/datalad | python | @optional_args
def try_multiple_dec(f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None):
"Decorator to try function multiple times.\n\n Main purpose is to decorate functions dealing with removal of files/directories\n and which might need a few seconds to work correctly on Windows which takes\n its time to release files/directories.\n\n Parameters\n ----------\n ntrials: int, optional\n duration: float, optional\n Seconds to sleep before retrying.\n increment_type: {None, 'exponential'}\n Note that if it is exponential, duration should typically be > 1.0\n so it grows with higher power\n exceptions: Exception or tuple of Exceptions, optional\n Exception or a tuple of multiple exceptions, on which to retry\n exceptions_filter: callable, optional\n If provided, this function will be called with a caught exception\n instance. If function returns True - we will re-try, if False - exception\n will be re-raised without retrying.\n logger: callable, optional\n Logger to log upon failure. If not provided, will use stock logger\n at the level of 5 (heavy debug).\n "
if (not exceptions):
exceptions = ((OSError, WindowsError, PermissionError) if on_windows else OSError)
if (not ntrials):
ntrials = (100 if on_windows else 10)
if (logger is None):
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert (increment_type in {None, 'exponential'})
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if (exceptions_filter and (not exceptions_filter(exc))):
raise
if (trial < (ntrials - 1)):
if (increment_type == 'exponential'):
t = (duration ** (trial + 1))
logger('Caught %s on trial #%d. Sleeping %f and retrying', CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec |
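A usage sketch for the try_multiple_dec decorator, wrapping a hypothetical cleanup helper that may transiently fail while Windows releases file handles:

import shutil
from datalad.utils import try_multiple_dec

@try_multiple_dec(ntrials=5, duration=0.5, exceptions=OSError)
def remove_tree(path):
    # may need a few attempts on Windows until handles are released
    shutil.rmtree(path)

remove_tree('some/scratch/dir')  # hypothetical path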
@try_multiple_dec
def unlink(f):
'\'Robust\' unlink. Would try multiple times\n\n On windows boxes there is evidence for a latency of more than a second\n until a file is considered no longer "in-use".\n WindowsError is not known on Linux, and if IOError or any other\n exception\n is thrown then if except statement has WindowsError in it -- NameError\n also see gh-2533\n '
assert_no_open_files(f)
return os.unlink(f) | 758,740,387,580,010,900 | 'Robust' unlink. Would try multiple times
On Windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, so if an except statement has
WindowsError in it and an IOError or any other exception is thrown,
a NameError would result instead;
also see gh-2533 | datalad/utils.py | unlink | AKSoo/datalad | python | @try_multiple_dec
def unlink(f):
'\'Robust\' unlink. Would try multiple times\n\n On windows boxes there is evidence for a latency of more than a second\n until a file is considered no longer "in-use".\n WindowsError is not known on Linux, and if IOError or any other\n exception\n is thrown then if except statement has WindowsError in it -- NameError\n also see gh-2533\n '
assert_no_open_files(f)
return os.unlink(f) |
@try_multiple_dec
def _rmtree(*args, **kwargs):
'Just a helper to decorate shutil.rmtree.\n\n rmtree defined above does more and ideally should not itself be decorated\n since a recursive definition and does checks for open files inside etc -\n might be too runtime expensive\n '
return shutil.rmtree(*args, **kwargs) | -4,898,703,226,461,722,000 | Just a helper to decorate shutil.rmtree.
The rmtree defined above does more and ideally should not itself be decorated,
since it has a recursive definition and does checks for open files inside etc. -
that might be too expensive at runtime | datalad/utils.py | _rmtree | AKSoo/datalad | python | @try_multiple_dec
def _rmtree(*args, **kwargs):
'Just a helper to decorate shutil.rmtree.\n\n rmtree defined above does more and ideally should not itself be decorated\n since a recursive definition and does checks for open files inside etc -\n might be too runtime expensive\n '
return shutil.rmtree(*args, **kwargs) |
def slash_join(base, extension):
"Join two strings with a '/', avoiding duplicate slashes\n\n If any of the strings is None the other is returned as is.\n "
if (extension is None):
return base
if (base is None):
return extension
return '/'.join((base.rstrip('/'), extension.lstrip('/'))) | 1,535,180,192,747,289,600 | Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is. | datalad/utils.py | slash_join | AKSoo/datalad | python | def slash_join(base, extension):
"Join two strings with a '/', avoiding duplicate slashes\n\n If any of the strings is None the other is returned as is.\n "
if (extension is None):
return base
if (base is None):
return extension
return '/'.join((base.rstrip('/'), extension.lstrip('/'))) |
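A few illustrative calls to slash_join (values are made up):

from datalad.utils import slash_join

slash_join('http://example.com/api/', '/v1/items')  # 'http://example.com/api/v1/items'
slash_join(None, 'v1/items')                        # 'v1/items'
slash_join('http://example.com', None)              # 'http://example.com'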
def open_r_encdetect(fname, readahead=1000):
'Return a file object in read mode with auto-detected encoding\n\n This is helpful when dealing with files of unknown encoding.\n\n Parameters\n ----------\n readahead: int, optional\n How many bytes to read for guessing the encoding type. If\n negative - full file will be read\n '
from chardet import detect
import io
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug('Auto-detected encoding %s for file %s (confidence: %s)', denc, fname, enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc) | -4,121,653,151,508,509,000 | Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative, the full file will be read | datalad/utils.py | open_r_encdetect | AKSoo/datalad | python | def open_r_encdetect(fname, readahead=1000):
'Return a file object in read mode with auto-detected encoding\n\n This is helpful when dealing with files of unknown encoding.\n\n Parameters\n ----------\n readahead: int, optional\n How many bytes to read for guessing the encoding type. If\n negative - full file will be read\n '
from chardet import detect
import io
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug('Auto-detected encoding %s for file %s (confidence: %s)', denc, fname, enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc) |
def read_file(fname, decode=True):
'A helper to read file passing content via ensure_unicode\n\n Parameters\n ----------\n decode: bool, optional\n if False, no ensure_unicode and file content returned as bytes\n '
with open(fname, 'rb') as f:
content = f.read()
return (ensure_unicode(content) if decode else content) | -4,267,377,191,964,235,000 | A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, ensure_unicode is not applied and the file content is returned as bytes | datalad/utils.py | read_file | AKSoo/datalad | python | def read_file(fname, decode=True):
'A helper to read file passing content via ensure_unicode\n\n Parameters\n ----------\n decode: bool, optional\n if False, no ensure_unicode and file content returned as bytes\n '
with open(fname, 'rb') as f:
content = f.read()
return (ensure_unicode(content) if decode else content) |
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
'A generator of dict records from a CSV/TSV\n\n Automatically guesses the encoding for each record to convert to UTF-8\n\n Parameters\n ----------\n fname: str\n Filename\n dialect: str, optional\n Dialect to specify to csv.reader. If not specified -- guessed from\n the file, if fails to guess, "excel-tab" is assumed\n readahead: int, optional\n How many bytes to read from the file to guess the type\n **kwargs\n Passed to `csv.reader`\n '
import csv
if (dialect is None):
with open(fname) as tsvfile:
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning('Could not determine file-format, assuming TSV: %s', CapturedException(exc))
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
csv_reader = csv.reader(tsvfile, dialect=dialect, **kwargs)
header = None
for row in csv_reader:
row_unicode = map(ensure_unicode, row)
if (header is None):
header = list(row_unicode)
else:
(yield dict(zip(header, row_unicode))) | -6,179,929,616,358,541,000 | A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified, it is guessed from
the file; if guessing fails, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader` | datalad/utils.py | read_csv_lines | AKSoo/datalad | python | def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
'A generator of dict records from a CSV/TSV\n\n Automatically guesses the encoding for each record to convert to UTF-8\n\n Parameters\n ----------\n fname: str\n Filename\n dialect: str, optional\n Dialect to specify to csv.reader. If not specified -- guessed from\n the file, if fails to guess, "excel-tab" is assumed\n readahead: int, optional\n How many bytes to read from the file to guess the type\n **kwargs\n Passed to `csv.reader`\n '
import csv
if (dialect is None):
with open(fname) as tsvfile:
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning('Could not determine file-format, assuming TSV: %s', CapturedException(exc))
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
csv_reader = csv.reader(tsvfile, dialect=dialect, **kwargs)
header = None
for row in csv_reader:
row_unicode = map(ensure_unicode, row)
if (header is None):
header = list(row_unicode)
else:
(yield dict(zip(header, row_unicode))) |
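A usage sketch for read_csv_lines; the file name and column are hypothetical:

from datalad.utils import read_csv_lines

for record in read_csv_lines('participants.tsv'):
    # each record is a dict keyed by the header row, with values decoded to unicode
    print(record.get('participant_id'))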
def import_modules(modnames, pkg, msg='Failed to import {module}', log=lgr.debug):
"Helper to import a list of modules without failing if N/A\n\n Parameters\n ----------\n modnames: list of str\n List of module names to import\n pkg: str\n Package under which to import\n msg: str, optional\n Message template for .format() to log at DEBUG level if import fails.\n Keys {module} and {package} will be provided and ': {exception}' appended\n log: callable, optional\n Logger call to use for logging messages\n "
from importlib import import_module
_globals = globals()
mods_loaded = []
if (pkg and (not (pkg in sys.modules))):
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module('.{}'.format(modname), pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(module=modname, package=pkg, exception=ce.message))
return mods_loaded | 3,748,128,079,028,701,000 | Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages | datalad/utils.py | import_modules | AKSoo/datalad | python | def import_modules(modnames, pkg, msg='Failed to import {module}', log=lgr.debug):
"Helper to import a list of modules without failing if N/A\n\n Parameters\n ----------\n modnames: list of str\n List of module names to import\n pkg: str\n Package under which to import\n msg: str, optional\n Message template for .format() to log at DEBUG level if import fails.\n Keys {module} and {package} will be provided and ': {exception}' appended\n log: callable, optional\n Logger call to use for logging messages\n "
from importlib import import_module
_globals = globals()
mods_loaded = []
if (pkg and (not (pkg in sys.modules))):
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module('.{}'.format(modname), pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(module=modname, package=pkg, exception=ce.message))
return mods_loaded |
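A usage sketch for import_modules, loading two existing datalad submodules under the 'datalad' package; a failing import would only be logged, not raised:

from datalad.utils import import_modules

loaded = import_modules(['api', 'cmd'], pkg='datalad')
# e.g. ['datalad.api', 'datalad.cmd']
print([mod.__name__ for mod in loaded])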
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
'Import provided module given a path\n\n TODO:\n - RF/make use of it in pipeline.py which has similar logic\n - join with import_modules above?\n\n Parameters\n ----------\n pkg: module, optional\n If provided, and modpath is under pkg.__path__, relative import will be\n used\n '
assert modpath.endswith('.py')
log(('Importing %s' % modpath))
modname = basename(modpath)[:(- 3)]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
relmodpath = ('.' + relpath(modpath[:(- 3)], pkgpath).replace(sep, '.'))
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if (dirname_ in sys.path):
sys.path.pop(sys.path.index(dirname_))
else:
log(('Expected path %s to be within sys.path, but it was gone!' % dirname_))
except Exception as e:
raise RuntimeError(('Failed to import module from %s' % modpath)) from e
return mod | 9,020,425,505,208,875,000 | Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used | datalad/utils.py | import_module_from_file | AKSoo/datalad | python | def import_module_from_file(modpath, pkg=None, log=lgr.debug):
'Import provided module given a path\n\n TODO:\n - RF/make use of it in pipeline.py which has similar logic\n - join with import_modules above?\n\n Parameters\n ----------\n pkg: module, optional\n If provided, and modpath is under pkg.__path__, relative import will be\n used\n '
assert modpath.endswith('.py')
log(('Importing %s' % modpath))
modname = basename(modpath)[:(- 3)]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
relmodpath = ('.' + relpath(modpath[:(- 3)], pkgpath).replace(sep, '.'))
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if (dirname_ in sys.path):
sys.path.pop(sys.path.index(dirname_))
else:
log(('Expected path %s to be within sys.path, but it was gone!' % dirname_))
except Exception as e:
raise RuntimeError(('Failed to import module from %s' % modpath)) from e
return mod |
def get_encoding_info():
'Return a dictionary with various encoding/locale information'
import sys, locale
from collections import OrderedDict
return OrderedDict([('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding())]) | -5,240,476,570,044,784,000 | Return a dictionary with various encoding/locale information | datalad/utils.py | get_encoding_info | AKSoo/datalad | python | def get_encoding_info():
import sys, locale
from collections import OrderedDict
return OrderedDict([('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding())]) |
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
'Given an archive `name`, create under `path` with specified `load` tree\n '
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite)
rmtree(full_dirname) | 4,681,706,310,525,413,000 | Given an archive `name`, create under `path` with specified `load` tree | datalad/utils.py | create_tree_archive | AKSoo/datalad | python | def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
'\n '
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite)
rmtree(full_dirname) |
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
'Given a list of tuples (name, load) create such a tree\n\n if load is a tuple itself -- that would create either a subtree or an archive\n with that content and place it into the tree if name ends with .tar.gz\n '
lgr.log(5, 'Creating a tree under %s', path)
if (not exists(path)):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for (file_, load) in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if (remove_existing and lexists(full_name)):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if (name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip')):
create_tree_archive(path, name, load, archives_leading_dir=archives_leading_dir)
else:
create_tree(full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif (full_name.split('.')[(- 1)] in ('xz', 'lzma')):
import lzma
open_func = lzma.open
with open_func(full_name, 'wb') as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, (os.stat(full_name).st_mode | stat.S_IEXEC)) | -7,951,379,912,310,503,000 | Given a list of tuples (name, load) create such a tree
if load is itself a tuple (or list/dict), that would create either a subtree or,
if the name ends with .tar.gz, an archive with that content, and place it into the tree | datalad/utils.py | create_tree | AKSoo/datalad | python | def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
'Given a list of tuples (name, load) create such a tree\n\n if load is a tuple itself -- that would create either a subtree or an archive\n with that content and place it into the tree if name ends with .tar.gz\n '
lgr.log(5, 'Creating a tree under %s', path)
if (not exists(path)):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for (file_, load) in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if (remove_existing and lexists(full_name)):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if (name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip')):
create_tree_archive(path, name, load, archives_leading_dir=archives_leading_dir)
else:
create_tree(full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif (full_name.split('.')[(- 1)] in ('xz', 'lzma')):
import lzma
open_func = lzma.open
with open_func(full_name, 'wb') as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, (os.stat(full_name).st_mode | stat.S_IEXEC)) |
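A sketch of the kind of tree specification create_tree accepts; the target directory and contents are made up. Nested dicts become subdirectories, and a name ending in .tar.gz/.tar/.zip would instead be packed into an archive:

from datalad.utils import create_tree

spec = {
    'README.md': 'demo tree\n',
    'code': {
        'run.sh': '#!/bin/sh\necho hi\n',
    },
}
create_tree('/tmp/demo_tree', spec)  # hypothetical target directory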
def get_suggestions_msg(values, known, sep='\n '):
'Return a formatted string with suggestions for values given the known ones\n '
import difflib
suggestions = []
for value in ensure_list(values):
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = 'Did you mean any of these?'
if suggestions:
if ('\n' in sep):
msg += sep
else:
msg += ' '
return (msg + ('%s\n' % sep.join(suggestions)))
return '' | -2,174,618,792,934,269,700 | Return a formatted string with suggestions for values given the known ones | datalad/utils.py | get_suggestions_msg | AKSoo/datalad | python | def get_suggestions_msg(values, known, sep='\n '):
'\n '
import difflib
suggestions = []
for value in ensure_list(values):
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = 'Did you mean any of these?'
if suggestions:
if ('\n' in sep):
msg += sep
else:
msg += ' '
return (msg + ('%s\n' % sep.join(suggestions)))
return ''
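An illustrative call to get_suggestions_msg with made-up values:

from datalad.utils import get_suggestions_msg

print(get_suggestions_msg('metdata', ['metadata', 'siblings', 'status']))
# Did you mean any of these?
#         metadata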
def bytes2human(n, format='%(value).1f %(symbol)sB'):
'\n Convert n bytes into a human readable string based on format.\n symbols can be either "customary", "customary_ext", "iec" or "iec_ext",\n see: http://goo.gl/kTQMs\n\n >>> from datalad.utils import bytes2human\n >>> bytes2human(1)\n \'1.0 B\'\n >>> bytes2human(1024)\n \'1.0 KB\'\n >>> bytes2human(1048576)\n \'1.0 MB\'\n >>> bytes2human(1099511627776127398123789121)\n \'909.5 YB\'\n\n >>> bytes2human(10000, "%(value).1f %(symbol)s/sec")\n \'9.8 K/sec\'\n\n >>> # precision can be adjusted by playing with %f operator\n >>> bytes2human(10000, format="%(value).5f %(symbol)s")\n \'9.76562 K\'\n\n Taken from: http://goo.gl/kTQMs and subsequently simplified\n Original Author: Giampaolo Rodola\' <g.rodola [AT] gmail [DOT] com>\n License: MIT\n '
n = int(n)
if (n < 0):
raise ValueError('n < 0')
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for (i, s) in enumerate(symbols[1:]):
prefix[s] = (1 << ((i + 1) * 10))
for symbol in reversed(symbols[1:]):
if (n >= prefix[symbol]):
value = (float(n) / prefix[symbol])
return (format % locals())
return (format % dict(symbol=symbols[0], value=n)) | -7,311,965,313,156,211,000 | Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT | datalad/utils.py | bytes2human | AKSoo/datalad | python | def bytes2human(n, format='%(value).1f %(symbol)sB'):
'\n Convert n bytes into a human readable string based on format.\n symbols can be either "customary", "customary_ext", "iec" or "iec_ext",\n see: http://goo.gl/kTQMs\n\n >>> from datalad.utils import bytes2human\n >>> bytes2human(1)\n \'1.0 B\'\n >>> bytes2human(1024)\n \'1.0 KB\'\n >>> bytes2human(1048576)\n \'1.0 MB\'\n >>> bytes2human(1099511627776127398123789121)\n \'909.5 YB\'\n\n >>> bytes2human(10000, "%(value).1f %(symbol)s/sec")\n \'9.8 K/sec\'\n\n >>> # precision can be adjusted by playing with %f operator\n >>> bytes2human(10000, format="%(value).5f %(symbol)s")\n \'9.76562 K\'\n\n Taken from: http://goo.gl/kTQMs and subsequently simplified\n Original Author: Giampaolo Rodola\' <g.rodola [AT] gmail [DOT] com>\n License: MIT\n '
n = int(n)
if (n < 0):
raise ValueError('n < 0')
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for (i, s) in enumerate(symbols[1:]):
prefix[s] = (1 << ((i + 1) * 10))
for symbol in reversed(symbols[1:]):
if (n >= prefix[symbol]):
value = (float(n) / prefix[symbol])
return (format % locals())
return (format % dict(symbol=symbols[0], value=n)) |
def quote_cmdlinearg(arg):
'Perform platform-appropriate argument quoting'
return ('"{}"'.format(arg.replace('"', '""')) if on_windows else shlex_quote(arg)) | 9,031,621,463,784,785,000 | Perform platform-appropriate argument quoting | datalad/utils.py | quote_cmdlinearg | AKSoo/datalad | python | def quote_cmdlinearg(arg):
return ('"{}"'.format(arg.replace('"', '')) if on_windows else shlex_quote(arg)) |
def guard_for_format(arg):
"Replace { and } with {{ and }}\n\n To be used in cases if arg is not expected to have provided\n by user .format() placeholders, but 'arg' might become a part\n of a composite passed to .format(), e.g. via 'Run'\n "
return arg.replace('{', '{{').replace('}', '}}') | 4,900,778,269,469,641,000 | Replace { and } with {{ and }}
To be used in cases where arg is not expected to contain user-provided
.format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run' | datalad/utils.py | guard_for_format | AKSoo/datalad | python | def guard_for_format(arg):
"Replace { and } with {{ and }}\n\n To be used in cases if arg is not expected to have provided\n by user .format() placeholders, but 'arg' might become a part\n of a composite passed to .format(), e.g. via 'Run'\n "
return arg.replace('{', '{{').replace('}', '}}') |
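An illustrative call to guard_for_format, showing how braces are doubled so that a later .format() on the composite string restores them intact:

from datalad.utils import guard_for_format

guarded = guard_for_format('{"key": 1}')  # '{{"key": 1}}'
('run %s' % guarded).format()             # 'run {"key": 1}'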
def join_cmdline(args):
'Join command line args into a string using quote_cmdlinearg\n '
return ' '.join(map(quote_cmdlinearg, args)) | -8,821,647,151,772,595,000 | Join command line args into a string using quote_cmdlinearg | datalad/utils.py | join_cmdline | AKSoo/datalad | python | def join_cmdline(args):
'\n '
return ' '.join(map(quote_cmdlinearg, args)) |
def split_cmdline(s):
'Perform platform-appropriate command line splitting.\n\n Identical to `shlex.split()` on non-windows platforms.\n\n Modified from https://stackoverflow.com/a/35900070\n '
if (not on_windows):
return shlex_split(s)
RE_CMD_LEX = '"((?:""|\\\\["\\\\]|[^"])*)"?()|(\\\\\\\\(?=\\\\*")|\\\\")|(&&?|\\|\\|?|\\d?>|[<])|([^\\s"&|<>]+)|(\\s+)|(.)'
args = []
accu = None
for (qs, qss, esc, pipe, word, white, fail) in re.findall(RE_CMD_LEX, s):
if word:
pass
elif esc:
word = esc[1]
elif (white or pipe):
if (accu is not None):
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError('invalid or incomplete shell string')
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
if (platform == 0):
word = word.replace('""', '"')
else:
word = qss
accu = ((accu or '') + word)
if (accu is not None):
args.append(accu)
return args | 6,851,305,332,115,587,000 | Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070 | datalad/utils.py | split_cmdline | AKSoo/datalad | python | def split_cmdline(s):
'Perform platform-appropriate command line splitting.\n\n Identical to `shlex.split()` on non-windows platforms.\n\n Modified from https://stackoverflow.com/a/35900070\n '
if (not on_windows):
return shlex_split(s)
RE_CMD_LEX = '"((?:""|\\\\["\\\\]|[^"])*)"?()|(\\\\\\\\(?=\\\\*")|\\\\")|(&&?|\\|\\|?|\\d?>|[<])|([^\\s"&|<>]+)|(\\s+)|(.)'
args = []
accu = None
for (qs, qss, esc, pipe, word, white, fail) in re.findall(RE_CMD_LEX, s):
if word:
pass
elif esc:
word = esc[1]
elif (white or pipe):
if (accu is not None):
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError('invalid or incomplete shell string')
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
if (platform == 0):
word = word.replace('""', '"')
else:
word = qss
accu = ((accu or '') + word)
if (accu is not None):
args.append(accu)
return args |
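A usage sketch for split_cmdline (the command string is made up); on non-Windows this is plain shlex.split, while on Windows the regex-based parser above should produce equivalent tokens:

from datalad.utils import split_cmdline

split_cmdline('git commit -m "initial commit"')
# -> ['git', 'commit', '-m', 'initial commit']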
def get_wrapped_class(wrapped):
'Determine the command class a wrapped __call__ belongs to'
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[(- 2)]
_func_class = mod.__dict__[command_class_name]
lgr.debug('Determined class of decorated function: %s', _func_class)
return _func_class | -6,224,155,212,005,527,000 | Determine the command class a wrapped __call__ belongs to | datalad/utils.py | get_wrapped_class | AKSoo/datalad | python | def get_wrapped_class(wrapped):
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[(- 2)]
_func_class = mod.__dict__[command_class_name]
lgr.debug('Determined class of decorated function: %s', _func_class)
return _func_class |
def check_symlink_capability(path, target):
"helper similar to datalad.tests.utils.has_symlink_capability\n\n However, for use in a datalad command context, we shouldn't\n assume to be able to write to tmpfile and also not import a whole lot from\n datalad's test machinery. Finally, we want to know, whether we can create a\n symlink at a specific location, not just somewhere. Therefore use\n arbitrary path to test-build a symlink and delete afterwards. Suitable\n location can therefore be determined by high lever code.\n\n Parameters\n ----------\n path: Path\n target: Path\n\n Returns\n -------\n bool\n "
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink() | 1,218,640,300,326,594,300 | helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume being able to write to a tmpfile, nor import a whole lot from
datalad's test machinery. Finally, we want to know whether we can create a
symlink at a specific location, not just somewhere. Therefore an
arbitrary path is used to test-build a symlink and delete it afterwards. A suitable
location can therefore be determined by higher level code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool | datalad/utils.py | check_symlink_capability | AKSoo/datalad | python | def check_symlink_capability(path, target):
"helper similar to datalad.tests.utils.has_symlink_capability\n\n However, for use in a datalad command context, we shouldn't\n assume to be able to write to tmpfile and also not import a whole lot from\n datalad's test machinery. Finally, we want to know, whether we can create a\n symlink at a specific location, not just somewhere. Therefore use\n arbitrary path to test-build a symlink and delete afterwards. Suitable\n location can therefore be determined by high lever code.\n\n Parameters\n ----------\n path: Path\n target: Path\n\n Returns\n -------\n bool\n "
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink() |
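A usage sketch for check_symlink_capability; the probe paths are hypothetical scratch locations and are removed again by the function itself:

from pathlib import Path
from datalad.utils import check_symlink_capability

probe = Path('/tmp/dl_symlink_probe')
target = Path('/tmp/dl_symlink_target')
can_symlink = check_symlink_capability(probe, target)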
def lmtime(filepath, mtime):
'Set mtime for files. On Windows merely an adapter to os.utime\n '
os.utime(filepath, (time.time(), mtime)) | 473,848,352,068,869,500 | Set mtime for files. On Windows merely an adapter to os.utime | datalad/utils.py | lmtime | AKSoo/datalad | python | def lmtime(filepath, mtime):
'\n '
os.utime(filepath, (time.time(), mtime)) |
def lmtime(filepath, mtime):
'Set mtime for files, while not de-referencing symlinks.\n\n To overcome absence of os.lutime\n\n Works only on linux and OSX ATM\n '
from .cmd import WitlessRunner
smtime = time.strftime('%Y%m%d%H%M.%S', time.localtime(mtime))
lgr.log(3, 'Setting mtime for %s to %s == %s', filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', ('%s' % smtime), filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if (filepath.is_symlink() and rfilepath.exists()):
lgr.log(3, 'File is a symlink to %s Setting mtime for it to %s', rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime)) | 1,039,836,322,329,771,400 | Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM | datalad/utils.py | lmtime | AKSoo/datalad | python | def lmtime(filepath, mtime):
'Set mtime for files, while not de-referencing symlinks.\n\n To overcome absence of os.lutime\n\n Works only on linux and OSX ATM\n '
from .cmd import WitlessRunner
smtime = time.strftime('%Y%m%d%H%M.%S', time.localtime(mtime))
lgr.log(3, 'Setting mtime for %s to %s == %s', filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', ('%s' % smtime), filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if (filepath.is_symlink() and rfilepath.exists()):
lgr.log(3, 'File is a symlink to %s Setting mtime for it to %s', rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime)) |
def format_element(self, elem, format_spec):
'Format a single element\n\n For sequences, this is called once for each element in a\n sequence. For anything else, it is called on the entire\n object. It is intended to be overridden in subclases.\n '
return self.element_formatter.format_field(elem, format_spec) | 6,269,833,802,502,472,000 | Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
object. It is intended to be overridden in subclasses. | mtools/util/logevent.py | format_element | sindbach/mtools | python | def format_element(self, elem, format_spec):
'Format a single element\n\n For sequences, this is called once for each element in a\n sequence. For anything else, it is called on the entire\n object. It is intended to be overridden in subclases.\n '
return self.element_formatter.format_field(elem, format_spec) |
def __init__(self, name, executable=False):
'\n\n Parameters\n ----------\n name : str\n Name of the file\n executable: bool, optional\n Make it executable\n '
self.name = name
self.executable = executable | -4,959,471,173,376,464,000 | Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable | datalad/utils.py | __init__ | AKSoo/datalad | python | def __init__(self, name, executable=False):
'\n\n Parameters\n ----------\n name : str\n Name of the file\n executable: bool, optional\n Make it executable\n '
self.name = name
self.executable = executable |
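File is meant to be used as a key in the tree specifications consumed by create_tree above; a sketch with made-up content, marking one entry as executable:

from datalad.utils import File, create_tree

create_tree('/tmp/demo_tree2', {  # hypothetical target directory
    File('run.sh', executable=True): '#!/bin/sh\necho hi\n',
    'notes.txt': 'plain file\n',
})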
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
'Provide assertion on whether a msg was logged at a given level\n\n If neither `msg` nor `level` provided, checks if anything was logged\n at all.\n\n Parameters\n ----------\n msg: str, optional\n Message (as a regular expression, if `regex`) to be searched.\n If no msg provided, checks if anything was logged at a given level.\n level: str, optional\n String representing the level to be logged\n regex: bool, optional\n If False, regular `assert_in` is used\n **kwargs: str, optional\n Passed to `assert_re_in` or `assert_in`\n '
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = (('\\[%s\\] ' % level) if level else '\\[\\S+\\] ')
else:
match = (('[%s] ' % level) if level else '')
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert (not kwargs), 'no kwargs to be passed anywhere'
assert self.out, 'Nothing was logged!?' | -3,269,136,505,347,764,700 | Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in` | datalad/utils.py | assert_logged | AKSoo/datalad | python | def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
'Provide assertion on whether a msg was logged at a given level\n\n If neither `msg` nor `level` provided, checks if anything was logged\n at all.\n\n Parameters\n ----------\n msg: str, optional\n Message (as a regular expression, if `regex`) to be searched.\n If no msg provided, checks if anything was logged at a given level.\n level: str, optional\n String representing the level to be logged\n regex: bool, optional\n If False, regular `assert_in` is used\n **kwargs: str, optional\n Passed to `assert_re_in` or `assert_in`\n '
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = (('\\[%s\\] ' % level) if level else '\\[\\S+\\] ')
else:
match = (('[%s] ' % level) if level else '')
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert (not kwargs), 'no kwargs to be passed anywhere'
assert self.out, 'Nothing was logged!?' |
@cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'payload': (DeleteOryAccessControlPolicyInternalServerErrorBody,)} | 592,781,851,320,592,800 | This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type. | clients/keto/python/ory_keto_client/model/delete_ory_access_control_policy_internal_server_error.py | openapi_types | Stackwalkerllc/sdk | python | @cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'payload': (DeleteOryAccessControlPolicyInternalServerErrorBody,)} |
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
'DeleteOryAccessControlPolicyInternalServerError - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n payload (DeleteOryAccessControlPolicyInternalServerErrorBody): [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value) | -3,289,304,096,865,934,000 | DeleteOryAccessControlPolicyInternalServerError - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
payload (DeleteOryAccessControlPolicyInternalServerErrorBody): [optional] # noqa: E501 | clients/keto/python/ory_keto_client/model/delete_ory_access_control_policy_internal_server_error.py | __init__ | Stackwalkerllc/sdk | python | @convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
'DeleteOryAccessControlPolicyInternalServerError - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n payload (DeleteOryAccessControlPolicyInternalServerErrorBody): [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value) |
def set_line_str(self, line_str):
'\n Set line_str.\n\n Line_str is only writeable if LogEvent was created from a string,\n not from a system.profile documents.\n '
if (not self.from_string):
raise ValueError("can't set line_str for LogEvent created from system.profile documents.")
if (line_str != self._line_str):
self._line_str = line_str.rstrip()
self._reset() | 7,303,293,722,448,755,000 | Set line_str.
Line_str is only writeable if LogEvent was created from a string,
not from a system.profile documents. | mtools/util/logevent.py | set_line_str | sindbach/mtools | python | def set_line_str(self, line_str):
'\n Set line_str.\n\n Line_str is only writeable if LogEvent was created from a string,\n not from a system.profile documents.\n '
if (not self.from_string):
raise ValueError("can't set line_str for LogEvent created from system.profile documents.")
if (line_str != self._line_str):
self._line_str = line_str.rstrip()
self._reset() |
def get_line_str(self):
'Return line_str depending on source, logfile or system.profile.'
if self.from_string:
return ' '.join([s for s in [self.merge_marker_str, self._datetime_str, self._line_str] if s])
else:
return ' '.join([s for s in [self._datetime_str, self._line_str] if s]) | -4,736,257,944,441,563,000 | Return line_str depending on source, logfile or system.profile. | mtools/util/logevent.py | get_line_str | sindbach/mtools | python | def get_line_str(self):
if self.from_string:
return ' '.join([s for s in [self.merge_marker_str, self._datetime_str, self._line_str] if s])
else:
return ' '.join([s for s in [self._datetime_str, self._line_str] if s]) |
@property
def split_tokens(self):
'Split string into tokens (lazy).'
if (not self._split_tokens_calculated):
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens | 5,830,234,605,733,159,000 | Split string into tokens (lazy). | mtools/util/logevent.py | split_tokens | sindbach/mtools | python | @property
def split_tokens(self):
if (not self._split_tokens_calculated):
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens |
@property
def duration(self):
'Calculate duration if available (lazy).'
if (not self._duration_calculated):
self._duration_calculated = True
line_str = self.line_str
if (line_str and line_str.endswith('ms') and ('Scheduled new oplog query' not in line_str)):
try:
space_pos = line_str.rfind(' ')
if (space_pos == (- 1)):
return
self._duration = int(line_str[(line_str.rfind(' ') + 1):(- 2)].replace(',', ''))
except ValueError:
self._duration = None
elif ('flushing' in self.line_str):
matchobj = re.search('flushing mmaps took (\\d+)ms', self.line_str)
if matchobj:
self._duration = int(matchobj.group(1))
return self._duration | -6,191,071,677,299,454,000 | Calculate duration if available (lazy). | mtools/util/logevent.py | duration | sindbach/mtools | python | @property
def duration(self):
if (not self._duration_calculated):
self._duration_calculated = True
line_str = self.line_str
if (line_str and line_str.endswith('ms') and ('Scheduled new oplog query' not in line_str)):
try:
space_pos = line_str.rfind(' ')
if (space_pos == (- 1)):
return
self._duration = int(line_str[(line_str.rfind(' ') + 1):(- 2)].replace(',', ''))
except ValueError:
self._duration = None
elif ('flushing' in self.line_str):
matchobj = re.search('flushing mmaps took (\\d+)ms', self.line_str)
if matchobj:
self._duration = int(matchobj.group(1))
return self._duration |
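A sketch of how these lazily computed LogEvent properties would typically be consumed; the log line is made up but follows the iso8601-local format the parser recognizes, and the values in the comments are what the default parsing should yield:

from mtools.util.logevent import LogEvent

line = ('2019-06-14T09:26:02.392+0100 I COMMAND [conn42] command test.coll '
        'command: find { find: "coll", filter: { a: 1 } } 120ms')
le = LogEvent(line)
le.duration   # e.g. 120
le.thread     # e.g. 'conn42'
le.operation  # e.g. 'command'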
@property
def datetime(self):
'Extract datetime if available (lazy).'
if (not self._datetime_calculated):
self._datetime_calculated = True
split_tokens = self.split_tokens[:10]
for offs in range(len(split_tokens)):
dt = self._match_datetime_pattern(split_tokens[offs:(offs + 4)])
if dt:
self._datetime = dt
self._datetime_nextpos = offs
if self._datetime_format.startswith('iso8601'):
self._datetime_nextpos += 1
else:
self._datetime_nextpos += 4
self._line_str = ' '.join(self.split_tokens[self._datetime_nextpos:])
if self.level:
self._datetime_nextpos += 2
self._reformat_timestamp(self._datetime_format)
break
return self._datetime | 7,229,479,786,862,700,000 | Extract datetime if available (lazy). | mtools/util/logevent.py | datetime | sindbach/mtools | python | @property
def datetime(self):
if (not self._datetime_calculated):
self._datetime_calculated = True
split_tokens = self.split_tokens[:10]
for offs in range(len(split_tokens)):
dt = self._match_datetime_pattern(split_tokens[offs:(offs + 4)])
if dt:
self._datetime = dt
self._datetime_nextpos = offs
if self._datetime_format.startswith('iso8601'):
self._datetime_nextpos += 1
else:
self._datetime_nextpos += 4
self._line_str = ' '.join(self.split_tokens[self._datetime_nextpos:])
if self.level:
self._datetime_nextpos += 2
self._reformat_timestamp(self._datetime_format)
break
return self._datetime |
def _match_datetime_pattern(self, tokens):
"\n Match the datetime pattern at the beginning of the token list.\n\n There are several formats that this method needs to understand\n and distinguish between (see MongoDB's SERVER-7965):\n\n ctime-pre2.4 Wed Dec 31 19:00:00\n ctime Wed Dec 31 19:00:00.000\n iso8601-utc 1970-01-01T00:00:00.000Z\n iso8601-local 1969-12-31T19:00:00.000+0500\n "
assume_iso8601_format = (len(tokens) < 4)
if (not assume_iso8601_format):
(weekday, month, day, time) = tokens[:4]
if ((len(tokens) < 4) or (weekday not in self.weekdays) or (month not in self.months) or (not day.isdigit())):
assume_iso8601_format = True
if assume_iso8601_format:
if (not re.match('\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}', tokens[0])):
return None
dt = dateutil.parser.parse(tokens[0])
self._datetime_format = ('iso8601-utc' if tokens[0].endswith('Z') else 'iso8601-local')
else:
year = datetime.now().year
dt = dateutil.parser.parse(' '.join(tokens[:4]), default=datetime(year, 1, 1))
if (dt.tzinfo is None):
dt = dt.replace(tzinfo=tzutc())
if (self._year_rollover and (dt > self._year_rollover)):
dt = dt.replace(year=(year - 1))
self._datetime_format = ('ctime' if ('.' in tokens[3]) else 'ctime-pre2.4')
return dt | 2,427,690,610,165,711,000 | Match the datetime pattern at the beginning of the token list.
There are several formats that this method needs to understand
and distinguish between (see MongoDB's SERVER-7965):
ctime-pre2.4 Wed Dec 31 19:00:00
ctime Wed Dec 31 19:00:00.000
iso8601-utc 1970-01-01T00:00:00.000Z
iso8601-local 1969-12-31T19:00:00.000+0500 | mtools/util/logevent.py | _match_datetime_pattern | sindbach/mtools | python | def _match_datetime_pattern(self, tokens):
"\n Match the datetime pattern at the beginning of the token list.\n\n There are several formats that this method needs to understand\n and distinguish between (see MongoDB's SERVER-7965):\n\n ctime-pre2.4 Wed Dec 31 19:00:00\n ctime Wed Dec 31 19:00:00.000\n iso8601-utc 1970-01-01T00:00:00.000Z\n iso8601-local 1969-12-31T19:00:00.000+0500\n "
assume_iso8601_format = (len(tokens) < 4)
if (not assume_iso8601_format):
(weekday, month, day, time) = tokens[:4]
if ((len(tokens) < 4) or (weekday not in self.weekdays) or (month not in self.months) or (not day.isdigit())):
assume_iso8601_format = True
if assume_iso8601_format:
if (not re.match('\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}', tokens[0])):
return None
dt = dateutil.parser.parse(tokens[0])
self._datetime_format = ('iso8601-utc' if tokens[0].endswith('Z') else 'iso8601-local')
else:
year = datetime.now().year
dt = dateutil.parser.parse(' '.join(tokens[:4]), default=datetime(year, 1, 1))
if (dt.tzinfo is None):
dt = dt.replace(tzinfo=tzutc())
if (self._year_rollover and (dt > self._year_rollover)):
dt = dt.replace(year=(year - 1))
self._datetime_format = ('ctime' if ('.' in tokens[3]) else 'ctime-pre2.4')
return dt |
@property
def thread(self):
'Extract thread name if available (lazy).'
if (not self._thread_calculated):
self._thread_calculated = True
split_tokens = self.split_tokens
if (not self.datetime_nextpos):
return None
if (len(split_tokens) <= self.datetime_nextpos):
return None
connection_token = split_tokens[self.datetime_nextpos]
match = re.match('^\\[([^\\]]*)\\]$', connection_token)
if match:
self._thread = match.group(1)
if (self._thread is not None):
if (self._thread in ['initandlisten', 'mongosMain']):
if ((len(split_tokens) >= 5) and (split_tokens[(- 5)][0] == '#')):
self._conn = ('conn' + split_tokens[(- 5)][1:])
elif self._thread.startswith('conn'):
self._conn = self._thread
return self._thread | 5,699,371,472,807,143,000 | Extract thread name if available (lazy). | mtools/util/logevent.py | thread | sindbach/mtools | python | @property
def thread(self):
if (not self._thread_calculated):
self._thread_calculated = True
split_tokens = self.split_tokens
if (not self.datetime_nextpos):
return None
if (len(split_tokens) <= self.datetime_nextpos):
return None
connection_token = split_tokens[self.datetime_nextpos]
match = re.match('^\\[([^\\]]*)\\]$', connection_token)
if match:
self._thread = match.group(1)
if (self._thread is not None):
if (self._thread in ['initandlisten', 'mongosMain']):
if ((len(split_tokens) >= 5) and (split_tokens[(- 5)][0] == '#')):
self._conn = ('conn' + split_tokens[(- 5)][1:])
elif self._thread.startswith('conn'):
self._conn = self._thread
return self._thread |
@property
def conn(self):
"\n Extract conn name if available (lazy).\n\n This value is None for all lines except the log lines related to\n connections, that is lines matching '\\[conn[0-9]+\\]' or\n '\\[(initandlisten|mongosMain)\\] .* connection accepted from'.\n "
self.thread
return self._conn | 5,706,740,834,924,622,000 | Extract conn name if available (lazy).
This value is None for all lines except the log lines related to
connections, that is lines matching '\[conn[0-9]+\]' or
'\[(initandlisten|mongosMain)\] .* connection accepted from'. | mtools/util/logevent.py | conn | sindbach/mtools | python | @property
def conn(self):
"\n Extract conn name if available (lazy).\n\n This value is None for all lines except the log lines related to\n connections, that is lines matching '\\[conn[0-9]+\\]' or\n '\\[(initandlisten|mongosMain)\\] .* connection accepted from'.\n "
self.thread
return self._conn |
@property
def operation(self):
'\n Extract operation if available (lazy).\n\n Operations: query, insert, update, remove, getmore, command\n '
if (not self._operation_calculated):
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._operation | -3,139,542,630,484,868,600 | Extract operation if available (lazy).
Operations: query, insert, update, remove, getmore, command | mtools/util/logevent.py | operation | sindbach/mtools | python | @property
def operation(self):
'\n Extract operation if available (lazy).\n\n Operations: query, insert, update, remove, getmore, command\n '
if (not self._operation_calculated):
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._operation |
@property
def namespace(self):
'Extract namespace if available (lazy).'
if (not self._operation_calculated):
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._namespace | -8,693,397,654,284,354,000 | Extract namespace if available (lazy). | mtools/util/logevent.py | namespace | sindbach/mtools | python | @property
def namespace(self):
if (not self._operation_calculated):
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._namespace |
def _extract_operation_and_namespace(self):
"\n Helper method to extract both operation and namespace from a logevent.\n\n It doesn't make sense to only extract one as they appear back to back\n in the token list.\n "
split_tokens = self.split_tokens
if (not self._datetime_nextpos):
_ = self.thread
if ((not self._datetime_nextpos) or (len(split_tokens) <= (self._datetime_nextpos + 2))):
return
op = split_tokens[(self._datetime_nextpos + 1)].lower()
if (op == 'warning:'):
if (('warning: log line attempted' in self._line_str) and ('over max size' in self._line_str)):
self._datetime_nextpos = split_tokens.index('...')
op = split_tokens[(self._datetime_nextpos + 1)]
else:
return
if (op in self.log_operations):
self._operation = op
self._namespace = split_tokens[(self._datetime_nextpos + 2)] | 6,718,564,088,524,435,000 | Helper method to extract both operation and namespace from a logevent.
It doesn't make sense to only extract one as they appear back to back
in the token list. | mtools/util/logevent.py | _extract_operation_and_namespace | sindbach/mtools | python | def _extract_operation_and_namespace(self):
"\n Helper method to extract both operation and namespace from a logevent.\n\n It doesn't make sense to only extract one as they appear back to back\n in the token list.\n "
split_tokens = self.split_tokens
if (not self._datetime_nextpos):
_ = self.thread
if ((not self._datetime_nextpos) or (len(split_tokens) <= (self._datetime_nextpos + 2))):
return
op = split_tokens[(self._datetime_nextpos + 1)].lower()
if (op == 'warning:'):
if (('warning: log line attempted' in self._line_str) and ('over max size' in self._line_str)):
self._datetime_nextpos = split_tokens.index('...')
op = split_tokens[(self._datetime_nextpos + 1)]
else:
return
if (op in self.log_operations):
self._operation = op
self._namespace = split_tokens[(self._datetime_nextpos + 2)] |
@property
def pattern(self):
'Extract query pattern from operations.'
if (not self._pattern):
if ((self.operation in ['query', 'getmore', 'update', 'remove']) or (self.command in ['count', 'findandmodify'])):
self._pattern = self._find_pattern('query: ')
elif (self.command == 'find'):
self._pattern = self._find_pattern('filter: ')
return self._pattern | 3,728,147,135,323,575,300 | Extract query pattern from operations. | mtools/util/logevent.py | pattern | sindbach/mtools | python | @property
def pattern(self):
if (not self._pattern):
if ((self.operation in ['query', 'getmore', 'update', 'remove']) or (self.command in ['count', 'findandmodify'])):
self._pattern = self._find_pattern('query: ')
elif (self.command == 'find'):
self._pattern = self._find_pattern('filter: ')
return self._pattern |
@property
def sort_pattern(self):
'Extract sort pattern from operations.'
if (not self._sort_pattern):
if (self.operation in ['query', 'getmore']):
self._sort_pattern = self._find_pattern('orderby: ')
return self._sort_pattern | -5,626,022,467,865,773,000 | Extract sort pattern from operations. | mtools/util/logevent.py | sort_pattern | sindbach/mtools | python | @property
def sort_pattern(self):
if (not self._sort_pattern):
if (self.operation in ['query', 'getmore']):
self._sort_pattern = self._find_pattern('orderby: ')
return self._sort_pattern |
@property
def command(self):
'Extract command if available (lazy).'
if (not self._command_calculated):
self._command_calculated = True
if (self.operation == 'command'):
try:
command_idx = self.split_tokens.index('command:')
command = self.split_tokens[(command_idx + 1)]
if (command == '{'):
command = self.split_tokens[(command_idx + 2)][:(- 1)]
self._command = command.lower()
except ValueError:
pass
return self._command | 3,725,386,157,492,850,000 | Extract command if available (lazy). | mtools/util/logevent.py | command | sindbach/mtools | python | @property
def command(self):
if (not self._command_calculated):
self._command_calculated = True
if (self.operation == 'command'):
try:
command_idx = self.split_tokens.index('command:')
command = self.split_tokens[(command_idx + 1)]
if (command == '{'):
command = self.split_tokens[(command_idx + 2)][:(- 1)]
self._command = command.lower()
except ValueError:
pass
return self._command |
@property
def nscanned(self):
'Extract nscanned or keysExamined counter if available (lazy).'
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._nscanned | 1,807,220,756,329,251,600 | Extract nscanned or keysExamined counter if available (lazy). | mtools/util/logevent.py | nscanned | sindbach/mtools | python | @property
def nscanned(self):
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._nscanned |
@property
def nscannedObjects(self):
'\n Extract counters if available (lazy).\n\n Looks for nscannedObjects or docsExamined.\n '
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._nscannedObjects | -7,772,727,705,828,972,000 | Extract counters if available (lazy).
Looks for nscannedObjects or docsExamined. | mtools/util/logevent.py | nscannedObjects | sindbach/mtools | python | @property
def nscannedObjects(self):
'\n Extract counters if available (lazy).\n\n Looks for nscannedObjects or docsExamined.\n '
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._nscannedObjects |
@property
def ntoreturn(self):
'Extract ntoreturn counter if available (lazy).'
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._ntoreturn | 1,006,731,818,353,051,600 | Extract ntoreturn counter if available (lazy). | mtools/util/logevent.py | ntoreturn | sindbach/mtools | python | @property
def ntoreturn(self):
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._ntoreturn |
@property
def writeConflicts(self):
'Extract writeConflicts counter if available (lazy).'
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._writeConflicts | 3,734,549,808,003,737,600 | Extract writeConflicts counter if available (lazy). | mtools/util/logevent.py | writeConflicts | sindbach/mtools | python | @property
def writeConflicts(self):
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._writeConflicts |
@property
def nreturned(self):
'\n Extract counters if available (lazy).\n\n Looks for nreturned, nReturned, or nMatched counter.\n '
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._nreturned | 8,576,279,460,733,580,000 | Extract counters if available (lazy).
Looks for nreturned, nReturned, or nMatched counter. | mtools/util/logevent.py | nreturned | sindbach/mtools | python | @property
def nreturned(self):
'\n Extract counters if available (lazy).\n\n Looks for nreturned, nReturned, or nMatched counter.\n '
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._nreturned |
@property
def ninserted(self):
'Extract ninserted or nInserted counter if available (lazy).'
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._ninserted | -5,332,214,218,849,756,000 | Extract ninserted or nInserted counter if available (lazy). | mtools/util/logevent.py | ninserted | sindbach/mtools | python | @property
def ninserted(self):
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._ninserted |
@property
def ndeleted(self):
'Extract ndeleted or nDeleted counter if available (lazy).'
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._ndeleted | -5,896,970,563,387,445,000 | Extract ndeleted or nDeleted counter if available (lazy). | mtools/util/logevent.py | ndeleted | sindbach/mtools | python | @property
def ndeleted(self):
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._ndeleted |
@property
def nupdated(self):
'Extract nupdated or nModified counter if available (lazy).'
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._nupdated | 2,019,395,670,723,748,600 | Extract nupdated or nModified counter if available (lazy). | mtools/util/logevent.py | nupdated | sindbach/mtools | python | @property
def nupdated(self):
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._nupdated |
@property
def numYields(self):
'Extract numYields counter if available (lazy).'
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._numYields | 6,802,860,109,620,639,000 | Extract numYields counter if available (lazy). | mtools/util/logevent.py | numYields | sindbach/mtools | python | @property
def numYields(self):
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._numYields |
@property
def planSummary(self):
'Extract planSummary if available (lazy).'
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._planSummary | -8,060,062,694,230,500,000 | Extract planSummary if available (lazy). | mtools/util/logevent.py | planSummary | sindbach/mtools | python | @property
def planSummary(self):
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._planSummary |
@property
def r(self):
'Extract read lock (r) counter if available (lazy).'
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._r | -7,113,388,393,597,944,000 | Extract read lock (r) counter if available (lazy). | mtools/util/logevent.py | r | sindbach/mtools | python | @property
def r(self):
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._r |
@property
def w(self):
'Extract write lock (w) counter if available (lazy).'
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._w | -4,226,283,736,360,890,400 | Extract write lock (w) counter if available (lazy). | mtools/util/logevent.py | w | sindbach/mtools | python | @property
def w(self):
if (not self._counters_calculated):
self._counters_calculated = True
self._extract_counters()
return self._w |
def _extract_counters(self):
'Extract counters like nscanned and nreturned from the logevent.'
counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned', 'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields', 'planSummary', 'writeConflicts', 'keyUpdates']
counter_equiv = {'docsExamined': 'nscannedObjects', 'keysExamined': 'nscanned', 'nDeleted': 'ndeleted', 'nInserted': 'ninserted', 'nMatched': 'nreturned', 'nModified': 'nupdated'}
counters.extend(counter_equiv.keys())
split_tokens = self.split_tokens
if self.operation:
for (t, token) in enumerate(split_tokens[(self.datetime_nextpos + 2):]):
for counter in counters:
if token.startswith(('%s:' % counter)):
try:
counter = counter_equiv.get(counter, counter)
vars(self)[('_' + counter)] = int(token.split(':')[(- 1)].replace(',', ''))
except ValueError:
if ((counter == 'numYields') and token.startswith('numYields')):
try:
self._numYields = int(split_tokens[(((t + 1) + self.datetime_nextpos) + 2)].replace(',', ''))
except ValueError:
pass
if ((counter == 'planSummary') and token.startswith('planSummary')):
try:
self._planSummary = split_tokens[(((t + 1) + self.datetime_nextpos) + 2)]
except ValueError:
pass
break | -8,631,324,852,518,519,000 | Extract counters like nscanned and nreturned from the logevent. | mtools/util/logevent.py | _extract_counters | sindbach/mtools | python | def _extract_counters(self):
counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned', 'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields', 'planSummary', 'writeConflicts', 'keyUpdates']
counter_equiv = {'docsExamined': 'nscannedObjects', 'keysExamined': 'nscanned', 'nDeleted': 'ndeleted', 'nInserted': 'ninserted', 'nMatched': 'nreturned', 'nModified': 'nupdated'}
counters.extend(counter_equiv.keys())
split_tokens = self.split_tokens
if self.operation:
for (t, token) in enumerate(split_tokens[(self.datetime_nextpos + 2):]):
for counter in counters:
if token.startswith(('%s:' % counter)):
try:
counter = counter_equiv.get(counter, counter)
vars(self)[('_' + counter)] = int(token.split(':')[(- 1)].replace(',', ))
except ValueError:
if ((counter == 'numYields') and token.startswith('numYields')):
try:
self._numYields = int(split_tokens[(((t + 1) + self.datetime_nextpos) + 2)].replace(',', ))
except ValueError:
pass
if ((counter == 'planSummary') and token.startswith('planSummary')):
try:
self._planSummary = split_tokens[(((t + 1) + self.datetime_nextpos) + 2)]
except ValueError:
pass
break |
@property
def level(self):
'Extract log level if available (lazy).'
if (not self._level_calculated):
self._level_calculated = True
self._extract_level()
return self._level | 2,071,184,124,526,647,600 | Extract log level if available (lazy). | mtools/util/logevent.py | level | sindbach/mtools | python | @property
def level(self):
if (not self._level_calculated):
self._level_calculated = True
self._extract_level()
return self._level |
@property
def component(self):
'Extract log component if available (lazy).'
self.level
return self._component | 3,989,212,613,295,903,000 | Extract log component if available (lazy). | mtools/util/logevent.py | component | sindbach/mtools | python | @property
def component(self):
self.level
return self._component |
def _extract_level(self):
'Extract level and component if available (lazy).'
if (self._level is None):
split_tokens = self.split_tokens
if (not split_tokens):
self._level = False
self._component = False
return
x = (self.log_levels.index(split_tokens[1]) if (split_tokens[1] in self.log_levels) else None)
if (x is not None):
self._level = split_tokens[1]
self._component = split_tokens[2]
else:
self._level = False
self._component = False | -3,180,627,665,237,303,000 | Extract level and component if available (lazy). | mtools/util/logevent.py | _extract_level | sindbach/mtools | python | def _extract_level(self):
if (self._level is None):
split_tokens = self.split_tokens
if (not split_tokens):
self._level = False
self._component = False
return
x = (self.log_levels.index(split_tokens[1]) if (split_tokens[1] in self.log_levels) else None)
if (x is not None):
self._level = split_tokens[1]
self._component = split_tokens[2]
else:
self._level = False
self._component = False |
def parse_all(self):
'\n Trigger extraction of all information.\n\n These values are usually evaluated lazily.\n '
tokens = self.split_tokens
duration = self.duration
datetime = self.datetime
thread = self.thread
operation = self.operation
namespace = self.namespace
pattern = self.pattern
nscanned = self.nscanned
nscannedObjects = self.nscannedObjects
ntoreturn = self.ntoreturn
nreturned = self.nreturned
ninserted = self.ninserted
ndeleted = self.ndeleted
nupdated = self.nupdated
numYields = self.numYields
w = self.w
r = self.r | 2,178,256,742,827,818,000 | Trigger extraction of all information.
These values are usually evaluated lazily. | mtools/util/logevent.py | parse_all | sindbach/mtools | python | def parse_all(self):
'\n Trigger extraction of all information.\n\n These values are usually evaluated lazily.\n '
tokens = self.split_tokens
duration = self.duration
datetime = self.datetime
thread = self.thread
operation = self.operation
namespace = self.namespace
pattern = self.pattern
nscanned = self.nscanned
nscannedObjects = self.nscannedObjects
ntoreturn = self.ntoreturn
nreturned = self.nreturned
ninserted = self.ninserted
ndeleted = self.ndeleted
nupdated = self.nupdated
numYields = self.numYields
w = self.w
r = self.r |
def __str__(self):
'Default string conversion for LogEvent object is its line_str.'
return str(self.line_str) | -4,018,836,960,612,078,600 | Default string conversion for LogEvent object is its line_str. | mtools/util/logevent.py | __str__ | sindbach/mtools | python | def __str__(self):
return str(self.line_str) |
def to_dict(self, labels=None):
'Convert LogEvent object to a dictionary.'
output = {}
if (labels is None):
labels = ['line_str', 'split_tokens', 'datetime', 'operation', 'thread', 'namespace', 'nscanned', 'ntoreturn', 'nreturned', 'ninserted', 'nupdated', 'ndeleted', 'duration', 'r', 'w', 'numYields']
for label in labels:
value = getattr(self, label, None)
if (value is not None):
output[label] = value
return output | 4,889,754,670,686,270,000 | Convert LogEvent object to a dictionary. | mtools/util/logevent.py | to_dict | sindbach/mtools | python | def to_dict(self, labels=None):
output = {}
if (labels is None):
labels = ['line_str', 'split_tokens', 'datetime', 'operation', 'thread', 'namespace', 'nscanned', 'ntoreturn', 'nreturned', 'ninserted', 'nupdated', 'ndeleted', 'duration', 'r', 'w', 'numYields']
for label in labels:
value = getattr(self, label, None)
if (value is not None):
output[label] = value
return output |
def to_json(self, labels=None):
'Convert LogEvent object to valid JSON.'
output = self.to_dict(labels)
return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False) | -2,870,708,172,949,533,700 | Convert LogEvent object to valid JSON. | mtools/util/logevent.py | to_json | sindbach/mtools | python | def to_json(self, labels=None):
output = self.to_dict(labels)
return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False) |
def _parse_document(self):
'Parse system.profile doc, copy all values to member variables.'
self._reset()
doc = self._profile_doc
self._split_tokens_calculated = True
self._split_tokens = None
self._duration_calculated = True
self._duration = doc[u'millis']
self._datetime_calculated = True
self._datetime = doc[u'ts']
if (self._datetime.tzinfo is None):
self._datetime = self._datetime.replace(tzinfo=tzutc())
self._datetime_format = None
self._reformat_timestamp('ctime', force=True)
self._thread_calculated = True
self._thread = doc['thread']
self._operation_calculated = True
self._operation = doc[u'op']
self._namespace = doc[u'ns']
self._command_calculated = True
if (self.operation == 'command'):
self._command = doc[u'command'].keys()[0]
if ('query' in doc):
if (('query' in doc['query']) and isinstance(doc['query']['query'], dict)):
self._pattern = str(doc['query']['query']).replace("'", '"')
elif ('$query' in doc['query']):
self._pattern = str(doc['query']['$query']).replace("'", '"')
else:
self._pattern = str(doc['query']).replace("'", '"')
if (('orderby' in doc['query']) and isinstance(doc['query']['orderby'], dict)):
self._sort_pattern = str(doc['query']['orderby']).replace("'", '"')
elif ('$orderby' in doc['query']):
self._sort_pattern = str(doc['query']['$orderby']).replace("'", '"')
else:
self._sort_pattern = None
self._counters_calculated = True
self._nscanned = (doc[u'nscanned'] if ('nscanned' in doc) else None)
self._ntoreturn = (doc[u'ntoreturn'] if ('ntoreturn' in doc) else None)
self._nupdated = (doc[u'nupdated'] if ('nupdated' in doc) else None)
self._nreturned = (doc[u'nreturned'] if ('nreturned' in doc) else None)
self._ninserted = (doc[u'ninserted'] if ('ninserted' in doc) else None)
self._ndeleted = (doc[u'ndeleted'] if ('ndeleted' in doc) else None)
self._numYields = (doc[u'numYield'] if ('numYield' in doc) else None)
if (u'lockStats' in doc):
self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
locks = (('w:%i' % self.w) if (self.w is not None) else ('r:%i' % self.r))
elif (u'locks' in doc):
locks = json.dumps(doc[u'locks'])
else:
locks = ''
payload = ''
if ('query' in doc):
payload += ('query: %s' % str(doc[u'query']).replace("u'", "'").replace("'", '"'))
if ('command' in doc):
payload += ('command: %s' % str(doc[u'command']).replace("u'", "'").replace("'", '"'))
if ('updateobj' in doc):
payload += (' update: %s' % str(doc[u'updateobj']).replace("u'", "'").replace("'", '"'))
scanned = (('nscanned:%i' % self._nscanned) if ('nscanned' in doc) else '')
yields = (('numYields:%i' % self._numYields) if ('numYield' in doc) else '')
duration = (('%ims' % self.duration) if (self.duration is not None) else '')
self._line_str = '[{thread}] {operation} {namespace} {payload} {scanned} {yields} locks(micros) {locks} {duration}'.format(datetime=self.datetime, thread=self.thread, operation=self.operation, namespace=self.namespace, payload=payload, scanned=scanned, yields=yields, locks=locks, duration=duration) | 1,999,894,103,749,726,000 | Parse system.profile doc, copy all values to member variables. | mtools/util/logevent.py | _parse_document | sindbach/mtools | python | def _parse_document(self):
self._reset()
doc = self._profile_doc
self._split_tokens_calculated = True
self._split_tokens = None
self._duration_calculated = True
self._duration = doc[u'millis']
self._datetime_calculated = True
self._datetime = doc[u'ts']
if (self._datetime.tzinfo is None):
self._datetime = self._datetime.replace(tzinfo=tzutc())
self._datetime_format = None
self._reformat_timestamp('ctime', force=True)
self._thread_calculated = True
self._thread = doc['thread']
self._operation_calculated = True
self._operation = doc[u'op']
self._namespace = doc[u'ns']
self._command_calculated = True
if (self.operation == 'command'):
self._command = doc[u'command'].keys()[0]
if ('query' in doc):
if (('query' in doc['query']) and isinstance(doc['query']['query'], dict)):
self._pattern = str(doc['query']['query']).replace("'", '"')
elif ('$query' in doc['query']):
self._pattern = str(doc['query']['$query']).replace("'", '"')
else:
self._pattern = str(doc['query']).replace("'", '"')
if (('orderby' in doc['query']) and isinstance(doc['query']['orderby'], dict)):
self._sort_pattern = str(doc['query']['orderby']).replace("'", '"')
elif ('$orderby' in doc['query']):
self._sort_pattern = str(doc['query']['$orderby']).replace("'", '"')
else:
self._sort_pattern = None
self._counters_calculated = True
self._nscanned = (doc[u'nscanned'] if ('nscanned' in doc) else None)
self._ntoreturn = (doc[u'ntoreturn'] if ('ntoreturn' in doc) else None)
self._nupdated = (doc[u'nupdated'] if ('nupdated' in doc) else None)
self._nreturned = (doc[u'nreturned'] if ('nreturned' in doc) else None)
self._ninserted = (doc[u'ninserted'] if ('ninserted' in doc) else None)
self._ndeleted = (doc[u'ndeleted'] if ('ndeleted' in doc) else None)
self._numYields = (doc[u'numYield'] if ('numYield' in doc) else None)
if (u'lockStats' in doc):
self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
locks = (('w:%i' % self.w) if (self.w is not None) else ('r:%i' % self.r))
elif (u'locks' in doc):
locks = json.dumps(doc[u'locks'])
else:
locks =
payload =
if ('query' in doc):
payload += ('query: %s' % str(doc[u'query']).replace("u'", "'").replace("'", '"'))
if ('command' in doc):
payload += ('command: %s' % str(doc[u'command']).replace("u'", "'").replace("'", '"'))
if ('updateobj' in doc):
payload += (' update: %s' % str(doc[u'updateobj']).replace("u'", "'").replace("'", '"'))
scanned = (('nscanned:%i' % self._nscanned) if ('nscanned' in doc) else )
yields = (('numYields:%i' % self._numYields) if ('numYield' in doc) else )
duration = (('%ims' % self.duration) if (self.duration is not None) else )
self._line_str = '[{thread}] {operation} {namespace} {payload} {scanned} {yields} locks(micros) {locks} {duration}'.format(datetime=self.datetime, thread=self.thread, operation=self.operation, namespace=self.namespace, payload=payload, scanned=scanned, yields=yields, locks=locks, duration=duration) |
def __init__(self, weights_file: str, base_model_name: str):
' Invoke a predict method of this class to predict image quality using nima model\n '
try:
self.nima = Nima(base_model_name, weights=None)
self.nima.build()
self.nima.nima_model.load_weights(weights_file)
except Exception as e:
print('Unable to load NIMA weights', str(e))
sys.exit(1) | -2,317,369,645,536,502,000 | Invoke a predict method of this class to predict image quality using nima model | deepinsight_iqa/nima/predict.py | __init__ | sandyz1000/deepinsight-iqa | python | def __init__(self, weights_file: str, base_model_name: str):
' \n '
try:
self.nima = Nima(base_model_name, weights=None)
self.nima.build()
self.nima.nima_model.load_weights(weights_file)
except Exception as e:
print('Unable to load NIMA weights', str(e))
sys.exit(1) |
def bernstein_test_1(rep: str):
'011 . x + 1'
a = '011'
b = '1'
return bitwise_xor(bitwise_dot(a, rep), b) | 6,488,060,793,820,434,000 | 011 . x + 1 | data/p3BR/R2/benchmark/startQiskit_QC292.py | bernstein_test_1 | UCLA-SEAL/QDiff | python | def bernstein_test_1(rep: str):
a = '011'
b = '1'
return bitwise_xor(bitwise_dot(a, rep), b) |
def bernstein_test_2(rep: str):
'000 . x + 0'
a = '000'
b = '0'
return bitwise_xor(bitwise_dot(a, rep), b) | 8,969,665,367,625,561,000 | 000 . x + 0 | data/p3BR/R2/benchmark/startQiskit_QC292.py | bernstein_test_2 | UCLA-SEAL/QDiff | python | def bernstein_test_2(rep: str):
a = '000'
b = '0'
return bitwise_xor(bitwise_dot(a, rep), b) |
def bernstein_test_3(rep: str):
'111 . x + 1'
a = '111'
b = '1'
return bitwise_xor(bitwise_dot(a, rep), b) | 4,693,651,165,882,063,000 | 111 . x + 1 | data/p3BR/R2/benchmark/startQiskit_QC292.py | bernstein_test_3 | UCLA-SEAL/QDiff | python | def bernstein_test_3(rep: str):
a = '111'
b = '1'
return bitwise_xor(bitwise_dot(a, rep), b) |
def fasterMovie(self):
'Let movie faster.'
if ((self.state == self.PLAYING) or (self.state == self.READY)):
self.sendRtspRequest(self.FASTER) | 231,428,766,339,245,540 | Let movie faster. | Task2/Client_dev.py | fasterMovie | Aiemu/CourseCN-Proj-RTP | python | def fasterMovie(self):
if ((self.state == self.PLAYING) or (self.state == self.READY)):
self.sendRtspRequest(self.FASTER) |
def slowerMovie(self):
'Let movie slower.'
if ((self.state == self.PLAYING) or (self.state == self.READY)):
self.sendRtspRequest(self.SLOWER) | -1,343,215,242,521,325,300 | Let movie slower. | Task2/Client_dev.py | slowerMovie | Aiemu/CourseCN-Proj-RTP | python | def slowerMovie(self):
if ((self.state == self.PLAYING) or (self.state == self.READY)):
self.sendRtspRequest(self.SLOWER) |
def setupMovie(self):
'Setup init.'
if (self.state == self.INIT):
self.sendRtspRequest(self.SETUP) | -7,091,073,410,713,007,000 | Setup init. | Task2/Client_dev.py | setupMovie | Aiemu/CourseCN-Proj-RTP | python | def setupMovie(self):
if (self.state == self.INIT):
self.sendRtspRequest(self.SETUP) |