response | instruction
---|---|
Test comparison functions | def test_cmp():
"""Test comparison functions"""
with closing(StringIO()) as our_file:
t0 = tqdm(total=10, file=our_file)
t1 = tqdm(total=10, file=our_file)
t2 = tqdm(total=10, file=our_file)
assert t0 < t1
assert t2 >= t0
assert t0 <= t2
t3 = tqdm(total=10, file=our_file)
t4 = tqdm(total=10, file=our_file)
t5 = tqdm(total=10, file=our_file)
t5.close()
t6 = tqdm(total=10, file=our_file)
assert t3 != t4
assert t3 > t2
assert t5 == t6
t6.close()
t4.close()
t3.close()
t2.close()
t1.close()
t0.close() |
Test representation | def test_repr():
"""Test representation"""
with closing(StringIO()) as our_file:
with tqdm(total=10, ascii=True, file=our_file) as t:
assert str(t) == ' 0%| | 0/10 [00:00<?, ?it/s]' |
Test clearing bar display | def test_clear():
"""Test clearing bar display"""
with closing(StringIO()) as our_file:
t1 = tqdm(total=10, file=our_file, desc='pos0 bar', bar_format='{l_bar}')
t2 = trange(10, file=our_file, desc='pos1 bar', bar_format='{l_bar}')
before = squash_ctrlchars(our_file.getvalue())
t2.clear()
t1.clear()
after = squash_ctrlchars(our_file.getvalue())
t1.close()
t2.close()
assert before == ['pos0 bar: 0%|', 'pos1 bar: 0%|']
assert after == ['', ''] |
Test disabled clear | def test_clear_disabled():
"""Test disabled clear"""
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, desc='pos0 bar', disable=True,
bar_format='{l_bar}') as t:
t.clear()
assert our_file.getvalue() == '' |
Test refresh bar display | def test_refresh():
"""Test refresh bar display"""
with closing(StringIO()) as our_file:
t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{l_bar}', mininterval=999, miniters=999)
t2 = tqdm(total=10, file=our_file, desc='pos1 bar',
bar_format='{l_bar}', mininterval=999, miniters=999)
t1.update()
t2.update()
before = squash_ctrlchars(our_file.getvalue())
t1.refresh()
t2.refresh()
after = squash_ctrlchars(our_file.getvalue())
t1.close()
t2.close()
# Check that refreshing indeed forced the display to use realtime state
assert before == [u'pos0 bar: 0%|', u'pos1 bar: 0%|']
assert after == [u'pos0 bar: 10%|', u'pos1 bar: 10%|'] |
Test disabled repr | def test_disabled_repr(capsys):
"""Test disabled repr"""
with tqdm(total=10, disable=True) as t:
str(t)
t.update()
print(t)
out, err = capsys.readouterr()
assert not err
assert out == ' 0%| | 0/10 [00:00<?, ?it/s]\n' |
Test disabled refresh | def test_disabled_refresh():
"""Test disabled refresh"""
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, desc='pos0 bar', disable=True,
bar_format='{l_bar}', mininterval=999, miniters=999) as t:
t.update()
t.refresh()
assert our_file.getvalue() == '' |
Test write messages | def test_write():
"""Test write messages"""
s = "Hello world"
with closing(StringIO()) as our_file:
# Change format to keep only left part w/o bar and it/s rate
t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t2 = trange(10, file=our_file, desc='pos1 bar', bar_format='{l_bar}',
mininterval=0, miniters=1)
t3 = tqdm(total=10, file=our_file, desc='pos2 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t1.update()
t2.update()
t3.update()
before = our_file.getvalue()
# Write msg and see if bars are correctly redrawn below the msg
t1.write(s, file=our_file) # call as an instance method
tqdm.write(s, file=our_file) # call as a class method
after = our_file.getvalue()
t1.close()
t2.close()
t3.close()
before_squashed = squash_ctrlchars(before)
after_squashed = squash_ctrlchars(after)
assert after_squashed == [s, s] + before_squashed
# Check that no bar clearing if different file
with closing(StringIO()) as our_file_bar:
with closing(StringIO()) as our_file_write:
t1 = tqdm(total=10, file=our_file_bar, desc='pos0 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t1.update()
before_bar = our_file_bar.getvalue()
tqdm.write(s, file=our_file_write)
after_bar = our_file_bar.getvalue()
t1.close()
assert before_bar == after_bar
# Test stdout/stderr anti-mixup strategy
# Backup stdout/stderr
stde = sys.stderr
stdo = sys.stdout
# Mock stdout/stderr
with closing(StringIO()) as our_stderr:
with closing(StringIO()) as our_stdout:
sys.stderr = our_stderr
sys.stdout = our_stdout
t1 = tqdm(total=10, file=sys.stderr, desc='pos0 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t1.update()
before_err = sys.stderr.getvalue()
before_out = sys.stdout.getvalue()
tqdm.write(s, file=sys.stdout)
after_err = sys.stderr.getvalue()
after_out = sys.stdout.getvalue()
t1.close()
assert before_err == '\rpos0 bar: 0%|\rpos0 bar: 10%|'
assert before_out == ''
after_err_res = [m[0] for m in RE_pos.findall(after_err)]
exres = ['\rpos0 bar: 0%|',
'\rpos0 bar: 10%|',
'\r ',
'\r\rpos0 bar: 10%|']
pos_line_diff(after_err_res, exres)
assert after_out == s + '\n'
# Restore stdout and stderr
sys.stderr = stde
sys.stdout = stdo |
Test advance len (numpy array shape) | def test_len():
"""Test advance len (numpy array shape)"""
np = importorskip('numpy')
with closing(StringIO()) as f:
with tqdm(np.zeros((3, 4)), file=f) as t:
assert len(t) == 3 |
Test autodisable will disable on non-TTY | def test_autodisable_disable():
"""Test autodisable will disable on non-TTY"""
with closing(StringIO()) as our_file:
with tqdm(total=10, disable=None, file=our_file) as t:
t.update(3)
assert our_file.getvalue() == '' |
Test autodisable will not disable on TTY | def test_autodisable_enable():
"""Test autodisable will not disable on TTY"""
with closing(StringIO()) as our_file:
our_file.isatty = lambda: True
with tqdm(total=10, disable=None, file=our_file) as t:
t.update()
assert our_file.getvalue() != '' |
Test postfix | def test_postfix():
"""Test postfix"""
postfix = {'float': 0.321034, 'gen': 543, 'str': 'h', 'lst': [2]}
postfix_order = (('w', 'w'), ('a', 0)) # no need for OrderedDict
expected = ['float=0.321', 'gen=543', 'lst=[2]', 'str=h']
expected_order = ['w=w', 'a=0', 'float=0.321', 'gen=543', 'lst=[2]', 'str=h']
# Test postfix set at init
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{r_bar}', postfix=postfix) as t1:
t1.refresh()
out = our_file.getvalue()
# Test postfix set after init
with closing(StringIO()) as our_file:
with trange(10, file=our_file, desc='pos1 bar', bar_format='{r_bar}',
postfix=None) as t2:
t2.set_postfix(**postfix)
t2.refresh()
out2 = our_file.getvalue()
# Order of items in dict may change, so need a loop to check per item
for res in expected:
assert res in out
assert res in out2
# Test postfix (with ordered dict and no refresh) set after init
with closing(StringIO()) as our_file:
with trange(10, file=our_file, desc='pos2 bar', bar_format='{r_bar}',
postfix=None) as t3:
t3.set_postfix(postfix_order, False, **postfix)
t3.refresh() # explicit external refresh
out3 = our_file.getvalue()
out3 = out3[1:-1].split(', ')[3:]
assert out3 == expected_order
# Test postfix (with ordered dict and refresh) set after init
with closing(StringIO()) as our_file:
with trange(10, file=our_file, desc='pos2 bar',
bar_format='{r_bar}', postfix=None) as t4:
t4.set_postfix(postfix_order, True, **postfix)
t4.refresh() # double refresh
out4 = our_file.getvalue()
assert out4.count('\r') > out3.count('\r')
assert out4.count(", ".join(expected_order)) == 2
# Test setting postfix string directly
with closing(StringIO()) as our_file:
with trange(10, file=our_file, desc='pos2 bar', bar_format='{r_bar}',
postfix=None) as t5:
t5.set_postfix_str("Hello", False)
t5.set_postfix_str("World")
out5 = our_file.getvalue()
assert "Hello" not in out5
out5 = out5[1:-1].split(', ')[3:]
assert out5 == ["World"] |
Test directly assigning non-str objects to postfix | def test_postfix_direct():
"""Test directly assigning non-str objects to postfix"""
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, miniters=1, mininterval=0,
bar_format="{postfix[0][name]} {postfix[1]:>5.2f}",
postfix=[{'name': "foo"}, 42]) as t:
for i in range(10):
if i % 2:
t.postfix[0]["name"] = "abcdefghij"[i]
else:
t.postfix[1] = i
t.update()
res = our_file.getvalue()
assert "f 6.00" in res
assert "h 6.00" in res
assert "h 8.00" in res
assert "j 8.00" in res |
Test redirection of output | def test_file_redirection():
"""Test redirection of output"""
with closing(StringIO()) as our_file:
# Redirect stdout to tqdm.write()
with std_out_err_redirect_tqdm(tqdm_file=our_file):
with tqdm(total=3) as pbar:
print("Such fun")
pbar.update(1)
print("Such", "fun")
pbar.update(1)
print("Such ", end="")
print("fun")
pbar.update(1)
res = our_file.getvalue()
assert res.count("Such fun\n") == 3
assert "0/3" in res
assert "3/3" in res |
Test external write mode | def test_external_write():
"""Test external write mode"""
with closing(StringIO()) as our_file:
# Redirect stdout to tqdm.write()
for _ in trange(3, file=our_file):
del tqdm._lock # classmethod should be able to recreate lock
with tqdm.external_write_mode(file=our_file):
our_file.write("Such fun\n")
res = our_file.getvalue()
assert res.count("Such fun\n") == 3
assert "0/3" in res
assert "3/3" in res |
Test numeric `unit_scale` | def test_unit_scale():
"""Test numeric `unit_scale`"""
with closing(StringIO()) as our_file:
for _ in tqdm(range(9), unit_scale=9, file=our_file,
miniters=1, mininterval=0):
pass
out = our_file.getvalue()
assert '81/81' in out |
decorator replacing tqdm's lock with vanilla threading/multiprocessing | def patch_lock(thread=True):
"""decorator replacing tqdm's lock with vanilla threading/multiprocessing"""
try:
if thread:
from threading import RLock
else:
from multiprocessing import RLock
lock = RLock()
except (ImportError, OSError) as err:
skip(str(err))
def outer(func):
"""actual decorator"""
@wraps(func)
def inner(*args, **kwargs):
"""set & reset lock even if exceptions occur"""
default_lock = tqdm.get_lock()
try:
tqdm.set_lock(lock)
return func(*args, **kwargs)
finally:
tqdm.set_lock(default_lock)
return inner
return outer |
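A minimal usage sketch (not part of the original suite) showing how `patch_lock` wraps a test so that `tqdm` temporarily uses a plain `threading.RLock`; the bar settings and assertion are illustrative assumptions. | from contextlib import closing
from io import StringIO

from tqdm import tqdm


@patch_lock(thread=True)  # temporarily swap tqdm's lock for a vanilla threading.RLock
def test_with_plain_lock():
    """run a bar while the patched lock is active"""
    with closing(StringIO()) as our_file:
        for _ in tqdm(range(3), file=our_file):
            pass
        assert "3/3" in our_file.getvalue() |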
Test multiprocess/thread-related features | def test_threading():
"""Test multiprocess/thread-related features"""
pass |
Test boolean cast | def test_bool():
"""Test boolean cast"""
def internal(our_file, disable):
kwargs = {'file': our_file, 'disable': disable}
with trange(10, **kwargs) as t:
assert t
with trange(0, **kwargs) as t:
assert not t
with tqdm(total=10, **kwargs) as t:
assert bool(t)
with tqdm(total=0, **kwargs) as t:
assert not bool(t)
with tqdm([], **kwargs) as t:
assert not t
with tqdm([0], **kwargs) as t:
assert t
with tqdm(iter([]), **kwargs) as t:
assert t
with tqdm(iter([1, 2, 3]), **kwargs) as t:
assert t
with tqdm(**kwargs) as t:
try:
print(bool(t))
except TypeError:
pass
else:
raise TypeError("Expected bool(tqdm()) to fail")
# test with and without disable
with closing(StringIO()) as our_file:
internal(our_file, False)
internal(our_file, True) |
Test tqdm-like module fallback | def backendCheck(module):
"""Test tqdm-like module fallback"""
tn = module.tqdm
tr = module.trange
with closing(StringIO()) as our_file:
with tn(total=10, file=our_file) as t:
assert len(t) == 10
with tr(1337) as t:
assert len(t) == 1337 |
Test auto fallback | def test_auto():
"""Test auto fallback"""
from tqdm import auto, autonotebook
backendCheck(autonotebook)
backendCheck(auto) |
Test wrapping file-like objects | def test_wrapattr():
"""Test wrapping file-like objects"""
data = "a twenty-char string"
with closing(StringIO()) as our_file:
with closing(StringIO()) as writer:
with tqdm.wrapattr(writer, "write", file=our_file, bytes=True) as wrap:
wrap.write(data)
res = writer.getvalue()
assert data == res
res = our_file.getvalue()
assert '%.1fB [' % len(data) in res
with closing(StringIO()) as our_file:
with closing(StringIO()) as writer:
with tqdm.wrapattr(writer, "write", file=our_file, bytes=False) as wrap:
wrap.write(data)
res = our_file.getvalue()
assert '%dit [' % len(data) in res |
Test float totals | def test_float_progress():
"""Test float totals"""
with closing(StringIO()) as our_file:
with trange(10, total=9.6, file=our_file) as t:
with catch_warnings(record=True) as w:
simplefilter("always", category=TqdmWarning)
for i in t:
if i < 9:
assert not w
assert w
assert "clamping frac" in str(w[-1].message) |
Test screen shape | def test_screen_shape():
"""Test screen shape"""
# ncols
with closing(StringIO()) as our_file:
with trange(10, file=our_file, ncols=50) as t:
list(t)
res = our_file.getvalue()
assert all(len(i) == 50 for i in get_bar(res))
# no second/third bar, leave=False
with closing(StringIO()) as our_file:
kwargs = {'file': our_file, 'ncols': 50, 'nrows': 2, 'miniters': 0,
'mininterval': 0, 'leave': False}
with trange(10, desc="one", **kwargs) as t1:
with trange(10, desc="two", **kwargs) as t2:
with trange(10, desc="three", **kwargs) as t3:
list(t3)
list(t2)
list(t1)
res = our_file.getvalue()
assert "one" in res
assert "two" not in res
assert "three" not in res
assert "\n\n" not in res
assert "more hidden" in res
# double-check ncols
assert all(len(i) == 50 for i in get_bar(res)
if i.strip() and "more hidden" not in i)
# all bars, leave=True
with closing(StringIO()) as our_file:
kwargs = {'file': our_file, 'ncols': 50, 'nrows': 2,
'miniters': 0, 'mininterval': 0}
with trange(10, desc="one", **kwargs) as t1:
with trange(10, desc="two", **kwargs) as t2:
assert "two" not in our_file.getvalue()
with trange(10, desc="three", **kwargs) as t3:
assert "three" not in our_file.getvalue()
list(t3)
list(t2)
list(t1)
res = our_file.getvalue()
assert "one" in res
assert "two" in res
assert "three" in res
assert "\n\n" not in res
assert "more hidden" in res
# double-check ncols
assert all(len(i) == 50 for i in get_bar(res)
if i.strip() and "more hidden" not in i)
# second bar becomes first, leave=False
with closing(StringIO()) as our_file:
kwargs = {'file': our_file, 'ncols': 50, 'nrows': 2, 'miniters': 0,
'mininterval': 0, 'leave': False}
t1 = tqdm(total=10, desc="one", **kwargs)
with tqdm(total=10, desc="two", **kwargs) as t2:
t1.update()
t2.update()
t1.close()
res = our_file.getvalue()
assert "one" in res
assert "two" not in res
assert "more hidden" in res
t2.update()
res = our_file.getvalue()
assert "two" in res |
Test `initial` | def test_initial():
"""Test `initial`"""
with closing(StringIO()) as our_file:
for _ in tqdm(range(9), initial=10, total=19, file=our_file,
miniters=1, mininterval=0):
pass
out = our_file.getvalue()
assert '10/19' in out
assert '19/19' in out |
Test `colour` | def test_colour():
"""Test `colour`"""
with closing(StringIO()) as our_file:
for _ in tqdm(range(9), file=our_file, colour="#beefed"):
pass
out = our_file.getvalue()
assert '\x1b[38;2;%d;%d;%dm' % (0xbe, 0xef, 0xed) in out
with catch_warnings(record=True) as w:
simplefilter("always", category=TqdmWarning)
with tqdm(total=1, file=our_file, colour="charm") as t:
assert w
t.update()
assert "Unknown colour" in str(w[-1].message)
with closing(StringIO()) as our_file2:
for _ in tqdm(range(9), file=our_file2, colour="blue"):
pass
out = our_file2.getvalue()
assert '\x1b[34m' in out |
Test writing to closed file | def test_closed():
"""Test writing to closed file"""
with closing(StringIO()) as our_file:
for i in trange(9, file=our_file, miniters=1, mininterval=0):
if i == 5:
our_file.close() |
Test reversed() | def test_reversed(capsys):
"""Test reversed()"""
for _ in reversed(tqdm(range(9))):
pass
out, err = capsys.readouterr()
assert not out
assert ' 0%' in err
assert '100%' in err |
Test __contains__ doesn't iterate | def test_contains(capsys):
"""Test __contains__ doesn't iterate"""
with tqdm(list(range(9))) as t:
assert 9 not in t
assert all(i in t for i in range(9))
out, err = capsys.readouterr()
assert not out
assert ' 0%' in err
assert '100%' not in err |
Test @envwrap (basic) | def test_envwrap(monkeypatch):
"""Test @envwrap (basic)"""
monkeypatch.setenv('FUNC_A', "42")
monkeypatch.setenv('FUNC_TyPe_HiNt', "1337")
monkeypatch.setenv('FUNC_Unused', "x")
@envwrap("FUNC_")
def func(a=1, b=2, type_hint: int = None):
return a, b, type_hint
assert (42, 2, 1337) == func()
assert (99, 2, 1337) == func(a=99) |
Test @envwrap(types) | def test_envwrap_types(monkeypatch):
"""Test @envwrap(types)"""
monkeypatch.setenv('FUNC_notype', "3.14159")
@envwrap("FUNC_", types=defaultdict(lambda: literal_eval))
def func(notype=None):
return notype
assert 3.14159 == func()
monkeypatch.setenv('FUNC_number', "1")
monkeypatch.setenv('FUNC_string', "1")
@envwrap("FUNC_", types={'number': int})
def nofallback(number=None, string=None):
return number, string
assert (1, "1") == nofallback() |
Test @envwrap with typehints | def test_envwrap_annotations(monkeypatch):
"""Test @envwrap with typehints"""
monkeypatch.setenv('FUNC_number', "1.1")
monkeypatch.setenv('FUNC_string', "1.1")
@envwrap("FUNC_")
def annotated(number: Union[int, float] = None, string: int = None):
return number, string
assert (1.1, "1.1") == annotated() |
Test version string | def test_version():
"""Test version string"""
from tqdm import __version__
version_parts = re.split('[.-]', __version__)
if __version__ != "UNKNOWN":
assert 3 <= len(version_parts), "must have at least Major.minor.patch"
assert all(
isinstance(literal_eval(i), int) for i in version_parts[:3]
), "Version Major.minor.patch must be 3 integers" |
A shortcut for `tqdm.asyncio.tqdm(range(*args), **kwargs)`. | def tarange(*args, **kwargs):
"""
A shortcut for `tqdm.asyncio.tqdm(range(*args), **kwargs)`.
"""
return tqdm_asyncio(range(*args), **kwargs) |
A shortcut for `tqdm.auto.tqdm(range(*args), **kwargs)`. | def trange(*args, **kwargs):
"""
A shortcut for `tqdm.auto.tqdm(range(*args), **kwargs)`.
"""
return tqdm(range(*args), **kwargs) |
Params
------
fin : binary file with `read(buf_size : int)` method
fout : binary file with `write` (and optionally `flush`) methods.
callback : function(float), e.g.: `tqdm.update`
callback_len : If (default: True) do `callback(len(buffer))`.
Otherwise, do `callback(data) for data in buffer.split(delim)`. | def posix_pipe(fin, fout, delim=b'\\n', buf_size=256,
callback=lambda float: None, callback_len=True):
"""
Params
------
fin : binary file with `read(buf_size : int)` method
fout : binary file with `write` (and optionally `flush`) methods.
callback : function(float), e.g.: `tqdm.update`
callback_len : If (default: True) do `callback(len(buffer))`.
Otherwise, do `callback(data) for data in buffer.split(delim)`.
"""
fp_write = fout.write
if not delim:
while True:
tmp = fin.read(buf_size)
# flush at EOF
if not tmp:
getattr(fout, 'flush', lambda: None)()
return
fp_write(tmp)
callback(len(tmp))
# return
buf = b''
len_delim = len(delim)
# n = 0
while True:
tmp = fin.read(buf_size)
# flush at EOF
if not tmp:
if buf:
fp_write(buf)
if callback_len:
# n += 1 + buf.count(delim)
callback(1 + buf.count(delim))
else:
for i in buf.split(delim):
callback(i)
getattr(fout, 'flush', lambda: None)()
return # n
while True:
i = tmp.find(delim)
if i < 0:
buf += tmp
break
fp_write(buf + tmp[:i + len(delim)])
# n += 1
callback(1 if callback_len else (buf + tmp[:i]))
buf = b''
tmp = tmp[i + len_delim:] |
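A brief usage sketch for `posix_pipe` (hypothetical in-memory streams stand in for the CLI's stdin/stdout buffers), counting one `tqdm` update per newline-delimited record copied: | from io import BytesIO

from tqdm import tqdm

src = BytesIO(b"line 1\nline 2\nline 3\n")
dst = BytesIO()
with tqdm(total=3, unit="line") as t:
    # callback_len=True (default): t.update(1) per delimiter found
    posix_pipe(src, dst, delim=b"\n", buf_size=16, callback=t.update)
assert dst.getvalue() == src.getvalue() |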
Parameters (internal use only)
---------
fp : file-like object for tqdm
argv : list (default: sys.argv[1:]) | def main(fp=sys.stderr, argv=None):
"""
Parameters (internal use only)
---------
fp : file-like object for tqdm
argv : list (default: sys.argv[1:])
"""
if argv is None:
argv = sys.argv[1:]
try:
log_idx = argv.index('--log')
except ValueError:
for i in argv:
if i.startswith('--log='):
logLevel = i[len('--log='):]
break
else:
logLevel = 'INFO'
else:
# argv.pop(log_idx)
# logLevel = argv.pop(log_idx)
logLevel = argv[log_idx + 1]
logging.basicConfig(level=getattr(logging, logLevel),
format="%(levelname)s:%(module)s:%(lineno)d:%(message)s")
d = tqdm.__doc__ + CLI_EXTRA_DOC
opt_types = dict(RE_OPTS.findall(d))
# opt_types['delim'] = 'chr'
for o in UNSUPPORTED_OPTS:
opt_types.pop(o)
log.debug(sorted(opt_types.items()))
# d = RE_OPTS.sub(r' --\1=<\1> : \2', d)
split = RE_OPTS.split(d)
opt_types_desc = zip(split[1::3], split[2::3], split[3::3])
d = ''.join(('\n --{0} : {2}{3}' if otd[1] == 'bool' else
'\n --{0}=<{1}> : {2}{3}').format(
otd[0].replace('_', '-'), otd[0], *otd[1:])
for otd in opt_types_desc if otd[0] not in UNSUPPORTED_OPTS)
help_short = "Usage:\n tqdm [--help | options]\n"
d = help_short + """
Options:
-h, --help Print this help and exit.
-v, --version Print version and exit.
""" + d.strip('\n') + '\n'
# opts = docopt(d, version=__version__)
if any(v in argv for v in ('-v', '--version')):
sys.stdout.write(__version__ + '\n')
sys.exit(0)
elif any(v in argv for v in ('-h', '--help')):
sys.stdout.write(d + '\n')
sys.exit(0)
elif argv and argv[0][:2] != '--':
sys.stderr.write(f"Error:Unknown argument:{argv[0]}\n{help_short}")
argv = RE_SHLEX.split(' '.join(["tqdm"] + argv))
opts = dict(zip(argv[1::3], argv[3::3]))
log.debug(opts)
opts.pop('log', True)
tqdm_args = {'file': fp}
try:
for (o, v) in opts.items():
o = o.replace('-', '_')
try:
tqdm_args[o] = cast(v, opt_types[o])
except KeyError as e:
raise TqdmKeyError(str(e))
log.debug('args:' + str(tqdm_args))
delim_per_char = tqdm_args.pop('bytes', False)
update = tqdm_args.pop('update', False)
update_to = tqdm_args.pop('update_to', False)
if sum((delim_per_char, update, update_to)) > 1:
raise TqdmKeyError("Can only have one of --bytes --update --update_to")
except Exception:
fp.write("\nError:\n" + help_short)
stdin, stdout_write = sys.stdin, sys.stdout.write
for i in stdin:
stdout_write(i)
raise
else:
buf_size = tqdm_args.pop('buf_size', 256)
delim = tqdm_args.pop('delim', b'\\n')
tee = tqdm_args.pop('tee', False)
manpath = tqdm_args.pop('manpath', None)
comppath = tqdm_args.pop('comppath', None)
if tqdm_args.pop('null', False):
class stdout(object):
@staticmethod
def write(_):
pass
else:
stdout = sys.stdout
stdout = getattr(stdout, 'buffer', stdout)
stdin = getattr(sys.stdin, 'buffer', sys.stdin)
if manpath or comppath:
from importlib import resources
from os import path
from shutil import copyfile
def cp(name, dst):
"""copy resource `name` to `dst`"""
if hasattr(resources, 'files'):
copyfile(str(resources.files('tqdm') / name), dst)
else: # py<3.9
with resources.path('tqdm', name) as src:
copyfile(str(src), dst)
log.info("written:%s", dst)
if manpath is not None:
cp('tqdm.1', path.join(manpath, 'tqdm.1'))
if comppath is not None:
cp('completion.sh', path.join(comppath, 'tqdm_completion.sh'))
sys.exit(0)
if tee:
stdout_write = stdout.write
fp_write = getattr(fp, 'buffer', fp).write
class stdout(object): # pylint: disable=function-redefined
@staticmethod
def write(x):
with tqdm.external_write_mode(file=fp):
fp_write(x)
stdout_write(x)
if delim_per_char:
tqdm_args.setdefault('unit', 'B')
tqdm_args.setdefault('unit_scale', True)
tqdm_args.setdefault('unit_divisor', 1024)
log.debug(tqdm_args)
with tqdm(**tqdm_args) as t:
posix_pipe(stdin, stdout, '', buf_size, t.update)
elif delim == b'\\n':
log.debug(tqdm_args)
write = stdout.write
if update or update_to:
with tqdm(**tqdm_args) as t:
if update:
def callback(i):
t.update(numeric(i.decode()))
else: # update_to
def callback(i):
t.update(numeric(i.decode()) - t.n)
for i in stdin:
write(i)
callback(i)
else:
for i in tqdm(stdin, **tqdm_args):
write(i)
else:
log.debug(tqdm_args)
with tqdm(**tqdm_args) as t:
callback_len = False
if update:
def callback(i):
t.update(numeric(i.decode()))
elif update_to:
def callback(i):
t.update(numeric(i.decode()) - t.n)
else:
callback = t.update
callback_len = True
posix_pipe(stdin, stdout, delim, buf_size, callback, callback_len) |
Shortcut for `tqdm.gui.tqdm(range(*args), **kwargs)`. | def tgrange(*args, **kwargs):
"""Shortcut for `tqdm.gui.tqdm(range(*args), **kwargs)`."""
return tqdm_gui(range(*args), **kwargs) |
Shortcut for `tqdm.notebook.tqdm(range(*args), **kwargs)`. | def tnrange(*args, **kwargs):
"""Shortcut for `tqdm.notebook.tqdm(range(*args), **kwargs)`."""
return tqdm_notebook(range(*args), **kwargs) |
Shortcut for `tqdm.rich.tqdm(range(*args), **kwargs)`. | def trrange(*args, **kwargs):
"""Shortcut for `tqdm.rich.tqdm(range(*args), **kwargs)`."""
return tqdm_rich(range(*args), **kwargs) |
threading RLock | def TRLock(*args, **kwargs):
"""threading RLock"""
try:
from threading import RLock
return RLock(*args, **kwargs)
except (ImportError, OSError): # pragma: no cover
pass |
Shortcut for tqdm(range(*args), **kwargs). | def trange(*args, **kwargs):
"""Shortcut for tqdm(range(*args), **kwargs)."""
return tqdm(range(*args), **kwargs) |
Shortcut for `tqdm.tk.tqdm(range(*args), **kwargs)`. | def ttkrange(*args, **kwargs):
"""Shortcut for `tqdm.tk.tqdm(range(*args), **kwargs)`."""
return tqdm_tk(range(*args), **kwargs) |
Override parameter defaults via `os.environ[prefix + param_name]`.
UPPER_CASE env vars map to lower_case param names.
camelCase isn't supported (because Windows ignores case).
Precedence (highest first):
- call (`foo(a=3)`)
- environ (`FOO_A=2`)
- signature (`def foo(a=1)`)
Parameters
----------
prefix : str
Env var prefix, e.g. "FOO_"
types : dict, optional
Fallback mappings `{'param_name': type, ...}` if types cannot be
inferred from function signature.
Consider using `types=collections.defaultdict(lambda: ast.literal_eval)`.
is_method : bool, optional
Whether to use `functools.partialmethod`; if False (default), `functools.partial` is used.
Examples
--------
```
$ cat foo.py
from tqdm.utils import envwrap
@envwrap("FOO_")
def test(a=1, b=2, c=3):
print(f"received: a={a}, b={b}, c={c}")
$ FOO_A=42 FOO_C=1337 python -c 'import foo; foo.test(c=99)'
received: a=42, b=2, c=99
``` | def envwrap(prefix, types=None, is_method=False):
"""
Override parameter defaults via `os.environ[prefix + param_name]`.
UPPER_CASE env vars map to lower_case param names.
camelCase isn't supported (because Windows ignores case).
Precedence (highest first):
- call (`foo(a=3)`)
- environ (`FOO_A=2`)
- signature (`def foo(a=1)`)
Parameters
----------
prefix : str
Env var prefix, e.g. "FOO_"
types : dict, optional
Fallback mappings `{'param_name': type, ...}` if types cannot be
inferred from function signature.
Consider using `types=collections.defaultdict(lambda: ast.literal_eval)`.
is_method : bool, optional
Whether to use `functools.partialmethod`; if False (default), `functools.partial` is used.
Examples
--------
```
$ cat foo.py
from tqdm.utils import envwrap
@envwrap("FOO_")
def test(a=1, b=2, c=3):
print(f"received: a={a}, b={b}, c={c}")
$ FOO_A=42 FOO_C=1337 python -c 'import foo; foo.test(c=99)'
received: a=42, b=2, c=99
```
"""
if types is None:
types = {}
i = len(prefix)
env_overrides = {k[i:].lower(): v for k, v in os.environ.items() if k.startswith(prefix)}
part = partialmethod if is_method else partial
def wrap(func):
params = signature(func).parameters
# ignore unknown env vars
overrides = {k: v for k, v in env_overrides.items() if k in params}
# infer overrides' `type`s
for k in overrides:
param = params[k]
if param.annotation is not param.empty: # typehints
for typ in getattr(param.annotation, '__args__', (param.annotation,)):
try:
overrides[k] = typ(overrides[k])
except Exception:
pass
else:
break
elif param.default is not None: # type of default value
overrides[k] = type(param.default)(overrides[k])
else:
try: # `types` fallback
overrides[k] = types[k](overrides[k])
except KeyError: # keep unconverted (`str`)
pass
return part(func, **overrides)
return wrap |
Return a function which returns console dimensions (width, height).
Supported: linux, osx, windows, cygwin. | def _screen_shape_wrapper(): # pragma: no cover
"""
Return a function which returns console dimensions (width, height).
Supported: linux, osx, windows, cygwin.
"""
_screen_shape = None
if IS_WIN:
_screen_shape = _screen_shape_windows
if _screen_shape is None:
_screen_shape = _screen_shape_tput
if IS_NIX:
_screen_shape = _screen_shape_linux
return _screen_shape |
cygwin xterm (windows) | def _screen_shape_tput(*_): # pragma: no cover
"""cygwin xterm (windows)"""
try:
import shlex
from subprocess import check_call # nosec
return [int(check_call(shlex.split('tput ' + i))) - 1
for i in ('cols', 'lines')]
except Exception: # nosec
pass
return None, None |
Return a function which returns console width.
Supported: linux, osx, windows, cygwin. | def _environ_cols_wrapper(): # pragma: no cover
"""
Return a function which returns console width.
Supported: linux, osx, windows, cygwin.
"""
warn("Use `_screen_shape_wrapper()(file)[0]` instead of"
" `_environ_cols_wrapper()(file)`", DeprecationWarning, stacklevel=2)
shape = _screen_shape_wrapper()
if not shape:
return None
@wraps(shape)
def inner(fp):
return shape(fp)[0]
return inner |
Returns the real on-screen length of a string which may contain
ANSI control codes and wide chars. | def disp_len(data):
"""
Returns the real on-screen length of a string which may contain
ANSI control codes and wide chars.
"""
return _text_width(RE_ANSI.sub('', data)) |
Trim a string which may contain ANSI control characters. | def disp_trim(data, length):
"""
Trim a string which may contain ANSI control characters.
"""
if len(data) == disp_len(data):
return data[:length]
ansi_present = bool(RE_ANSI.search(data))
while disp_len(data) > length: # carefully delete one char at a time
data = data[:-1]
if ansi_present and bool(RE_ANSI.search(data)):
# assume ANSI reset is required
return data if data.endswith("\033[0m") else data + "\033[0m"
return data |
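A small illustration (assumed, not from the source) of how `disp_len` and `disp_trim` treat ANSI colour codes as zero-width and re-append a reset code after trimming: | from tqdm.utils import disp_len, disp_trim

s = "\x1b[31mred text\x1b[0m and more"      # "red text and more", first word in red
assert disp_len(s) == len("red text and more")  # ANSI codes occupy no columns
trimmed = disp_trim(s, 3)
assert disp_len(trimmed) == 3                # visible width is 3 ("red")
assert trimmed.endswith("\x1b[0m")           # reset code kept/re-appended |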
Registers the given `tqdm` instance with
`pandas.core.groupby.DataFrameGroupBy.progress_apply`. | def tqdm_pandas(tclass, **tqdm_kwargs):
"""
Registers the given `tqdm` instance with
`pandas.core.groupby.DataFrameGroupBy.progress_apply`.
"""
from tqdm import TqdmDeprecationWarning
if isinstance(tclass, type) or (getattr(tclass, '__name__', '').startswith(
'tqdm_')): # delayed adapter case
TqdmDeprecationWarning(
"Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm, ...)`.",
fp_write=getattr(tqdm_kwargs.get('file', None), 'write', sys.stderr.write))
tclass.pandas(**tqdm_kwargs)
else:
TqdmDeprecationWarning(
"Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm(...))`.",
fp_write=getattr(tclass.fp, 'write', sys.stderr.write))
type(tclass).pandas(deprecated_t=tclass) |
See tqdm.notebook.tqdm for full documentation | def tqdm_notebook(*args, **kwargs): # pragma: no cover
"""See tqdm.notebook.tqdm for full documentation"""
from warnings import warn
from .notebook import tqdm as _tqdm_notebook
warn("This function will be removed in tqdm==5.0.0\n"
"Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`",
TqdmDeprecationWarning, stacklevel=2)
return _tqdm_notebook(*args, **kwargs) |
Shortcut for `tqdm.notebook.tqdm(range(*args), **kwargs)`. | def tnrange(*args, **kwargs): # pragma: no cover
"""Shortcut for `tqdm.notebook.tqdm(range(*args), **kwargs)`."""
from warnings import warn
from .notebook import trange as _tnrange
warn("Please use `tqdm.notebook.trange` instead of `tqdm.tnrange`",
TqdmDeprecationWarning, stacklevel=2)
return _tnrange(*args, **kwargs) |
get (create if necessary) and then restore `tqdm_class`'s lock | def ensure_lock(tqdm_class, lock_name=""):
"""get (create if necessary) and then restore `tqdm_class`'s lock"""
old_lock = getattr(tqdm_class, '_lock', None) # don't create a new lock
lock = old_lock or tqdm_class.get_lock() # maybe create a new lock
lock = getattr(lock, lock_name, lock) # maybe subtype
tqdm_class.set_lock(lock)
yield lock
if old_lock is None:
del tqdm_class._lock
else:
tqdm_class.set_lock(old_lock) |
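In the source module `ensure_lock` is used as a context manager (it is consumed with `with ... as lk` in `_executor_map` below); a hedged usage sketch: | from tqdm.auto import tqdm as tqdm_auto

with ensure_lock(tqdm_auto) as lk:
    # `lk` can be handed to worker initialisers so children share the parent's lock,
    # e.g. PoolExecutor(initializer=tqdm_auto.set_lock, initargs=(lk,))
    pass |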
Implementation of `thread_map` and `process_map`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
max_workers : [default: min(32, cpu_count() + 4)].
chunksize : [default: 1].
lock_name : [default: "":str]. | def _executor_map(PoolExecutor, fn, *iterables, **tqdm_kwargs):
"""
Implementation of `thread_map` and `process_map`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
max_workers : [default: min(32, cpu_count() + 4)].
chunksize : [default: 1].
lock_name : [default: "":str].
"""
kwargs = tqdm_kwargs.copy()
if "total" not in kwargs:
kwargs["total"] = length_hint(iterables[0])
tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
max_workers = kwargs.pop("max_workers", min(32, cpu_count() + 4))
chunksize = kwargs.pop("chunksize", 1)
lock_name = kwargs.pop("lock_name", "")
with ensure_lock(tqdm_class, lock_name=lock_name) as lk:
# share lock in case workers are already using `tqdm`
with PoolExecutor(max_workers=max_workers, initializer=tqdm_class.set_lock,
initargs=(lk,)) as ex:
return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs)) |
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ThreadPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ThreadPoolExecutor.__init__`.
[default: min(32, cpu_count() + 4)]. | def thread_map(fn, *iterables, **tqdm_kwargs):
"""
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ThreadPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ThreadPoolExecutor.__init__`.
[default: min(32, cpu_count() + 4)].
"""
from concurrent.futures import ThreadPoolExecutor
return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs) |
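A minimal `thread_map` usage sketch (the worker function and keyword arguments are illustrative): | from tqdm.contrib.concurrent import thread_map

def fetch(x):
    """stand-in for an I/O-bound task"""
    return x * 2

results = thread_map(fetch, range(100), max_workers=4, desc="downloading")
assert results == [x * 2 for x in range(100)] |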
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ProcessPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ProcessPoolExecutor.__init__`.
[default: min(32, cpu_count() + 4)].
chunksize : int, optional
Size of chunks sent to worker processes; passed to
`concurrent.futures.ProcessPoolExecutor.map`. [default: 1].
lock_name : str, optional
Member of `tqdm_class.get_lock()` to use [default: mp_lock]. | def process_map(fn, *iterables, **tqdm_kwargs):
"""
Equivalent of `list(map(fn, *iterables))`
driven by `concurrent.futures.ProcessPoolExecutor`.
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ProcessPoolExecutor.__init__`.
[default: min(32, cpu_count() + 4)].
chunksize : int, optional
Size of chunks sent to worker processes; passed to
`concurrent.futures.ProcessPoolExecutor.map`. [default: 1].
lock_name : str, optional
Member of `tqdm_class.get_lock()` to use [default: mp_lock].
"""
from concurrent.futures import ProcessPoolExecutor
if iterables and "chunksize" not in tqdm_kwargs:
# default `chunksize=1` has poor performance for large iterables
# (most time spent dispatching items to workers).
longest_iterable_len = max(map(length_hint, iterables))
if longest_iterable_len > 1000:
from warnings import warn
warn("Iterable length %d > 1000 but `chunksize` is not set."
" This may seriously degrade multiprocess performance."
" Set `chunksize=1` or more." % longest_iterable_len,
TqdmWarning, stacklevel=2)
if "lock_name" not in tqdm_kwargs:
tqdm_kwargs = tqdm_kwargs.copy()
tqdm_kwargs["lock_name"] = "mp_lock"
return _executor_map(ProcessPoolExecutor, fn, *iterables, **tqdm_kwargs) |
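A minimal `process_map` usage sketch (illustrative worker); the function must be picklable (module level) and, on platforms that spawn processes, the call belongs under a `__main__` guard. Setting `chunksize > 1` avoids the dispatch-overhead warning for long iterables: | from tqdm.contrib.concurrent import process_map

def square(x):
    """stand-in for a CPU-bound task"""
    return x * x

if __name__ == "__main__":
    results = process_map(square, range(10000), max_workers=4, chunksize=100)
    assert results == [x * x for x in range(10000)] |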
Shortcut for `tqdm.contrib.discord.tqdm(range(*args), **kwargs)`. | def tdrange(*args, **kwargs):
"""Shortcut for `tqdm.contrib.discord.tqdm(range(*args), **kwargs)`."""
return tqdm_discord(range(*args), **kwargs) |
Equivalent of `itertools.product`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm]. | def product(*iterables, **tqdm_kwargs):
"""
Equivalent of `itertools.product`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
"""
kwargs = tqdm_kwargs.copy()
tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
try:
lens = list(map(len, iterables))
except TypeError:
total = None
else:
total = 1
for i in lens:
total *= i
kwargs.setdefault("total", total)
with tqdm_class(**kwargs) as t:
it = itertools.product(*iterables)
for i in it:
yield i
t.update() |
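A short usage sketch for the wrapped `product` (the total is inferred as the product of the input lengths; the inputs here are illustrative): | from tqdm.contrib.itertools import product

pairs = [(x, y) for x, y in product(range(3), "ab", desc="grid")]
assert len(pairs) == 6  # 3 * 2 combinations, one bar update each |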
Context manager redirecting console logging to `tqdm.write()`, leaving
other logging handlers (e.g. log files) unaffected.
Parameters
----------
loggers : list, optional
Which loggers to redirect (default: [logging.root]).
tqdm_class : optional
Example
-------
```python
import logging
from tqdm import trange
from tqdm.contrib.logging import logging_redirect_tqdm
LOG = logging.getLogger(__name__)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
with logging_redirect_tqdm():
for i in trange(9):
if i == 4:
LOG.info("console logging redirected to `tqdm.write()`")
# logging restored
``` | def logging_redirect_tqdm(
loggers=None, # type: Optional[List[logging.Logger]],
tqdm_class=std_tqdm # type: Type[std_tqdm]
):
# type: (...) -> Iterator[None]
"""
Context manager redirecting console logging to `tqdm.write()`, leaving
other logging handlers (e.g. log files) unaffected.
Parameters
----------
loggers : list, optional
Which loggers to redirect (default: [logging.root]).
tqdm_class : optional
Example
-------
```python
import logging
from tqdm import trange
from tqdm.contrib.logging import logging_redirect_tqdm
LOG = logging.getLogger(__name__)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
with logging_redirect_tqdm():
for i in trange(9):
if i == 4:
LOG.info("console logging redirected to `tqdm.write()`")
# logging restored
```
"""
if loggers is None:
loggers = [logging.root]
original_handlers_list = [logger.handlers for logger in loggers]
try:
for logger in loggers:
tqdm_handler = _TqdmLoggingHandler(tqdm_class)
orig_handler = _get_first_found_console_logging_handler(logger.handlers)
if orig_handler is not None:
tqdm_handler.setFormatter(orig_handler.formatter)
tqdm_handler.stream = orig_handler.stream
logger.handlers = [
handler for handler in logger.handlers
if not _is_console_logging_handler(handler)] + [tqdm_handler]
yield
finally:
for logger, original_handlers in zip(loggers, original_handlers_list):
logger.handlers = original_handlers |
Convenience shortcut for:
```python
with tqdm_class(*args, **tqdm_kwargs) as pbar:
with logging_redirect_tqdm(loggers=loggers, tqdm_class=tqdm_class):
yield pbar
```
Parameters
----------
tqdm_class : optional, (default: tqdm.std.tqdm).
loggers : optional, list.
**tqdm_kwargs : passed to `tqdm_class`. | def tqdm_logging_redirect(
*args,
# loggers=None, # type: Optional[List[logging.Logger]]
# tqdm=None, # type: Optional[Type[tqdm.tqdm]]
**kwargs
):
# type: (...) -> Iterator[None]
"""
Convenience shortcut for:
```python
with tqdm_class(*args, **tqdm_kwargs) as pbar:
with logging_redirect_tqdm(loggers=loggers, tqdm_class=tqdm_class):
yield pbar
```
Parameters
----------
tqdm_class : optional, (default: tqdm.std.tqdm).
loggers : optional, list.
**tqdm_kwargs : passed to `tqdm_class`.
"""
tqdm_kwargs = kwargs.copy()
loggers = tqdm_kwargs.pop('loggers', None)
tqdm_class = tqdm_kwargs.pop('tqdm_class', std_tqdm)
with tqdm_class(*args, **tqdm_kwargs) as pbar:
with logging_redirect_tqdm(loggers=loggers, tqdm_class=tqdm_class):
yield pbar |
Shortcut for `tqdm.contrib.slack.tqdm(range(*args), **kwargs)`. | def tsrange(*args, **kwargs):
"""Shortcut for `tqdm.contrib.slack.tqdm(range(*args), **kwargs)`."""
return tqdm_slack(range(*args), **kwargs) |
Shortcut for `tqdm.contrib.telegram.tqdm(range(*args), **kwargs)`. | def ttgrange(*args, **kwargs):
"""Shortcut for `tqdm.contrib.telegram.tqdm(range(*args), **kwargs)`."""
return tqdm_telegram(range(*args), **kwargs) |
Returns `func` | def builtin_iterable(func):
"""Returns `func`"""
warn("This function has no effect, and will be removed in tqdm==5.0.0",
TqdmDeprecationWarning, stacklevel=2)
return func |
Equivalent of `numpy.ndenumerate` or builtin `enumerate`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm]. | def tenumerate(iterable, start=0, total=None, tqdm_class=tqdm_auto, **tqdm_kwargs):
"""
Equivalent of `numpy.ndenumerate` or builtin `enumerate`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
"""
try:
import numpy as np
except ImportError:
pass
else:
if isinstance(iterable, np.ndarray):
return tqdm_class(np.ndenumerate(iterable), total=total or iterable.size,
**tqdm_kwargs)
return enumerate(tqdm_class(iterable, total=total, **tqdm_kwargs), start) |
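A brief `tenumerate` usage sketch (illustrative inputs); with a NumPy array it falls back to `np.ndenumerate`, yielding multi-dimensional indices: | from tqdm.contrib import tenumerate

items = list(tenumerate(["a", "b", "c"], start=1))
assert items == [(1, "a"), (2, "b"), (3, "c")]

# with numpy installed:
# import numpy as np
# for idx, val in tenumerate(np.zeros((2, 2))):
#     ...  # idx is a (row, col) tuple |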
Equivalent of builtin `zip`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm]. | def tzip(iter1, *iter2plus, **tqdm_kwargs):
"""
Equivalent of builtin `zip`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
"""
kwargs = tqdm_kwargs.copy()
tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
for i in zip(tqdm_class(iter1, **kwargs), *iter2plus):
yield i |
Equivalent of builtin `map`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm]. | def tmap(function, *sequences, **tqdm_kwargs):
"""
Equivalent of builtin `map`.
Parameters
----------
tqdm_class : [default: tqdm.auto.tqdm].
"""
for i in tzip(*sequences, **tqdm_kwargs):
yield function(*i) |
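Short usage sketches for `tzip` and `tmap` (illustrative inputs); both are generators, so wrap them in `list` or a comprehension to consume eagerly: | from tqdm.contrib import tmap, tzip

assert list(tmap(lambda x: x * x, range(5))) == [0, 1, 4, 9, 16]
assert [a + b for a, b in tzip([1, 2, 3], [10, 20, 30])] == [11, 22, 33] |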
Set requires_grad=False for all parameters of the network to avoid unnecessary computations | def set_requires_grad(net, requires_grad=False):
"""
Set requires_grad=False for all parameters of the network to avoid unnecessary computations
"""
for param in net.parameters():
param.requires_grad = requires_grad |
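A hedged usage sketch: freeze a (hypothetical) discriminator while its outputs feed the generator loss, then unfreeze it for its own update step: | import torch.nn as nn

discriminator = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 1))
set_requires_grad(discriminator, False)   # no grads accumulated in D
assert all(not p.requires_grad for p in discriminator.parameters())
set_requires_grad(discriminator, True)    # re-enable for the discriminator step |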
resizing mode:
- default: resize the image to 256 and take a random resized crop of size 224;
- cen.crop: resize the image to 256 and take the center crop of size 224;
- res: resize the image to 224; | def get_train_transform(resizing='default', scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), random_horizontal_flip=True,
random_color_jitter=False, resize_size=224, norm_mean=(0.485, 0.456, 0.406),
norm_std=(0.229, 0.224, 0.225), auto_augment=None):
"""
resizing mode:
- default: resize the image to 256 and take a random resized crop of size 224;
- cen.crop: resize the image to 256 and take the center crop of size 224;
- res: resize the image to 224;
"""
transformed_img_size = 224
if resizing == 'default':
transform = T.Compose([
ResizeImage(256),
T.RandomResizedCrop(224, scale=scale, ratio=ratio)
])
elif resizing == 'cen.crop':
transform = T.Compose([
ResizeImage(256),
T.CenterCrop(224)
])
elif resizing == 'ran.crop':
transform = T.Compose([
ResizeImage(256),
T.RandomCrop(224)
])
elif resizing == 'res.':
transform = ResizeImage(resize_size)
transformed_img_size = resize_size
else:
raise NotImplementedError(resizing)
transforms = [transform]
if random_horizontal_flip:
transforms.append(T.RandomHorizontalFlip())
if auto_augment:
aa_params = dict(
translate_const=int(transformed_img_size * 0.45),
img_mean=tuple([min(255, round(255 * x)) for x in norm_mean]),
interpolation=Image.BILINEAR
)
if auto_augment.startswith('rand'):
transforms.append(rand_augment_transform(auto_augment, aa_params))
else:
transforms.append(auto_augment_transform(auto_augment, aa_params))
elif random_color_jitter:
transforms.append(T.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5))
transforms.extend([
T.ToTensor(),
T.Normalize(mean=norm_mean, std=norm_std)
])
return T.Compose(transforms) |
resizing mode:
- default: resize the image to 256 and take the center crop of size 224;
– res.: resize the image to 224 | def get_val_transform(resizing='default', resize_size=224,
norm_mean=(0.485, 0.456, 0.406), norm_std=(0.229, 0.224, 0.225)):
"""
resizing mode:
- default: resize the image to 256 and take the center crop of size 224;
– res.: resize the image to 224
"""
if resizing == 'default':
transform = T.Compose([
ResizeImage(256),
T.CenterCrop(224),
])
elif resizing == 'res.':
transform = ResizeImage(resize_size)
else:
raise NotImplementedError(resizing)
return T.Compose([
transform,
T.ToTensor(),
T.Normalize(mean=norm_mean, std=norm_std)
]) |
convert BatchNorms in the `module` into InstanceNorms | def convert_model(module):
"""convert BatchNorms in the `module` into InstanceNorms"""
source_modules = (BatchNorm1d, BatchNorm2d, BatchNorm3d)
target_modules = (InstanceNorm1d, InstanceNorm2d, InstanceNorm3d)
for src_module, tgt_module in zip(source_modules, target_modules):
if isinstance(module, src_module):
mod = tgt_module(module.num_features, module.eps, module.momentum, module.affine)
module = mod
for name, child in module.named_children():
module.add_module(name, convert_model(child))
return module |
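A small check (assumed example) that `convert_model` swaps BatchNorm layers for their InstanceNorm counterparts, preserving `num_features`, `eps`, `momentum` and `affine`: | import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
net = convert_model(net)
assert isinstance(net[1], nn.InstanceNorm2d)  # BatchNorm2d replaced
assert net[1].num_features == 8 |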
Given a sequence of dataset class names and a sequence of dataset root directories,
return a sequence of built datasets | def build_dataset(dataset_names, dataset_roots, transform):
"""
Given a sequence of dataset class names and a sequence of dataset root directories,
return a sequence of built datasets
"""
dataset_lists = []
for dataset_name, root in zip(dataset_names, dataset_roots):
if dataset_name in ["WaterColor", "Comic"]:
dataset = VOCImageFolder(root, phase='train', transform=transform)
elif dataset_name in ["Cityscapes", "FoggyCityscapes"]:
dataset = VOCImageFolder(root, phase="trainval", transform=transform, extension=".png")
elif dataset_name in ["Sim10k"]:
dataset = VOCImageFolder(root, phase="trainval10k", transform=transform)
else:
dataset = VOCImageFolder(root, phase="trainval", transform=transform)
dataset_lists.append(dataset)
return ConcatDataset(dataset_lists) |
Given a sequence of dataset class names and a sequence of dataset root directories,
return a sequence of built datasets | def build_dataset(dataset_categories, dataset_roots):
"""
Given a sequence of dataset class names and a sequence of dataset root directories,
return a sequence of built datasets
"""
dataset_lists = []
for dataset_category, root in zip(dataset_categories, dataset_roots):
dataset_lists.append(datasets.__dict__[dataset_category](root).name)
return dataset_lists |
Create a list of default :class:`Augmentation` from config.
Now it includes resizing and flipping.
Returns:
list[Augmentation] | def build_augmentation(cfg, is_train):
"""
Create a list of default :class:`Augmentation` from config.
Now it includes resizing and flipping.
Returns:
list[Augmentation]
"""
import detectron2.data.transforms as T
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
if is_train and cfg.INPUT.RANDOM_FLIP != "none":
augmentation.append(
T.RandomFlip(
horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
)
)
augmentation.append(
T.RandomApply(T.AugmentationList(
[
T.RandomContrast(0.6, 1.4),
T.RandomBrightness(0.6, 1.4),
T.RandomSaturation(0.6, 1.4),
T.RandomLighting(0.1)
]
), prob=0.8)
)
augmentation.append(
T.RandomApply(Grayscale(), prob=0.2)
)
return augmentation |
Create configs and perform basic setups. | def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(
cfg, args
) # if you don't like any of the default setup, write your own setup code
return cfg |
Build a LR scheduler from config. | def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
"""
Build a LR scheduler from config.
"""
name = cfg.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupMultiStepLR":
steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER]
if len(steps) != len(cfg.SOLVER.STEPS):
logger = logging.getLogger(__name__)
logger.warning(
"SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. "
"These values will be ignored."
)
sched = MultiStepParamScheduler(
values=[cfg.SOLVER.GAMMA ** k for k in range(len(steps) + 1)],
milestones=steps,
num_updates=cfg.SOLVER.MAX_ITER,
)
elif name == "WarmupCosineLR":
sched = CosineParamScheduler(1, 0)
elif name == "ExponentialLR":
sched = ExponentialParamScheduler(1, cfg.SOLVER.GAMMA)
return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER)
else:
raise ValueError("Unknown LR scheduler: {}".format(name))
sched = WarmupParamScheduler(
sched,
cfg.SOLVER.WARMUP_FACTOR,
cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER,
cfg.SOLVER.WARMUP_METHOD,
)
return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER) |
Create configs and perform basic setups. | def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(
cfg, args
) # if you don't like any of the default setup, write your own setup code
return cfg |
Intersections over Union between two boxes | def iou_between(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
eps: float = 1e-7,
reduction: str = "none"
):
"""Intersections over Union between two boxes"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsctk = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk
iouk = intsctk / (unionk + eps)
if reduction == 'mean':
return iouk.mean()
elif reduction == 'sum':
return iouk.sum()
else:
return iouk |
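A worked example (assumed values): two unit squares overlapping in a 1 x 0.5 strip give intersection 0.5 and union 1.5, so the IoU is 1/3: | import torch

boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
boxes2 = torch.tensor([[0.0, 0.5, 1.0, 1.5]])
iou = iou_between(boxes1, boxes2)
assert torch.allclose(iou, torch.tensor([1.0 / 3.0]), atol=1e-4) |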
clamp (limit) the values in boxes within the widths and heights of the image. | def clamp(boxes, widths, heights):
"""clamp (limit) the values in boxes within the widths and heights of the image."""
clamped_boxes = []
for box, w, h in zip(boxes, widths, heights):
clamped_boxes.append(clamp_single(box, w, h))
return torch.stack(clamped_boxes, dim=0) |
Generate foreground proposals and background proposals from `model` and save them to the disk | def generate_proposals(model, num_classes, dataset_names, cache_root, cfg):
"""Generate foreground proposals and background proposals from `model` and save them to the disk"""
fg_proposals_list = PersistentProposalList(os.path.join(cache_root, "{}_fg.json".format(dataset_names[0])))
bg_proposals_list = PersistentProposalList(os.path.join(cache_root, "{}_bg.json".format(dataset_names[0])))
if not (fg_proposals_list.load() and bg_proposals_list.load()):
for dataset_name in dataset_names:
data_loader = build_detection_test_loader(cfg, dataset_name, mapper=ProposalMapper(cfg, False))
generator = ProposalGenerator(num_classes=num_classes)
fg_proposals_list_data, bg_proposals_list_data = inference_on_dataset(model, data_loader, generator)
fg_proposals_list.extend(fg_proposals_list_data)
bg_proposals_list.extend(bg_proposals_list_data)
fg_proposals_list.flush()
bg_proposals_list.flush()
return fg_proposals_list, bg_proposals_list |
Generate category labels for each proposals in `prop` and save them to the disk | def generate_category_labels(prop, category_adaptor, cache_filename):
"""Generate category labels for each proposals in `prop` and save them to the disk"""
prop_w_category = PersistentProposalList(cache_filename)
if not prop_w_category.load():
for p in prop:
prop_w_category.append(p)
data_loader_test = category_adaptor.prepare_test_data(flatten(prop_w_category))
predictions = category_adaptor.predict(data_loader_test)
for p in prop_w_category:
p.pred_classes = np.array([predictions.popleft() for _ in range(len(p))])
prop_w_category.flush()
return prop_w_category |
Generate bounding box labels for each proposals in `prop` and save them to the disk | def generate_bounding_box_labels(prop, bbox_adaptor, class_names, cache_filename):
"""Generate bounding box labels for each proposals in `prop` and save them to the disk"""
prop_w_bbox = PersistentProposalList(cache_filename)
if not prop_w_bbox.load():
# remove (predicted) background proposals
for p in prop:
keep_indices = (0 <= p.pred_classes) & (p.pred_classes < len(class_names))
prop_w_bbox.append(p[keep_indices])
data_loader_test = bbox_adaptor.prepare_test_data(flatten(prop_w_bbox))
predictions = bbox_adaptor.predict(data_loader_test)
for p in prop_w_bbox:
p.pred_boxes = np.array([predictions.popleft() for _ in range(len(p))])
prop_w_bbox.flush()
return prop_w_bbox |
resizing mode:
- default: resize the image to 256 and take a random resized crop of size 224;
- cen.crop: resize the image to 256 and take the center crop of size 224;
- res: resize the image to 224;
- res.|crop: resize the image to 256 and take a random crop of size 224;
- res.sma|crop: resize the image keeping its aspect ratio such that the
smaller side is 256, then take a random crop of size 224;
– inc.crop: “inception crop” from (Szegedy et al., 2015);
– cif.crop: resize the image to 224, zero-pad it by 28 on each side, then take a random crop of size 224. | def get_train_transform(resizing='default', random_horizontal_flip=True, random_color_jitter=False):
"""
resizing mode:
- default: resize the image to 256 and take a random resized crop of size 224;
- cen.crop: resize the image to 256 and take the center crop of size 224;
- res: resize the image to 224;
- res.|crop: resize the image to 256 and take a random crop of size 224;
- res.sma|crop: resize the image keeping its aspect ratio such that the
smaller side is 256, then take a random crop of size 224;
– inc.crop: “inception crop” from (Szegedy et al., 2015);
– cif.crop: resize the image to 224, zero-pad it by 28 on each side, then take a random crop of size 224.
"""
if resizing == 'default':
transform = T.Compose([
ResizeImage(256),
T.RandomResizedCrop(224)
])
elif resizing == 'cen.crop':
transform = T.Compose([
ResizeImage(256),
T.CenterCrop(224)
])
elif resizing == 'ran.crop':
transform = T.Compose([
ResizeImage(256),
T.RandomCrop(224)
])
elif resizing == 'res.':
transform = T.Resize(224)
elif resizing == 'res.|crop':
transform = T.Compose([
T.Resize((256, 256)),
T.RandomCrop(224)
])
elif resizing == "res.sma|crop":
transform = T.Compose([
T.Resize(256),
T.RandomCrop(224)
])
elif resizing == 'inc.crop':
transform = T.RandomResizedCrop(224)
elif resizing == 'cif.crop':
transform = T.Compose([
T.Resize((224, 224)),
T.Pad(28),
T.RandomCrop(224),
])
else:
raise NotImplementedError(resizing)
transforms = [transform]
if random_horizontal_flip:
transforms.append(T.RandomHorizontalFlip())
if random_color_jitter:
transforms.append(T.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5))
transforms.extend([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
return T.Compose(transforms) |
resizing mode:
- default: resize the image to 256 and take the center crop of size 224;
– res.: resize the image to 224
– res.|crop: resize the image such that the smaller side is of size 256 and
then take a central crop of size 224. | def get_val_transform(resizing='default'):
"""
resizing mode:
- default: resize the image to 256 and take the center crop of size 224;
– res.: resize the image to 224
– res.|crop: resize the image such that the smaller side is of size 256 and
then take a central crop of size 224.
"""
if resizing == 'default':
transform = T.Compose([
ResizeImage(256),
T.CenterCrop(224),
])
elif resizing == 'res.':
transform = T.Resize((224, 224))
elif resizing == 'res.|crop':
transform = T.Compose([
T.Resize(256),
T.CenterCrop(224),
])
else:
raise NotImplementedError(resizing)
return T.Compose([
transform,
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]) |
resizing mode:
- default: resize the image to 256 and take a random resized crop of size 224;
- cen.crop: resize the image to 256 and take the center crop of size 224;
- res: resize the image to 224;
- res.|crop: resize the image to 256 and take a random crop of size 224;
- res.sma|crop: resize the image keeping its aspect ratio such that the
smaller side is 256, then take a random crop of size 224;
– inc.crop: “inception crop” from (Szegedy et al., 2015);
– cif.crop: resize the image to 224, zero-pad it by 28 on each side, then take a random crop of size 224. | def get_train_transform(resizing='default', random_horizontal_flip=True, random_color_jitter=False):
"""
resizing mode:
- default: resize the image to 256 and take a random resized crop of size 224;
- cen.crop: resize the image to 256 and take the center crop of size 224;
- res: resize the image to 224;
- res.|crop: resize the image to 256 and take a random crop of size 224;
- res.sma|crop: resize the image keeping its aspect ratio such that the
smaller side is 256, then take a random crop of size 224;
– inc.crop: “inception crop” from (Szegedy et al., 2015);
– cif.crop: resize the image to 224, zero-pad it by 28 on each side, then take a random crop of size 224.
"""
if resizing == 'default':
transform = T.Compose([
ResizeImage(256),
T.RandomResizedCrop(224)
])
elif resizing == 'cen.crop':
transform = T.Compose([
ResizeImage(256),
T.CenterCrop(224)
])
elif resizing == 'res.':
transform = T.Resize(224)
elif resizing == 'res.|crop':
transform = T.Compose([
T.Resize((256, 256)),
T.RandomCrop(224)
])
elif resizing == "res.sma|crop":
transform = T.Compose([
T.Resize(256),
T.RandomCrop(224)
])
elif resizing == 'inc.crop':
transform = T.RandomResizedCrop(224)
elif resizing == 'cif.crop':
transform = T.Compose([
T.Resize((224, 224)),
T.Pad(28),
T.RandomCrop(224),
])
else:
raise NotImplementedError(resizing)
transforms = [transform]
if random_horizontal_flip:
transforms.append(T.RandomHorizontalFlip())
if random_color_jitter:
transforms.append(T.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5))
transforms.extend([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
return T.Compose(transforms) |
Copy a state dict into the given ReID model. Because the model is trained with a classification loss, the number of
output classes (identities) differs across datasets, so the parameters of the last `fc` layer are not copied. | def copy_state_dict(model, state_dict, strip=None):
"""Copy state dict into the passed in ReID model. As we are using classification loss, which means we need to output
different number of classes(identities) for different datasets, we will not copy the parameters of last `fc` layer.
"""
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model |
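A hedged usage sketch (the model variable and checkpoint path are illustrative assumptions, not part of the original code):

# Load source-domain pretrained weights into a ReID model; weights saved under
# a DataParallel wrapper carry a "module." prefix, which `strip` removes.
checkpoint = torch.load("checkpoints/source_pretrained.pth", map_location="cpu")
reid_model = copy_state_dict(reid_model, checkpoint, strip="module.")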
resizing mode:
- default: resize the image to (height, width), zero-pad it by 10 on each side, then take a random crop of
  (height, width)
- res: resize the image to (height, width) | def get_train_transform(height, width, resizing='default', random_horizontal_flip=True, random_color_jitter=False,
random_gray_scale=False, random_erasing=False):
"""
resizing mode:
    - default: resize the image to (height, width), zero-pad it by 10 on each side, then take a random crop of
      (height, width)
    - res: resize the image to (height, width)
"""
if resizing == 'default':
transform = T.Compose([
T.Resize((height, width), interpolation=3),
T.Pad(10),
T.RandomCrop((height, width))
])
elif resizing == 'res':
transform = T.Resize((height, width), interpolation=3)
else:
raise NotImplementedError(resizing)
transforms = [transform]
if random_horizontal_flip:
transforms.append(T.RandomHorizontalFlip())
if random_color_jitter:
transforms.append(T.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3))
if random_gray_scale:
transforms.append(T.RandomGrayscale())
transforms.extend([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
if random_erasing:
transforms.append(RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406]))
return T.Compose(transforms) |
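A hedged sketch of a common person re-identification configuration (the 256x128 crop size and augmentation flags are illustrative):

reid_train_transform = get_train_transform(height=256, width=128, resizing='default',
                                            random_horizontal_flip=True, random_erasing=True)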
Visualize features from different domains using t-SNE. Because each domain can contain a very large number of
samples, only `n_data_points_per_domain` randomly selected samples are used per domain. | def visualize_tsne(source_loader, target_loader, model, filename, device, n_data_points_per_domain=3000):
"""Visualize features from different domains using t-SNE. As we can have very large number of samples in each
domain, only `n_data_points_per_domain` number of samples are randomly selected in each domain.
"""
source_feature_dict = extract_reid_feature(source_loader, model, device, normalize=True)
source_feature = torch.stack(list(source_feature_dict.values())).cpu()
source_feature = source_feature[torch.randperm(len(source_feature))]
source_feature = source_feature[:n_data_points_per_domain]
target_feature_dict = extract_reid_feature(target_loader, model, device, normalize=True)
target_feature = torch.stack(list(target_feature_dict.values())).cpu()
target_feature = target_feature[torch.randperm(len(target_feature))]
target_feature = target_feature[:n_data_points_per_domain]
tsne.visualize(source_feature, target_feature, filename, source_color='cornflowerblue', target_color='darkorange')
print('T-SNE process is done, figure is saved to {}'.format(filename)) |
Compute the k-reciprocal neighbors of the i-th sample. Two samples f_i, f_j are k-reciprocal neighbors if and only if
each one of them is among the k nearest samples of the other. | def k_reciprocal_neigh(initial_rank, i, k1):
"""Compute k-reciprocal neighbors of i-th sample. Two samples f_i, f_j are k reciprocal-neighbors if and only if
each one of them is among the k-nearest samples of another sample.
"""
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = torch.nonzero(backward_k_neigh_index == i)[:, 0]
return forward_k_neigh_index[fi] |
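A tiny illustrative example (the 4x4 ranking matrix is hypothetical; row i lists all samples sorted by distance to sample i):

initial_rank = torch.tensor([[0, 1, 3, 2],
                             [1, 0, 2, 3],
                             [2, 3, 1, 0],
                             [3, 2, 1, 0]])
# With k1=2, sample 0's forward neighbours are [0, 1, 3]; sample 3 does not
# rank 0 among its own top-3, so it is dropped from the reciprocal set.
print(k_reciprocal_neigh(initial_rank, i=0, k1=2))  # tensor([0, 1])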
Compute distance according to `Re-ranking Person Re-identification with k-reciprocal Encoding
(CVPR 2017) <https://arxiv.org/pdf/1701.08398.pdf>`_. | def compute_rerank_dist(target_features, k1=30, k2=6):
"""Compute distance according to `Re-ranking Person Re-identification with k-reciprocal Encoding
(CVPR 2017) <https://arxiv.org/pdf/1701.08398.pdf>`_.
"""
n = target_features.size(0)
original_dist = torch.pow(target_features, 2).sum(dim=1, keepdim=True) * 2
original_dist = original_dist.expand(n, n) - 2 * torch.mm(target_features, target_features.t())
original_dist /= original_dist.max(0)[0]
original_dist = original_dist.t()
initial_rank = torch.argsort(original_dist, dim=-1)
all_num = gallery_num = original_dist.size(0)
del target_features
nn_k1 = []
nn_k1_half = []
for i in range(all_num):
nn_k1.append(k_reciprocal_neigh(initial_rank, i, k1))
nn_k1_half.append(k_reciprocal_neigh(initial_rank, i, int(np.around(k1 / 2))))
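    # V[i, j]: Gaussian-weighted membership of sample j in the expanded k-reciprocal set of sample i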
V = torch.zeros(all_num, all_num)
for i in range(all_num):
k_reciprocal_index = nn_k1[i]
k_reciprocal_expansion_index = k_reciprocal_index
for candidate in k_reciprocal_index:
candidate_k_reciprocal_index = nn_k1_half[candidate]
if (len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len(
candidate_k_reciprocal_index)):
k_reciprocal_expansion_index = torch.cat((k_reciprocal_expansion_index, candidate_k_reciprocal_index))
k_reciprocal_expansion_index = torch.unique(k_reciprocal_expansion_index)
weight = torch.exp(-original_dist[i, k_reciprocal_expansion_index])
V[i, k_reciprocal_expansion_index] = weight / torch.sum(weight)
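    # query expansion: average each sample's membership vector over its k2 nearest neighbours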
if k2 != 1:
k2_rank = initial_rank[:, :k2].clone().view(-1)
V_qe = V[k2_rank]
V_qe = V_qe.view(initial_rank.size(0), k2, -1).sum(1)
V_qe /= k2
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(gallery_num):
invIndex.append(torch.nonzero(V[:, i])[:, 0])
jaccard_dist = torch.zeros_like(original_dist)
for i in range(all_num):
temp_min = torch.zeros(1, gallery_num)
indNonZero = torch.nonzero(V[i, :])[:, 0]
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + \
torch.min(V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
del invIndex
del V
pos_bool = (jaccard_dist < 0)
jaccard_dist[pos_bool] = 0.0
return jaccard_dist |
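A hedged usage sketch (the random features stand in for L2-normalized target-domain embeddings of shape (N, d); in practice the resulting distance matrix is typically fed to a clustering step such as DBSCAN):

target_features = F.normalize(torch.randn(200, 256), dim=1)
rerank_dist = compute_rerank_dist(target_features, k1=30, k2=6)
print(rerank_dist.shape)  # torch.Size([200, 200])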
Robust entropy proposed in `FDA: Fourier Domain Adaptation for Semantic Segmentation (CVPR 2020) <https://arxiv.org/abs/2004.05498>`_
Args:
y (tensor): logits output of segmentation model in shape of :math:`(N, C, H, W)`
        ita (float, optional): parameter for robust entropy. Default: 1.5
num_classes (int, optional): number of classes. Default: 19
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output. Default: ``'mean'``
Returns:
        Scalar by default. If :attr:`reduction` is ``'none'``, then :math:`(N, H, W)`.
""" Robust entropy proposed in `FDA: Fourier Domain Adaptation for Semantic Segmentation (CVPR 2020) <https://arxiv.org/abs/2004.05498>`_
Args:
y (tensor): logits output of segmentation model in shape of :math:`(N, C, H, W)`
        ita (float, optional): parameter for robust entropy. Default: 1.5
num_classes (int, optional): number of classes. Default: 19
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output. Default: ``'mean'``
Returns:
        Scalar by default. If :attr:`reduction` is ``'none'``, then :math:`(N, H, W)`.
"""
P = F.softmax(y, dim=1)
logP = F.log_softmax(y, dim=1)
PlogP = P * logP
ent = -1.0 * PlogP.sum(dim=1)
ent = ent / math.log(num_classes)
# compute robust entropy
ent = ent ** 2.0 + 1e-8
ent = ent ** ita
if reduction == 'mean':
return ent.mean()
else:
return ent |
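A minimal sketch with hypothetical segmentation logits (batch of 2 images, 19 classes, 64x64 resolution):

logits = torch.randn(2, 19, 64, 64)
loss = robust_entropy(logits, ita=2.0, num_classes=19)   # scalar
per_pixel = robust_entropy(logits, reduction='none')     # shape (2, 64, 64)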
Adapted from https://github.com/p-lambda/wilds
If vec is a list of Tensors, it concatenates them all along the first dimension.
If vec is a list of lists, it joins these lists together, but does not attempt to
recursively collate. This allows each element of the list to be, e.g., its own dict.
If vec is a list of dicts (with the same keys in each dict), it returns a single dict
with the same keys. For each key, it recursively collates all entries in the list. | def collate_list(vec):
"""
Adapted from https://github.com/p-lambda/wilds
If vec is a list of Tensors, it concatenates them all along the first dimension.
If vec is a list of lists, it joins these lists together, but does not attempt to
recursively collate. This allows each element of the list to be, e.g., its own dict.
If vec is a list of dicts (with the same keys in each dict), it returns a single dict
with the same keys. For each key, it recursively collates all entries in the list.
"""
if not isinstance(vec, list):
raise TypeError("collate_list must take in a list")
elem = vec[0]
if torch.is_tensor(elem):
return torch.cat(vec)
elif isinstance(elem, list):
return [obj for sublist in vec for obj in sublist]
elif isinstance(elem, dict):
return {k: collate_list([d[k] for d in vec]) for k in elem}
else:
raise TypeError("Elements of the list to collate must be tensors or dicts.") |
helper function to show an image | def matplotlib_imshow(img):
"""helper function to show an image"""
img = Denormalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)(img)
img = np.transpose(img.numpy(), (1, 2, 0))
plt.imshow(img) |
Generates a matplotlib Figure using a trained network, along with images
and labels from a batch, that shows the network's top prediction along
with its probability, alongside the actual label, coloring this
information based on whether the prediction was correct or not.
Prediction probabilities are computed from the logits with a softmax. | def plot_classes_preds(images, labels, outputs, class_names, metadata, metadata_map, nrows=4):
'''
    Generates a matplotlib Figure using a trained network, along with images
    and labels from a batch, that shows the network's top prediction along
    with its probability, alongside the actual label, coloring this
    information based on whether the prediction was correct or not.
    Prediction probabilities are computed from the logits with a softmax.
'''
# convert output probabilities to predicted class
_, preds_tensor = torch.max(outputs, 1)
preds = np.squeeze(preds_tensor.numpy())
probs = [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, outputs)]
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(12, nrows * 4))
domains = get_domain_names(metadata, metadata_map)
for idx in np.arange(min(nrows * 4, len(images))):
ax = fig.add_subplot(nrows, 4, idx + 1, xticks=[], yticks=[])
matplotlib_imshow(images[idx])
ax.set_title("{0}, {1:.1f}%\n(label: {2}\ndomain: {3})".format(
class_names[preds[idx]],
probs[idx] * 100.0,
class_names[labels[idx]],
domains[idx],
), color=("green" if preds[idx] == labels[idx].item() else "red"))
return fig |
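A hedged usage sketch (writer, images, labels, outputs, class_names, metadata, metadata_map and step are assumed to come from the surrounding training script; writer is a torch.utils.tensorboard.SummaryWriter):

fig = plot_classes_preds(images.cpu(), labels.cpu(), outputs.cpu(),
                         class_names, metadata, metadata_map, nrows=4)
writer.add_figure('predictions vs. ground truth', fig, global_step=step)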
Every item of y_target has n elements, some of which may be labeled as NaN.
NaN values must not contribute to the loss,
so the elements that are not NaN are extracted first and the loss is computed on them. | def reduced_bce_logit_loss(y_pred, y_target):
"""
    Every item of y_target has n elements, some of which may be labeled as NaN.
    NaN values must not contribute to the loss,
    so the elements that are not NaN are extracted first and the loss is computed on them.
"""
loss = nn.BCEWithLogitsLoss(reduction='none').cuda()
is_labeled = ~torch.isnan(y_target)
y_pred = y_pred[is_labeled].float()
y_target = y_target[is_labeled].float()
metrics = loss(y_pred, y_target)
return metrics.mean() |
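A hedged sketch (run on a machine with a GPU, since the loss module is moved to CUDA inside the function): a 2x3 multi-label target with one unlabeled NaN entry, so only the five labeled entries contribute to the loss.

y_pred = torch.randn(2, 3, device='cuda')
y_target = torch.tensor([[1., 0., float('nan')],
                         [0., 1., 1.]], device='cuda')
loss = reduced_bce_logit_loss(y_pred, y_target)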
Adapted from https://github.com/p-lambda/wilds | def get_transform(arch, max_token_length):
"""
Adapted from https://github.com/p-lambda/wilds
"""
if arch == 'distilbert-base-uncased':
tokenizer = DistilBertTokenizerFast.from_pretrained(arch)
else:
        raise ValueError("Model: {arch} not recognized".format(arch=arch))
def transform(text):
tokens = tokenizer(text, padding='max_length', truncation=True,
max_length=max_token_length, return_tensors='pt')
        if arch == 'bert-base-uncased':
x = torch.stack(
(
tokens["input_ids"],
tokens["attention_mask"],
tokens["token_type_ids"],
),
dim=2,
)
elif arch == 'distilbert-base-uncased':
x = torch.stack((tokens["input_ids"], tokens["attention_mask"]), dim=2)
x = torch.squeeze(x, dim=0) # First shape dim is always 1
return x
return transform |
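A hedged usage sketch (requires the `transformers` package; the tokenizer weights are downloaded on first use):

text_transform = get_transform('distilbert-base-uncased', max_token_length=128)
x = text_transform("This product exceeded my expectations.")
print(x.shape)  # torch.Size([128, 2]) -- input_ids and attention_mask stacked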
Return a pair of datasets corresponding to a random split of the given
dataset, with n data points in the first dataset and the rest in the second,
using the given random seed. | def split_dataset(dataset, n, seed=0):
"""
    Return a pair of datasets corresponding to a random split of the given
    dataset, with n data points in the first dataset and the rest in the second,
    using the given random seed.
"""
assert (n <= len(dataset))
idxes = list(range(len(dataset)))
np.random.RandomState(seed).shuffle(idxes)
subset_1 = idxes[:n]
subset_2 = idxes[n:]
return Subset(dataset, subset_1), Subset(dataset, subset_2) |
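A minimal sketch (full_dataset is a hypothetical torch Dataset): hold out 1000 random samples and keep the remainder in a second subset.

labeled_subset, remaining_subset = split_dataset(full_dataset, n=1000, seed=0)
print(len(labeled_subset), len(remaining_subset))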