code (stringlengths 26-870k) | docstring (stringlengths 1-65.6k) | func_name (stringlengths 1-194) | language (stringclasses, 1 value) | repo (stringlengths 8-68) | path (stringlengths 5-182) | url (stringlengths 46-251) | license (stringclasses, 4 values)
---|---|---|---|---|---|---|---
def after_fork_in_parent(child_pid: int) -> None:
"""The parent process should invoke this function after a fork.
Invoked by replacement_fork.py.
"""
Scalene.add_child_pid(child_pid)
Scalene.start_signal_queues() | The parent process should invoke this function after a fork.
Invoked by replacement_fork.py. | after_fork_in_parent | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def after_fork_in_child() -> None:
"""
Executed by a child process after a fork; mutates the
current profiler into a child.
Invoked by replacement_fork.py.
"""
Scalene.__is_child = True
Scalene.clear_metrics()
if Scalene.__accelerator and Scalene.__accelerator.has_gpu():
Scalene.__accelerator.reinit()
# Note: __parent_pid of the topmost process is its own pid.
Scalene.__pid = Scalene.__parent_pid
if "off" not in Scalene.__args or not Scalene.__args.off:
Scalene.enable_signals() | Executed by a child process after a fork; mutates the
current profiler into a child.
Invoked by replacement_fork.py. | after_fork_in_child | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
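The two fork hooks above are driven by Scalene's `replacement_fork.py`. Below is a minimal, hedged sketch of how such a wrapper might invoke them around `os.fork` on POSIX; the wrapper function and its wiring are illustrative assumptions, not Scalene's actual replacement code.

```python
import os

from scalene.scalene_profiler import Scalene

# Illustrative wrapper (not Scalene's actual replacement_fork.py): intercept os.fork
# and call the parent/child hooks shown above on the appropriate side of the fork.
_original_fork = os.fork

def _profiling_fork() -> int:
    child_pid = _original_fork()
    if child_pid == 0:
        # Child process: mutate the current profiler into a child profiler.
        Scalene.after_fork_in_child()
    else:
        # Parent process: record the child pid and restart the signal queues.
        Scalene.after_fork_in_parent(child_pid)
    return child_pid

os.fork = _profiling_fork
```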
def memcpy_sigqueue_processor(
_signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
frame: FrameType,
) -> None:
"""Process memcpy signals (used in a ScaleneSigQueue)."""
curr_pid = os.getpid()
arr: List[Tuple[str, int, int, int, int]] = []
# Process the input array.
with contextlib.suppress(ValueError):
while Scalene.__memcpy_mapfile.read():
count_str = Scalene.__memcpy_mapfile.get_str()
(
memcpy_time_str,
count_str2,
pid,
filename,
lineno,
bytei,
) = count_str.split(",")
if int(curr_pid) != int(pid):
continue
arr.append(
(
filename,
int(lineno),
int(bytei),
int(memcpy_time_str),
int(count_str2),
)
)
arr.sort()
for item in arr:
filename, linenum, byteindex, _memcpy_time, count = item
fname = Filename(filename)
line_no = LineNumber(linenum)
byteidx = ByteCodeIndex(byteindex)
# Add the byte index to the set for this line.
Scalene.__stats.bytei_map[fname][line_no].add(byteidx)
Scalene.__stats.memcpy_samples[fname][line_no] += int(count) | Process memcpy signals (used in a ScaleneSigQueue). | memcpy_sigqueue_processor | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
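The unpacking above implies that each memcpy record in the map file is a comma-separated string of the form `memcpy_time,count,pid,filename,lineno,bytei`. A standalone parse of one such record (the sample values are made up) looks like this:

```python
# Hypothetical record following the field order implied by the unpacking above.
record = "1714000000,4096,12345,example.py,42,8"
memcpy_time_str, count_str, pid, filename, lineno, bytei = record.split(",")

sample = (filename, int(lineno), int(bytei), int(memcpy_time_str), int(count_str))
print(sample)  # ('example.py', 42, 8, 1714000000, 4096)
```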
def should_trace(filename: Filename, func: str) -> bool:
"""Return true if we should trace this filename and function."""
if not filename:
return False
if Scalene.__profiler_base in filename:
# Don't profile the profiler.
return False
if Scalene.__functions_to_profile:
if filename in Scalene.__functions_to_profile:
if func in {
fn.__code__.co_name
for fn in Scalene.__functions_to_profile[filename]
}:
return True
return False
# Don't profile the Python libraries, unless overridden by --profile-all
try:
resolved_filename = str(pathlib.Path(filename).resolve())
except OSError:
# Not a file
return False
if not Scalene.__args.profile_all:
for n in sysconfig.get_scheme_names():
for p in sysconfig.get_path_names():
the_path = sysconfig.get_path(p, n)
libdir = str(pathlib.Path(the_path).resolve())
if libdir in resolved_filename:
return False
# Generic handling follows (when no @profile decorator has been used).
# TODO [EDB]: add support for this in traceconfig.cpp
profile_exclude_list = Scalene.__args.profile_exclude.split(",")
if any(
prof in filename for prof in profile_exclude_list if prof != ""
):
return False
if filename.startswith("_ipython-input-"):
# Profiling code created in a Jupyter cell:
# create a file to hold the contents.
import IPython
if result := re.match(r"_ipython-input-([0-9]+)-.*", filename):
# Write the cell's contents into the file.
cell_contents = (
IPython.get_ipython().history_manager.input_hist_raw[
int(result[1])
]
)
with open(filename, "w+") as f:
f.write(cell_contents)
return True
# If (a) `profile-only` was used, and (b) the file matched
# NONE of the provided patterns, don't profile it.
profile_only_set = set(Scalene.__args.profile_only.split(","))
if profile_only_set and all(
prof not in filename for prof in profile_only_set
):
return False
if filename[0] == "<" and filename[-1] == ">":
# Special non-file
return False
# Now we've filtered out any non-matches to profile-only patterns.
# If `profile-all` is specified, profile this file.
if Scalene.__args.profile_all:
return True
# Profile anything in the program's directory or a child directory,
# but nothing else, unless otherwise specified.
filename = Filename(
os.path.normpath(os.path.join(Scalene.__program_path, filename))
)
return Scalene.__program_path in filename | Return true if we should trace this filename and function. | should_trace | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
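The `--profile-only` and `--profile-exclude` checks above are plain substring matches against comma-separated patterns. A simplified, standalone sketch of just that filtering step (ignoring library paths, Jupyter cells, and decorated functions) might look like this; it is not Scalene's API.

```python
# Simplified sketch of the substring filtering in should_trace.
def matches_filters(filename: str, profile_only: str = "", profile_exclude: str = "") -> bool:
    exclude = [p for p in profile_exclude.split(",") if p]
    if any(p in filename for p in exclude):
        return False  # any exclude pattern wins
    only = [p for p in profile_only.split(",") if p]
    return not only or any(p in filename for p in only)

print(matches_filters("src/app/main.py", profile_only="app"))           # True
print(matches_filters("src/app/main.py", profile_exclude="app,tests"))  # False
```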
def start() -> None:
"""Initiate profiling."""
if not Scalene.__initialized:
print(
"ERROR: Do not try to invoke `start` if you have not called Scalene using one of the methods\n"
"in https://github.com/plasma-umass/scalene#using-scalene\n"
"(The most likely issue is that you need to run your code with `scalene`, not `python`).",
file=sys.stderr,
)
sys.exit(1)
Scalene.__stats.start_clock()
Scalene.enable_signals()
Scalene.__start_time = time.monotonic_ns()
Scalene.__done = False
if Scalene.__args.memory:
from scalene import pywhere # type: ignore
pywhere.set_scalene_done_false() | Initiate profiling. | start | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def stop() -> None:
"""Complete profiling."""
Scalene.__done = True
if Scalene.__args.memory:
from scalene import pywhere # type: ignore
pywhere.set_scalene_done_true()
Scalene.disable_signals()
Scalene.__stats.stop_clock()
if Scalene.__args.outfile:
Scalene.__profile_filename = os.path.join(
os.path.dirname(Scalene.__args.outfile),
os.path.basename(Scalene.__profile_filename),
)
if (
Scalene.__args.web
and not Scalene.__args.cli
and not Scalene.__is_child
):
# First, check for a browser.
try:
if not find_browser():
# Could not open a graphical web browser tab;
# act as if --web was not specified
Scalene.__args.web = False
else:
# Force JSON output to profile.json.
Scalene.__args.json = True
Scalene.__output.html = False
Scalene.__output.output_file = Scalene.__profile_filename
except Exception:
# Couldn't find a browser.
Scalene.__args.web = False
# If so, set variables appropriately.
if Scalene.__args.web and Scalene.in_jupyter():
# Force JSON output to profile.json.
Scalene.__args.json = True
Scalene.__output.html = False
Scalene.__output.output_file = Scalene.__profile_filename | Complete profiling. | stop | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
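`start` and `stop` back Scalene's programmatic API: launch the script with `scalene --off your_script.py` so the profiler is initialized (otherwise `start` exits with the error shown above), then toggle profiling around the region of interest. A typical usage sketch, where the workload function is just an example:

```python
from scalene import scalene_profiler

def expensive_work() -> int:
    return sum(i * i for i in range(1_000_000))

scalene_profiler.start()   # begin collecting samples here
expensive_work()
scalene_profiler.stop()    # stop collecting; the profile is written when the run ends
```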
def is_done() -> bool:
"""Return true if Scalene has stopped profiling."""
return Scalene.__done | Return true if Scalene has stopped profiling. | is_done | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def start_signal_handler(
_signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
_this_frame: Optional[FrameType],
) -> None:
"""Respond to a signal to start or resume profiling (--on).
See scalene_parseargs.py.
"""
for pid in Scalene.child_pids:
Scalene.__orig_kill(pid, Scalene.__signals.start_profiling_signal)
Scalene.start() | Respond to a signal to start or resume profiling (--on).
See scalene_parseargs.py. | start_signal_handler | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def stop_signal_handler(
_signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
_this_frame: Optional[FrameType],
) -> None:
"""Respond to a signal to suspend profiling (--off).
See scalene_parseargs.py.
"""
for pid in Scalene.child_pids:
Scalene.__orig_kill(pid, Scalene.__signals.stop_profiling_signal)
Scalene.stop()
# Output the profile if `--outfile` was set to a file.
if Scalene.__output.output_file:
Scalene.output_profile(sys.argv) | Respond to a signal to suspend profiling (--off).
See scalene_parseargs.py. | stop_signal_handler | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def disable_signals(retry: bool = True) -> None:
"""Turn off the profiling signals."""
if sys.platform == "win32":
Scalene.timer_signals = False
return
try:
assert Scalene.__signals.cpu_timer_signal is not None
Scalene.__orig_setitimer(Scalene.__signals.cpu_timer_signal, 0)
for sig in [
Scalene.__signals.malloc_signal,
Scalene.__signals.free_signal,
Scalene.__signals.memcpy_signal,
]:
Scalene.__orig_signal(sig, signal.SIG_IGN)
Scalene.stop_signal_queues()
except Exception:
# Retry just in case we get interrupted by one of our own signals.
if retry:
Scalene.disable_signals(retry=False) | Turn off the profiling signals. | disable_signals | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def exit_handler() -> None:
"""When we exit, disable all signals."""
Scalene.disable_signals()
# Delete the temporary directory.
with contextlib.suppress(Exception):
if not Scalene.__pid:
Scalene.__python_alias_dir.cleanup() # type: ignore
with contextlib.suppress(Exception):
os.remove(f"/tmp/scalene-malloc-lock{os.getpid()}") | When we exit, disable all signals. | exit_handler | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def profile_code(
self,
code: str,
the_globals: Dict[str, str],
the_locals: Dict[str, str],
left: List[str],
) -> int:
"""Initiate execution and profiling."""
if Scalene.__args.memory:
from scalene import pywhere # type: ignore
pywhere.populate_struct()
# If --off is set, tell all children to not profile and stop profiling before we even start.
if "off" not in Scalene.__args or not Scalene.__args.off:
self.start()
# Run the code being profiled.
exit_status = 0
try:
exec(code, the_globals, the_locals)
except SystemExit as se:
# Intercept sys.exit and propagate the error code.
exit_status = se.code if isinstance(se.code, int) else 1
except KeyboardInterrupt:
# Cleanly handle keyboard interrupts (quits execution and dumps the profile).
print("Scalene execution interrupted.", file=sys.stderr)
except Exception as e:
print(f"{Scalene.__error_message}:\n", e, file=sys.stderr)
traceback.print_exc()
exit_status = 1
finally:
self.stop()
if Scalene.__args.memory:
pywhere.disable_settrace()
pywhere.depopulate_struct()
# Leaving here in case of reversion
# sys.settrace(None)
stats = Scalene.__stats
(last_file, last_line, _) = Scalene.last_profiled_tuple()
stats.memory_malloc_count[last_file][last_line] += 1
stats.memory_aggregate_footprint[last_file][
last_line
] += stats.memory_current_highwater_mark[last_file][last_line]
# If we've collected any samples, dump them.
did_output = Scalene.output_profile(left)
if not did_output:
print(
"Scalene: The specified code did not run for long enough to profile.",
file=sys.stderr,
)
# Print out hints to explain why the above message may have been printed.
if not Scalene.__args.profile_all:
# if --profile-all was not specified, suggest it
# as a way to profile otherwise excluded code
# (notably Python libraries, which are excluded by
# default).
print(
"By default, Scalene only profiles code in the file executed and its subdirectories.",
file=sys.stderr,
)
print(
"To track the time spent in all files, use the `--profile-all` option.",
file=sys.stderr,
)
elif (
Scalene.__args.profile_only
or Scalene.__args.profile_exclude
):
# if --profile-only or --profile-exclude were
# specified, suggest that the patterns might be
# excluding too many files. Collecting the
# previously filtered out files could allow
# suggested fixes (as in, remove foo because it
# matches too many files).
print(
"The patterns used in `--profile-only` or `--profile-exclude` may be filtering out too many files.",
file=sys.stderr,
)
else:
# if none of the above cases hold, indicate that
# Scalene can only profile code that runs for at
# least one second or allocates some threshold
# amount of memory.
print(
"Scalene can only profile code that runs for at least one second or allocates at least 10MB.",
file=sys.stderr,
)
if not (
did_output
and Scalene.__args.web
and not Scalene.__args.cli
and not Scalene.__is_child
):
return exit_status
if Scalene.__args.web or Scalene.__args.html:
profile_filename = Scalene.__profile_filename
if Scalene.__args.outfile:
profile_filename = os.path.join(
os.path.dirname(Scalene.__args.outfile),
os.path.splitext(os.path.basename(Scalene.__args.outfile))[0] + ".json"
)
generate_html(
profile_fname=profile_filename,
output_fname=(
Scalene.__profiler_html if not Scalene.__args.outfile
else Scalene.__args.outfile
),
)
if Scalene.in_jupyter():
from scalene.scalene_jupyter import ScaleneJupyter
port = ScaleneJupyter.find_available_port(8181, 9000)
if not port:
print(
"Scalene error: could not find an available port.",
file=sys.stderr,
)
else:
ScaleneJupyter.display_profile(
port, Scalene.__profiler_html
)
else:
if not Scalene.__args.no_browser:
# Remove any interposition libraries from the environment before opening the browser.
# See also scalene/scalene_preload.py
old_dyld = os.environ.pop("DYLD_INSERT_LIBRARIES", "")
old_ld = os.environ.pop("LD_PRELOAD", "")
output_fname = (
f"{os.getcwd()}{os.sep}{Scalene.__profiler_html}"
)
if Scalene.__pid == 0:
# Only open a browser tab for the parent.
dir = os.path.dirname(__file__)
subprocess.Popen(
[
Scalene.__orig_python,
f"{dir}{os.sep}launchbrowser.py",
output_fname,
str(scalene.scalene_config.SCALENE_PORT),
],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
# Restore them.
os.environ.update(
{
"DYLD_INSERT_LIBRARIES": old_dyld,
"LD_PRELOAD": old_ld,
}
)
return exit_status | Initiate execution and profiling. | profile_code | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
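The core of `profile_code` is running the target program with `exec` while translating `sys.exit` into an exit status instead of terminating the profiler itself. A minimal standalone sketch of that pattern, with no profiling machinery attached:

```python
import traceback

def run_and_capture_exit(source: str) -> int:
    """Execute source text the way a script would run and return its exit status."""
    the_globals: dict = {"__name__": "__main__"}
    try:
        exec(compile(source, "<profiled>", "exec"), the_globals, the_globals)
    except SystemExit as se:
        # Propagate sys.exit() as a return code rather than exiting ourselves.
        return se.code if isinstance(se.code, int) else 1
    except Exception:
        traceback.print_exc()
        return 1
    return 0

print(run_and_capture_exit("import sys; sys.exit(3)"))  # 3
```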
def process_args(args: argparse.Namespace) -> None:
"""Process all arguments."""
Scalene.__args = ScaleneArguments(**vars(args))
Scalene.__next_output_time = (
time.perf_counter() + Scalene.__args.profile_interval
)
Scalene.__output.html = Scalene.__args.html
if Scalene.__args.outfile:
Scalene.__output.output_file = os.path.abspath(
os.path.expanduser(Scalene.__args.outfile)
)
Scalene.__is_child = Scalene.__args.pid != 0
# the pid of the primary profiler
Scalene.__parent_pid = Scalene.__args.pid if Scalene.__is_child else os.getpid()
# Don't profile the GPU if not enabled (i.e., either no options or --cpu and/or --memory, but no --gpu).
if not Scalene.__args.gpu:
Scalene.__output.gpu = False
Scalene.__json.gpu = False | Process all arguments. | process_args | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def set_initialized() -> None:
"""Indicate that Scalene has been initialized and is ready to begin profiling."""
Scalene.__initialized = True | Indicate that Scalene has been initialized and is ready to begin profiling. | set_initialized | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def main() -> None:
"""Initialize and profile."""
(
args,
left,
) = ScaleneParseArgs.parse_args()
# Try to profile an accelerator if one is found and `--gpu` is selected / it's the default (see ScaleneArguments).
if args.gpu:
if platform.system() == "Darwin":
from scalene.scalene_apple_gpu import ScaleneAppleGPU
Scalene.__accelerator = ScaleneAppleGPU()
else:
from scalene.scalene_nvidia_gpu import ScaleneNVIDIAGPU
Scalene.__accelerator = ScaleneNVIDIAGPU()
if not Scalene.__accelerator.has_gpu():
# Failover to try Neuron
from scalene.scalene_neuron import ScaleneNeuron
Scalene.__accelerator = ScaleneNeuron()
Scalene.__output.gpu = Scalene.__accelerator.has_gpu()
Scalene.__json.gpu = Scalene.__output.gpu
Scalene.__json.gpu_device = Scalene.__accelerator.gpu_device()
else:
Scalene.__accelerator = None
Scalene.__output.gpu = False
Scalene.__json.gpu = False
Scalene.__json.gpu_device = ""
Scalene.set_initialized()
Scalene.run_profiler(args, left) | Initialize and profile. | main | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def register_files_to_profile() -> None:
"""Tells the pywhere module, which tracks memory, which files to profile."""
from scalene import pywhere # type: ignore
profile_only_list = Scalene.__args.profile_only.split(",")
pywhere.register_files_to_profile(
list(Scalene.__files_to_profile) + profile_only_list,
Scalene.__program_path,
Scalene.__args.profile_all,
) | Tells the pywhere module, which tracks memory, which files to profile. | register_files_to_profile | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def run_profiler(
args: argparse.Namespace, left: List[str], is_jupyter: bool = False
) -> None:
"""Set up and initiate profiling."""
# Set up signal handlers for starting and stopping profiling.
if is_jupyter:
Scalene.set_in_jupyter()
if not Scalene.__initialized:
print(
"ERROR: Do not try to manually invoke `run_profiler`.\n"
"To invoke Scalene programmatically, see the usage noted in https://github.com/plasma-umass/scalene#using-scalene",
file=sys.stderr,
)
sys.exit(1)
if sys.platform != "win32":
for sig, handler in [
(
Scalene.__signals.start_profiling_signal,
Scalene.start_signal_handler,
),
(
Scalene.__signals.stop_profiling_signal,
Scalene.stop_signal_handler,
),
]:
Scalene.__orig_signal(sig, handler)
Scalene.__orig_siginterrupt(sig, False)
Scalene.__orig_signal(signal.SIGINT, Scalene.interruption_handler)
did_preload = (
False if is_jupyter else ScalenePreload.setup_preload(args)
)
if not did_preload:
with contextlib.suppress(Exception):
# If running in the background, print the PID.
if os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()):
# In the background.
print(
f"Scalene now profiling process {os.getpid()}",
file=sys.stderr,
)
print(
f" to disable profiling: python3 -m scalene.profile --off --pid {os.getpid()}",
file=sys.stderr,
)
print(
f" to resume profiling: python3 -m scalene.profile --on --pid {os.getpid()}",
file=sys.stderr,
)
Scalene.__stats.clear_all()
sys.argv = left
with contextlib.suppress(Exception):
if not is_jupyter:
multiprocessing.set_start_method("fork")
spec = None
try:
Scalene.process_args(args)
progs = None
exit_status = 0
try:
# Handle direct invocation of a string by executing the string and returning.
if len(sys.argv) >= 2 and sys.argv[0] == "-c":
try:
exec(sys.argv[1])
except SyntaxError:
traceback.print_exc()
sys.exit(1)
sys.exit(0)
if len(sys.argv) >= 2 and sys.argv[0] == "-m":
module = True
# Remove -m and the provided module name
_, mod_name, *sys.argv = sys.argv
# Given `some.module`, find the path of the corresponding
# some/module/__main__.py or some/module.py file to run.
_, spec, _ = _get_module_details(mod_name)
if not spec.origin:
raise FileNotFoundError
# Prepend the found .py file to arguments
sys.argv.insert(0, spec.origin)
else:
module = False
# Look for something ending in '.py'. Treat the first one as our executable.
progs = [x for x in sys.argv if re.match(r".*\.py$", x)]
# Just in case that didn't work, try sys.argv[0] and __file__.
with contextlib.suppress(Exception):
progs.extend((sys.argv[0], __file__))
if not progs:
raise FileNotFoundError
# Use the full absolute path of the program being profiled, expanding ~ if need be.
prog_name = os.path.abspath(os.path.expanduser(progs[0]))
with open(
prog_name, "r", encoding="utf-8"
) as prog_being_profiled:
# Read in the code and compile it.
code: Any = ""
try:
code = compile(
prog_being_profiled.read(),
prog_name,
"exec",
)
except SyntaxError:
traceback.print_exc()
sys.exit(1)
# Push the program's path.
program_path = Filename(os.path.dirname(prog_name))
if not module:
sys.path.insert(0, program_path)
# NOTE: Python, in its standard mode of operation,
# places the root of the module tree at the directory of
# the entrypoint script. This differs from how things work
# with the `-m` mode of operation, so for now we do not
# surface this in Scalene
#
# TODO: Add in entrypoint_dir logic for `-m` operation
Scalene.__entrypoint_dir = program_path
# If a program path was specified at the command-line, use it.
if len(Scalene.__args.program_path) > 0:
Scalene.__program_path = Filename(
os.path.abspath(args.program_path)
)
else:
# Otherwise, use the invoked directory.
Scalene.__program_path = program_path
# Grab local and global variables.
if Scalene.__args.memory:
Scalene.register_files_to_profile()
import __main__
the_locals = __main__.__dict__
the_globals = __main__.__dict__
# Splice in the name of the file being executed instead of the profiler.
the_globals["__file__"] = prog_name
# This part works because of the order in which Python attempts to resolve names--
# Within a given context, it first tries to look for __package__, and then for __spec__.
# __spec__ is a ModuleSpec object that carries a lot of extra machinery and requires
# extra effort to create (it seems, at least).
#
# __spec__ was originally set to None because the __globals__ here has the Scalene ModuleSpec,
# but it doesn't seem like that was enough. Setting the __package__, as below, seems to be enough to make
# it look in the right place
the_globals["__spec__"] = None
if spec is not None:
name = spec.name
the_globals["__package__"] = name.split(".")[0]
# Do a GC before we start.
gc.collect()
# Start the profiler.
profiler = Scalene(args, Filename(prog_name))
try:
# We exit with this status (returning error code as appropriate).
exit_status = profiler.profile_code(
code, the_locals, the_globals, left
)
if not is_jupyter:
sys.exit(exit_status)
except StopJupyterExecution:
# Running in Jupyter notebooks
pass
except AttributeError:
# don't let the handler below mask programming errors
raise
except Exception as ex:
template = "Scalene: An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
except (FileNotFoundError, IOError):
if progs:
print(
f"Scalene: could not find input file {prog_name}",
file=sys.stderr,
)
else:
print("Scalene: no input file specified.", file=sys.stderr)
sys.exit(1)
except SystemExit as e:
exit_status = e.code if isinstance(e.code, int) else 1
except StopJupyterExecution:
pass
except Exception:
print(
"Scalene failed to initialize.\n" + traceback.format_exc(),
file=sys.stderr,
)
sys.exit(1)
finally:
with contextlib.suppress(Exception):
for mapfile in [
Scalene.__malloc_mapfile,
Scalene.__memcpy_mapfile,
]:
mapfile.close()
if not Scalene.__is_child:
mapfile.cleanup()
if not is_jupyter:
sys.exit(exit_status) | Set up and initiate profiling. | run_profiler | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
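When invoked as `scalene -m some.module`, the code above resolves the module to a file path via runpy's private `_get_module_details` helper and then profiles that file like a script. A small illustration of just the resolution step (a private API, so subject to change across Python versions):

```python
from runpy import _get_module_details  # private helper, as used above

_, spec, _ = _get_module_details("http.server")
print(spec.origin)  # absolute path to http/server.py, which would then be profiled as a script
```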
def is_call_function(code: CodeType, bytei: ByteCodeIndex) -> bool:
"""Returns true iff the bytecode at the given index is a function call."""
return any(
(
ins.offset == bytei
and ins.opcode in ScaleneFuncUtils.__call_opcodes
)
for ins in dis.get_instructions(code)
) | Returns true iff the bytecode at the given index is a function call. | is_call_function | python | plasma-umass/scalene | scalene/scalene_funcutils.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_funcutils.py | Apache-2.0 |
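The check above matches a bytecode offset against a set of call opcodes. A standalone illustration using opcode names follows; the exact offsets and opcode names vary across Python versions.

```python
import dis

def example() -> int:
    return len("abc")

# Opcode names that correspond to calls differ by version (e.g. CALL vs. CALL_FUNCTION).
call_names = {"CALL", "CALL_FUNCTION", "CALL_METHOD", "PRECALL"}
call_offsets = [
    ins.offset
    for ins in dis.get_instructions(example.__code__)
    if ins.opname in call_names
]
print(call_offsets)  # e.g. [4] -- the offsets at which a call instruction sits
```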
def clear(self) -> None:
"""Reset all statistics except for memory footprint."""
self.start_time = 0
self.elapsed_time = 0
self.alloc_samples = 0
self.stacks.clear()
self.cpu_samples_python.clear()
self.cpu_samples_c.clear()
self.cpu_utilization.clear()
self.core_utilization.clear()
self.cpu_samples.clear()
self.gpu_samples.clear()
self.malloc_samples.clear()
self.memory_malloc_samples.clear()
self.memory_malloc_count.clear()
self.memory_current_footprint.clear()
self.memory_max_footprint.clear()
self.memory_current_highwater_mark.clear()
self.memory_aggregate_footprint.clear()
self.memory_python_samples.clear()
self.memory_free_samples.clear()
self.memory_free_count.clear()
self.memcpy_samples.clear()
self.total_cpu_samples = 0.0
self.total_gpu_samples = 0.0
self.n_gpu_samples.clear()
self.total_memory_malloc_samples = 0.0
self.total_memory_free_samples = 0.0
self.current_footprint = 0.0
self.leak_score.clear()
self.last_malloc_triggered = (
Filename(""),
LineNumber(0),
Address("0x0"),
)
self.allocation_velocity = (0.0, 0.0)
self.per_line_footprint_samples.clear()
self.bytei_map.clear() | Reset all statistics except for memory footprint. | clear | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def clear_all(self) -> None:
"""Clear all statistics."""
self.clear()
self.current_footprint = 0
self.max_footprint = 0
self.max_footprint_loc = None
self.per_line_footprint_samples.clear() | Clear all statistics. | clear_all | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def start_clock(self) -> None:
"""Start the timer."""
self.start_time = time.time() | Start the timer. | start_clock | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def stop_clock(self) -> None:
"""Stop the timer."""
if self.start_time > 0:
self.elapsed_time += time.time() - self.start_time
self.start_time = 0 | Stop the timer. | stop_clock | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def build_function_stats(self, filename: Filename) -> ScaleneStatistics:
"""Produce aggregated statistics for each function."""
fn_stats = ScaleneStatistics()
fn_stats.elapsed_time = self.elapsed_time
fn_stats.total_cpu_samples = self.total_cpu_samples
fn_stats.total_gpu_samples = self.total_gpu_samples
fn_stats.n_gpu_samples = self.n_gpu_samples
fn_stats.total_memory_malloc_samples = self.total_memory_malloc_samples
first_line_no = LineNumber(1)
fn_stats.function_map = self.function_map
fn_stats.firstline_map = self.firstline_map
for line_no in self.function_map[filename]:
fn_name = self.function_map[filename][line_no]
if fn_name == "<module>":
continue
fn_stats.cpu_samples_c[fn_name][
first_line_no
] += self.cpu_samples_c[filename][line_no]
fn_stats.cpu_samples_python[fn_name][
first_line_no
] += self.cpu_samples_python[filename][line_no]
fn_stats.gpu_samples[fn_name][first_line_no] += self.gpu_samples[
filename
][line_no]
fn_stats.n_gpu_samples[fn_name][
first_line_no
] += self.n_gpu_samples[filename][line_no]
fn_stats.gpu_mem_samples[fn_name][
first_line_no
] += self.gpu_mem_samples[filename][line_no]
fn_stats.cpu_utilization[fn_name][
first_line_no
] += self.cpu_utilization[filename][line_no]
fn_stats.core_utilization[fn_name][
first_line_no
] += self.core_utilization[filename][line_no]
fn_stats.per_line_footprint_samples[fn_name][
first_line_no
] += self.per_line_footprint_samples[filename][line_no]
fn_stats.memory_malloc_count[fn_name][
first_line_no
] += self.memory_malloc_count[filename][line_no]
fn_stats.memory_free_count[fn_name][
first_line_no
] += self.memory_free_count[filename][line_no]
fn_stats.memory_malloc_samples[fn_name][
first_line_no
] += self.memory_malloc_samples[filename][line_no]
fn_stats.memory_python_samples[fn_name][
first_line_no
] += self.memory_python_samples[filename][line_no]
fn_stats.memory_free_samples[fn_name][
first_line_no
] += self.memory_free_samples[filename][line_no]
for index in self.bytei_map[filename][line_no]:
fn_stats.bytei_map[fn_name][first_line_no].add(
ByteCodeIndex(index) # was 0
)
fn_stats.memcpy_samples[fn_name][
first_line_no
] += self.memcpy_samples[filename][line_no]
fn_stats.leak_score[fn_name][first_line_no] = (
fn_stats.leak_score[fn_name][first_line_no][0]
+ self.leak_score[filename][line_no][0],
fn_stats.leak_score[fn_name][first_line_no][1]
+ self.leak_score[filename][line_no][1],
)
fn_stats.memory_max_footprint[fn_name][first_line_no] = max(
fn_stats.memory_max_footprint[fn_name][first_line_no],
self.memory_max_footprint[filename][line_no],
)
fn_stats.memory_aggregate_footprint[fn_name][
first_line_no
] += self.memory_aggregate_footprint[filename][line_no]
return fn_stats | Produce aggregated statistics for each function. | build_function_stats | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def output_stats(self, pid: int, dir_name: pathlib.Path) -> None:
"""Output statistics for a particular process to a given directory."""
payload: List[Any] = [
getattr(self, n) for n in ScaleneStatistics.payload_contents
]
# Create a file in the Python alias directory with the relevant info.
out_filename = os.path.join(
dir_name, f"scalene{pid}-{str(os.getpid())}"
)
with open(out_filename, "wb") as out_file:
cloudpickle.dump(payload, out_file) | Output statistics for a particular process to a given directory. | output_stats | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
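Each process serializes its payload with `cloudpickle`, and the parent later reads the files back with a plain `pickle.Unpickler` (see `merge_stats` below). A roundtrip sketch with a made-up payload:

```python
import pickle
import tempfile

import cloudpickle  # third-party library used above

payload = [{"example.py": {10: 3.5}}, 42.0]  # made-up stand-in for the stats payload
with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as out_file:
    cloudpickle.dump(payload, out_file)
    path = out_file.name

with open(path, "rb") as in_file:
    restored = pickle.Unpickler(in_file).load()
print(restored == payload)  # True
```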
def increment_per_line_samples(
dest: Dict[Filename, Dict[LineNumber, T]],
src: Dict[Filename, Dict[LineNumber, T]],
) -> None:
"""Increment single-line dest samples by their value in src."""
for filename in src:
for lineno in src[filename]:
v = src[filename][lineno]
dest[filename][lineno] += v # type: ignore | Increment single-line dest samples by their value in src. | increment_per_line_samples | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def increment_cpu_utilization(
dest: Dict[Filename, Dict[LineNumber, RunningStats]],
src: Dict[Filename, Dict[LineNumber, RunningStats]],
) -> None:
"""Increment CPU utilization."""
for filename in src:
for lineno in src[filename]:
dest[filename][lineno] += src[filename][lineno] | Increment CPU utilization. | increment_cpu_utilization | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def increment_core_utilization(
dest: Dict[Filename, Dict[LineNumber, RunningStats]],
src: Dict[Filename, Dict[LineNumber, RunningStats]],
) -> None:
"""Increment core utilization."""
for filename in src:
for lineno in src[filename]:
dest[filename][lineno] += src[filename][lineno] | Increment core utilization. | increment_core_utilization | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def merge_stats(self, the_dir_name: pathlib.Path) -> None:
"""Merge all statistics in a given directory."""
the_dir = pathlib.Path(the_dir_name)
for f in list(the_dir.glob(os.path.join("**", "scalene*"))):
# Skip empty files.
if os.path.getsize(f) == 0:
continue
with open(f, "rb") as file:
unpickler = pickle.Unpickler(file)
try:
value = unpickler.load()
except EOFError:
# Empty file for some reason.
continue
x = ScaleneStatistics()
for i, n in enumerate(ScaleneStatistics.payload_contents):
setattr(x, n, value[i])
if x.max_footprint > self.max_footprint:
self.max_footprint = x.max_footprint
self.max_footprint_loc = x.max_footprint_loc
self.current_footprint = max(
self.current_footprint, x.current_footprint
)
self.increment_cpu_utilization(
self.cpu_utilization, x.cpu_utilization
)
self.increment_core_utilization(
self.core_utilization, x.core_utilization
)
self.elapsed_time = max(self.elapsed_time, x.elapsed_time)
self.alloc_samples += x.alloc_samples
self.stacks.update(x.stacks)
self.total_cpu_samples += x.total_cpu_samples
self.total_gpu_samples += x.total_gpu_samples
self.increment_per_line_samples(
self.cpu_samples_c, x.cpu_samples_c
)
self.increment_per_line_samples(
self.cpu_samples_python, x.cpu_samples_python
)
self.increment_per_line_samples(
self.gpu_samples, x.gpu_samples
)
self.increment_per_line_samples(
self.n_gpu_samples, x.n_gpu_samples
)
self.increment_per_line_samples(
self.gpu_mem_samples, x.gpu_mem_samples
)
self.increment_per_line_samples(
self.memcpy_samples, x.memcpy_samples
)
self.increment_per_line_samples(
self.per_line_footprint_samples,
x.per_line_footprint_samples,
)
# Sorting each of the per_line_footprint_sample lists by time, since per_line_footprint_samples
# is sent between processes. Samples are in the format [time, footprint]
for filename in self.per_line_footprint_samples:
for lineno in self.per_line_footprint_samples[filename]:
self.per_line_footprint_samples[filename][lineno].sort(
key=lambda x: x[0]
)
self.increment_per_line_samples(
self.memory_malloc_count, x.memory_malloc_count
)
self.increment_per_line_samples(
self.memory_malloc_samples, x.memory_malloc_samples
)
self.increment_per_line_samples(
self.memory_python_samples, x.memory_python_samples
)
self.increment_per_line_samples(
self.memory_free_samples, x.memory_free_samples
)
self.increment_per_line_samples(
self.memory_free_count, x.memory_free_count
)
for filename in x.bytei_map:
for lineno in x.bytei_map[filename]:
v = x.bytei_map[filename][lineno]
self.bytei_map[filename][lineno] |= v
self.memory_max_footprint[filename][lineno] = max(
self.memory_max_footprint[filename][lineno],
x.memory_max_footprint[filename][lineno],
)
for filename in x.cpu_samples:
self.cpu_samples[filename] += x.cpu_samples[filename]
self.total_memory_free_samples += x.total_memory_free_samples
self.total_memory_malloc_samples += (
x.total_memory_malloc_samples
)
self.memory_footprint_samples += x.memory_footprint_samples
# Sorting footprint samples by time when sample was taken.
# Samples are in the format [time, footprint]
self.memory_footprint_samples.sort(key=lambda x: x[0])
for k, val in x.function_map.items():
if k in self.function_map:
self.function_map[k].update(val)
else:
self.function_map[k] = val
self.firstline_map.update(x.firstline_map)
os.remove(f) | Merge all statistics in a given directory. | merge_stats | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def _set_accounting_mode(self) -> bool:
"""Returns true iff the accounting mode was set already for all GPUs or is now set."""
ngpus = self.__ngpus
for i in range(ngpus):
# Check if each GPU has accounting mode set.
h = self.__handle[i]
if (
pynvml.nvmlDeviceGetAccountingMode(h)
!= pynvml.NVML_FEATURE_ENABLED
):
# If not, try to set it. As a side effect, we turn persistence mode on
# so the driver is not unloaded (which undoes the accounting mode setting).
try:
pynvml.nvmlDeviceSetPersistenceMode(
h, pynvml.NVML_FEATURE_ENABLED
)
pynvml.nvmlDeviceSetAccountingMode(
h, pynvml.NVML_FEATURE_ENABLED
)
except pynvml.NVMLError:
# We don't have sufficient permissions.
return False
return True | Returns true iff the accounting mode was set already for all GPUs or is now set. | _set_accounting_mode | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def gpu_utilization(self, pid: int) -> float:
"""Return overall GPU utilization by pid if possible.
Otherwise, returns aggregate utilization across all running processes.
"""
if not self.has_gpu():
return 0
ngpus = self.__ngpus
accounting_on = self.__has_per_pid_accounting
utilization = 0
for i in range(ngpus):
h = self.__handle[i]
if accounting_on:
with contextlib.suppress(Exception):
utilization += pynvml.nvmlDeviceGetAccountingStats(
h, pid
).gpuUtilization
else:
try:
utilization += pynvml.nvmlDeviceGetUtilizationRates(h).gpu
except pynvml.NVMLError:
# Silently ignore NVML errors. "Fixes" https://github.com/plasma-umass/scalene/issues/471.
pass
return (utilization / ngpus) / 100.0 | Return overall GPU utilization by pid if possible.
Otherwise, returns aggregate utilization across all running processes. | gpu_utilization | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def has_gpu(self) -> bool:
"""True iff the system has a detected GPU."""
return self.__has_gpu | True iff the system has a detected GPU. | has_gpu | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def reinit(self) -> None:
"""Reinitialize the nvidia wrapper."""
if not self.has_gpu():
return
self.__handle = []
with contextlib.suppress(Exception):
pynvml.nvmlInit()
self.__ngpus = pynvml.nvmlDeviceGetCount()
self.__handle.extend(
pynvml.nvmlDeviceGetHandleByIndex(i)
for i in range(self.__ngpus)
) | Reinitialize the nvidia wrapper. | reinit | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def gpu_memory_usage(self, pid: int) -> float:
"""Returns GPU memory used by the process pid, in MB."""
# Adapted from https://github.com/gpuopenanalytics/pynvml/issues/21#issuecomment-678808658
if not self.has_gpu():
return 0
total_used_GPU_memory = 0
for i in range(self.__ngpus):
handle = self.__handle[i]
with contextlib.suppress(Exception):
for proc in pynvml.nvmlDeviceGetComputeRunningProcesses(
handle
):
# Only accumulate memory stats for the current pid.
if proc.usedGpuMemory and proc.pid == pid:
# First check is to protect against return of None
# from incompatible NVIDIA drivers.
total_used_GPU_memory += proc.usedGpuMemory / 1048576
return total_used_GPU_memory | Returns GPU memory used by the process pid, in MB. | gpu_memory_usage | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
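The NVIDIA queries above come from `pynvml` (the nvidia-ml-py bindings). A minimal standalone sketch of the same calls for device 0; it only does anything on a machine with an NVIDIA GPU and driver, and is silently skipped otherwise.

```python
import contextlib

import pynvml

with contextlib.suppress(Exception):  # no-op on machines without an NVIDIA GPU/driver
    pynvml.nvmlInit()
    handle = pynvml.nvmlDeviceGetHandleByIndex(0)
    util = pynvml.nvmlDeviceGetUtilizationRates(handle).gpu  # percent, all processes
    procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
    mem_mb = sum(p.usedGpuMemory / 1048576 for p in procs if p.usedGpuMemory)
    print(f"GPU utilization: {util}%  compute memory in use: {mem_mb:.1f} MB")
```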
def get_stats(self) -> Tuple[float, float]:
"""Returns a tuple of (utilization %, memory in use)."""
if self.has_gpu():
total_load = self.gpu_utilization(self.__pid)
mem_used = self.gpu_memory_usage(self.__pid)
return (total_load, mem_used)
return (0.0, 0.0) | Returns a tuple of (utilization %, memory in use). | get_stats | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def reset(self) -> None:
"""Reset the timer."""
self.seconds = 0.0
self.interval = 0.0
self.is_set = False | Reset the timer. | reset | python | plasma-umass/scalene | scalene/scalene_client_timer.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_client_timer.py | Apache-2.0 |
def get_itimer(self) -> Tuple[float, float]:
"""Returns a tuple of (seconds, interval)."""
return self.seconds, self.interval | Returns a tuple of (seconds, interval). | get_itimer | python | plasma-umass/scalene | scalene/scalene_client_timer.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_client_timer.py | Apache-2.0 |
def yield_next_delay(self, elapsed: float) -> Tuple[bool, float]:
"""
Updates remaining_interval or remaining_seconds, returning whether
the timer signal should be passed up to the client and
the next delay. If the second return <= 0, then
there is no interval and the delay has elapsed.
"""
if self.delay_elapsed:
self.remaining_interval -= elapsed
is_done = self.remaining_interval <= 0
if is_done:
self.remaining_interval = self.interval
return is_done, self.remaining_interval
self.remaining_seconds -= elapsed
is_done = self.remaining_seconds <= 0
if is_done:
self.delay_elapsed = True
return (
is_done,
self.remaining_interval if is_done else self.remaining_seconds,
) | Updates remaining_interval or remaining_seconds, returning whether
the timer signal should be passed up to the client and
the next delay. If the second return <= 0, then
there is no interval and the delay has elapsed. | yield_next_delay | python | plasma-umass/scalene | scalene/scalene_client_timer.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_client_timer.py | Apache-2.0 |
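A small driver makes the state machine above concrete: a client timer with a 0.3 s initial delay and a 0.2 s repeat interval, advanced in 0.1 s profiler ticks. The fields are set directly here for the demo, and a no-argument constructor is assumed; the surrounding class presumably also exposes a setter.

```python
timer = ScaleneClientTimer()   # assumes the no-argument constructor of the class in this file
timer.interval = 0.2
timer.remaining_interval = 0.2
timer.remaining_seconds = 0.3
timer.delay_elapsed = False

elapsed = 0.0
for _ in range(8):
    fire, next_delay = timer.yield_next_delay(0.1)
    elapsed += 0.1
    if fire:
        # Fires at roughly 0.3 s, 0.5 s, and 0.7 s.
        print(f"deliver client timer signal at ~{elapsed:.1f}s (next delay {next_delay:.1f}s)")
```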
def replacement_lock(scalene: Scalene) -> None:
class ReplacementLock:
"""Replace lock with a version that periodically yields and updates sleeping status."""
def __init__(self) -> None:
# Cache the original lock (which we replace)
# print("INITIALIZING LOCK")
self.__lock: threading.Lock = scalene.get_original_lock()
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool:
tident = threading.get_ident()
if blocking == 0:
blocking = False
start_time = time.perf_counter()
if blocking:
if timeout < 0:
interval = sys.getswitchinterval()
else:
interval = min(timeout, sys.getswitchinterval())
else:
interval = -1
while True:
scalene.set_thread_sleeping(tident)
acquired_lock = self.__lock.acquire(blocking, interval)
scalene.reset_thread_sleeping(tident)
if acquired_lock:
return True
if not blocking:
return False
# If a timeout was specified, check to see if it's expired.
if timeout != -1:
end_time = time.perf_counter()
if end_time - start_time >= timeout:
return False
def release(self) -> None:
self.__lock.release()
def locked(self) -> bool:
return self.__lock.locked()
def _at_fork_reinit(self) -> None:
try:
self.__lock._at_fork_reinit() # type: ignore
except AttributeError:
pass
def __enter__(self) -> None:
self.acquire()
def __exit__(self, type: str, value: str, traceback: Any) -> None:
self.release()
threading.Lock = ReplacementLock # type: ignore | Replace lock with a version that periodically yields and updates sleeping status. | replacement_lock | python | plasma-umass/scalene | scalene/replacement_lock.py | https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_lock.py | Apache-2.0 |
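Once `replacement_lock(scalene)` has run, every `threading.Lock()` call constructs a `ReplacementLock`, so blocking acquires are sliced into switch-interval chunks and the profiler can mark the waiting thread as sleeping. Client code needs no changes, as in this usage sketch:

```python
import threading

lock = threading.Lock()   # after the patch, this is actually a ReplacementLock
with lock:                # __enter__ -> acquire(); __exit__ -> release()
    print(lock.locked())  # True: the underlying lock is held
print(lock.locked())      # False: released on exit from the with-block
```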
def reinit(self) -> None:
"""Here for compatibility with ScaleneGPU."""
pass | Here for compatibility with ScaleneGPU. | reinit | python | plasma-umass/scalene | scalene/scalene_neuron.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_neuron.py | Apache-2.0 |
def test_get_native_imported_modules(cleanup_imports):
# Mock the is_native method to control which modules are considered native
with patch.object(ScaleneAnalysis, 'is_native', return_value=True):
source_code = """
import math
import os
from sys import path
"""
expected_imports = ['import math', 'import os', 'from sys import path']
actual_imports = ScaleneAnalysis.get_native_imported_modules(source_code)
assert set(actual_imports) == set(expected_imports), "The list of native imports does not match the expected list."
with patch.object(ScaleneAnalysis, 'is_native', return_value=False):
source_code = """
import math
import os
from sys import path
"""
expected_imports = []
actual_imports = ScaleneAnalysis.get_native_imported_modules(source_code)
assert actual_imports == expected_imports, "The list of native imports should be empty." | expected_imports = ['import math', 'import os', 'from sys import path']
actual_imports = ScaleneAnalysis.get_native_imported_modules(source_code)
assert set(actual_imports) == set(expected_imports), "The list of native imports does not match the expected list."
with patch.object(ScaleneAnalysis, 'is_native', return_value=False):
source_code = | test_get_native_imported_modules | python | plasma-umass/scalene | tests/test_coverup_1.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_1.py | Apache-2.0 |
def free_port():
"""Find a free port for testing."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('', 0))
return s.getsockname()[1] | Find a free port for testing. | free_port | python | plasma-umass/scalene | tests/test_coverup_72.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_72.py | Apache-2.0 |
def occupied_port():
"""Create and occupy a port for testing."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
s.listen(1)
yield port
s.close() | Create and occupy a port for testing. | occupied_port | python | plasma-umass/scalene | tests/test_coverup_72.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_72.py | Apache-2.0 |
def test_is_port_available_with_free_port(free_port):
"""Test that is_port_available returns True for a free port."""
assert is_port_available(free_port) == True | Test that is_port_available returns True for a free port. | test_is_port_available_with_free_port | python | plasma-umass/scalene | tests/test_coverup_72.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_72.py | Apache-2.0 |
def test_is_port_available_with_occupied_port(occupied_port):
"""Test that is_port_available returns False for an occupied port."""
assert is_port_available(occupied_port) == False | Test that is_port_available returns False for an occupied port. | test_is_port_available_with_occupied_port | python | plasma-umass/scalene | tests/test_coverup_72.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_72.py | Apache-2.0 |
def calculate_z_serial_purepython(maxiter, zs, cs):
"""Calculate output list using Julia update rule"""
output = [0] * len(zs)
for i in range(len(zs)):
n = 0
z = zs[i]
c = cs[i]
while abs(z) < 2 and n < maxiter:
z = z * z + c
n += 1
output[i] = n
return output | Calculate output list using Julia update rule | calculate_z_serial_purepython | python | plasma-umass/scalene | benchmarks/julia1_nopil.py | https://github.com/plasma-umass/scalene/blob/master/benchmarks/julia1_nopil.py | Apache-2.0 |
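A quick sanity check of the update rule on two points, using this benchmark's usual constant c = -0.62772 - 0.42193j; the expected counts in the comment are what this classic example is known to produce, not measured here.

```python
zs = [complex(-1.8, -1.8), complex(0.0, 0.0)]
cs = [complex(-0.62772, -0.42193)] * len(zs)
# The first point starts with |z| >= 2 and escapes immediately (count 0);
# the origin is expected to survive all 300 iterations for this c.
print(calculate_z_serial_purepython(300, zs, cs))
```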
def calc_pure_python(desired_width, max_iterations):
"""Create a list of complex coordinates (zs) and complex
parameters (cs), build Julia set, and display"""
x_step = (float(x2 - x1) / float(desired_width))
y_step = (float(y1 - y2) / float(desired_width))
x = []
y = []
ycoord = y2
while ycoord > y1:
y.append(ycoord)
ycoord += y_step
xcoord = x1
while xcoord < x2:
x.append(xcoord)
xcoord += x_step
# Build a list of coordinates and the initial condition for each cell.
# Note that our initial condition is a constant and could easily be removed;
# we use it to simulate a real-world scenario with several inputs to
# our function.
zs = []
cs = []
for ycoord in y:
for xcoord in x:
zs.append(complex(xcoord, ycoord))
cs.append(complex(c_real, c_imag))
print("Length of x:", len(x))
print("Total elements:", len(zs))
start_time = time.process_time()
output = calculate_z_serial_purepython(max_iterations, zs, cs)
end_time = time.process_time()
secs = end_time - start_time
sys.stdout.flush()
sys.stderr.flush()
output_str = "calculate_z_serial_purepython took " + str(secs) + " seconds"
print(output_str, file=sys.stderr)
sys.stderr.flush() | Create a list of complex coordinates (zs) and complex
parameters (cs), build Julia set, and display | calc_pure_python | python | plasma-umass/scalene | benchmarks/julia1_nopil.py | https://github.com/plasma-umass/scalene/blob/master/benchmarks/julia1_nopil.py | Apache-2.0 |
def __init__(self, D_in, H, D_out):
"""
In the constructor we construct three nn.Linear instances that we will use
in the forward pass.
"""
super(DynamicNet, self).__init__()
self.input_linear = torch.nn.Linear(D_in, H)
self.middle_linear = torch.nn.Linear(H, H)
self.output_linear = torch.nn.Linear(H, D_out) | In the constructor we construct three nn.Linear instances that we will use
in the forward pass. | __init__ | python | plasma-umass/scalene | test/testpyt.py | https://github.com/plasma-umass/scalene/blob/master/test/testpyt.py | Apache-2.0 |
def forward(self, x):
"""
For the forward pass of the model, we randomly choose either 0, 1, 2, or 3
and reuse the middle_linear Module that many times to compute hidden layer
representations.
Since each forward pass builds a dynamic computation graph, we can use normal
Python control-flow operators like loops or conditional statements when
defining the forward pass of the model.
Here we also see that it is perfectly safe to reuse the same Module many
times when defining a computational graph. This is a big improvement from Lua
Torch, where each Module could be used only once.
"""
h_relu = self.input_linear(x).clamp(min=0)
for _ in range(random.randint(0, 3)):
h_relu = self.middle_linear(h_relu).clamp(min=0)
y_pred = self.output_linear(h_relu)
return y_pred | For the forward pass of the model, we randomly choose either 0, 1, 2, or 3
and reuse the middle_linear Module that many times to compute hidden layer
representations.
Since each forward pass builds a dynamic computation graph, we can use normal
Python control-flow operators like loops or conditional statements when
defining the forward pass of the model.
Here we also see that it is perfectly safe to reuse the same Module many
times when defining a computational graph. This is a big improvement from Lua
Torch, where each Module could be used only once. | forward | python | plasma-umass/scalene | test/testpyt.py | https://github.com/plasma-umass/scalene/blob/master/test/testpyt.py | Apache-2.0 |
def total_size(o, handlers={}, verbose=False):
""" Returns the approximate memory footprint an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements}
"""
dict_handler = lambda d: chain.from_iterable(d.items())
all_handlers = {tuple: iter,
list: iter,
deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter,
}
all_handlers.update(handlers) # user handlers take precedence
seen = set() # track which object id's have already been seen
default_size = getsizeof(0) # estimate sizeof object without __sizeof__
def sizeof(o):
if id(o) in seen: # do not double count the same object
return 0
seen.add(id(o))
s = getsizeof(o, default_size)
if verbose:
print(s, type(o), repr(o), file=stderr)
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o) | Returns the approximate memory footprint an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements} | total_size | python | plasma-umass/scalene | test/test-size.py | https://github.com/plasma-umass/scalene/blob/master/test/test-size.py | Apache-2.0 |
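A short comparison shows why the recursive walk matters: `sys.getsizeof` reports only the outer container's header, while `total_size` also counts the nested contents.

```python
import sys

nested = [list(range(100)), {"a": "x" * 1000}]
print(sys.getsizeof(nested))  # size of the outer list object only
print(total_size(nested))     # also counts the inner list, dict, ints, and string
```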
def topoSort(roots, getParents):
"""Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node
"""
results = []
visited = set()
# Use iterative version to avoid stack limits for large datasets
stack = [(node, 0) for node in roots]
while stack:
current, state = stack.pop()
if state == 0:
# before recursing
if current not in visited:
visited.add(current)
stack.append((current, 1))
stack.extend((parent, 0) for parent in getParents(current))
else:
# after recursing
assert(current in visited)
results.append(current)
return results | Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node | topoSort | python | plasma-umass/scalene | test/original/bm_mdp.py | https://github.com/plasma-umass/scalene/blob/master/test/original/bm_mdp.py | Apache-2.0 |
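For example, with a three-node dependency chain where `getParents` returns what a node depends on, the result lists dependencies before their dependents:

```python
deps = {"c": ["b"], "b": ["a"], "a": []}
order = topoSort(["c"], lambda n: deps[n])
print(order)  # ['a', 'b', 'c']
```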
def fit(self, X, y, iterations=500, disp=-1):
"""Fit the model using the training data.
Arguments:
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Notes: This function must set member variables such that a subsequent call
to get_params or predict uses the learned parameters, overwriting
any parameter values previously set by calling set_params.
"""
n_features = X.shape[1]
x = np.random.rand(n_features + 1)
minimizer = x
fmin = self.objective(x, X, y)
for t in range(iterations):
if disp != -1 and t % disp == 0:
print("At iteration", t, "f(minimizer) =", fmin)
alpha = 0.002 / math.sqrt(t + 1)
subgrad = self.subgradient(x, X, y)
x -= alpha * subgrad
objective = self.objective(x, X, y)
if (objective < fmin):
fmin = objective
minimizer = x
self.w = minimizer[:-1]
self.b = minimizer[-1] | Fit the model using the training data.
Arguments:
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Notes: This function must set member variables such that a subsequent call
to get_params or predict uses the learned parameters, overwriting
any parameter values previously set by calling set_params. | fit | python | plasma-umass/scalene | test/automatic/svm/svm-original.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-original.py | Apache-2.0 |
def objective(self, wb, X, y):
"""Compute the objective function for the SVM.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
obj (float): value of the objective function evaluated on X and y.
"""
n_samples = X.shape[0]
w = wb[:-1]
b = wb[-1]
sum = 0
for n in range(n_samples):
sum += max(0, 1 - y[n] * (np.dot(X[n], w) + b))
return sum + self.lambda1 * LA.norm(w, 1) + self.lambda2 * (LA.norm(w, 2) ** 2) | Compute the objective function for the SVM.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
obj (float): value of the objective function evaluated on X and y. | objective | python | plasma-umass/scalene | test/automatic/svm/svm-original.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-original.py | Apache-2.0 |
def subgradient(self, wb, X, y):
"""Compute the subgradient of the objective function.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
subgrad (ndarray, shape = (n_features+1,)):
subgradient of the objective function with respect to
the coefficients wb=[w,b] of the linear model
"""
n_samples = X.shape[0]
n_features = X.shape[1]
w = wb[:-1]
b = wb[-1]
subgrad = np.zeros(n_features + 1)
for i in range(n_features):
for n in range(n_samples):
subgrad[i] += (- y[n] * X[n][i]) if y[n] * (np.dot(X[n], w) + b) < 1 else 0
subgrad[i] += self.lambda1 * (-1 if w[i] < 0 else 1) + 2 * self.lambda2 * w[i]
for n in range(n_samples):
subgrad[-1] += - y[n] if y[n] * (np.dot(X[n], w) + b) < 1 else 0
return subgrad | Compute the subgradient of the objective function.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
subgrad (ndarray, shape = (n_features+1,)):
subgradient of the objective function with respect to
the coefficients wb=[w,b] of the linear model | subgradient | python | plasma-umass/scalene | test/automatic/svm/svm-original.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-original.py | Apache-2.0 |
def subgradient(self, wb, X, y):
"""Compute the subgradient of the objective function.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
subgrad (ndarray, shape = (n_features+1,)):
subgradient of the objective function with respect to
the coefficients wb=[w,b] of the linear model
"""
n_samples = X.shape[0]
n_features = X.shape[1]
w = wb[:-1]
b = wb[-1]
# Vectorized operations to replace for loops
subgrad = np.zeros(n_features + 1)
subgrad[:-1] = np.sum(-y[:, None] * X * (y * (X.dot(w) + b) < 1)[:, None], axis=0)
subgrad[:-1] += self.lambda1 * np.sign(w) + 2 * self.lambda2 * w
subgrad[-1] = np.sum(-y * (y * (X.dot(w) + b) < 1))
return subgrad | Compute the subgradient of the objective function.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
subgrad (ndarray, shape = (n_features+1,)):
subgradient of the objective function with respect to
the coefficients wb=[w,b] of the linear model | subgradient | python | plasma-umass/scalene | test/automatic/svm/svm-optimized.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-optimized.py | Apache-2.0 |
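A quick numerical check, on random data with assumed lambda1/lambda2 values, that the vectorized expressions above reproduce the loop-based subgradient from svm-original.py (the two differ only in the sign(0) convention, which random inputs do not hit).

# Sketch: loop subgradient vs. vectorized subgradient on random data.
import numpy as np

rng = np.random.default_rng(1)
X = rng.normal(size=(20, 4))
y = rng.choice([-1.0, 1.0], size=20)
wb = rng.normal(size=5)
lambda1, lambda2 = 0.1, 0.05  # illustrative values
w, b = wb[:-1], wb[-1]

# Loop form (as in svm-original.py).
loop = np.zeros(5)
for i in range(4):
    for n in range(20):
        loop[i] += (-y[n] * X[n, i]) if y[n] * (X[n] @ w + b) < 1 else 0.0
    loop[i] += lambda1 * (-1 if w[i] < 0 else 1) + 2 * lambda2 * w[i]
for n in range(20):
    loop[-1] += -y[n] if y[n] * (X[n] @ w + b) < 1 else 0.0

# Vectorized form (as in svm-optimized.py).
vec = np.zeros(5)
active = (y * (X @ w + b) < 1)
vec[:-1] = np.sum(-y[:, None] * X * active[:, None], axis=0)
vec[:-1] += lambda1 * np.sign(w) + 2 * lambda2 * w
vec[-1] = np.sum(-y * active)

assert np.allclose(loop, vec)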
def split_params(data):
"""Split params between scanned and non-scanned"""
flat = traverse_util.flatten_dict(unfreeze(data))
split = {"standard": {}, "scanned_encoder": {}, "scanned_decoder": {}}
for k, v in flat.items():
if "FlaxBartEncoderLayers" in k:
split["scanned_encoder"][k] = v
elif "FlaxBartDecoderLayers" in k:
split["scanned_decoder"][k] = v
else:
split["standard"][k] = v
# remove empty keys
split = {k: v for k, v in split.items() if v}
for k, v in split.items():
split[k] = freeze(traverse_util.unflatten_dict(v))
return split | Split params between scanned and non-scanned | split_params | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
def trainable_params(data, embeddings_only):
"""Keep only trainable parameters"""
if not embeddings_only:
return data
data = unfreeze(data)
trainable = {
"lm_head": data["lm_head"],
"model": {
"decoder": {
layer: data["model"]["decoder"][layer]
for layer in [
"embed_positions",
"embed_tokens",
"final_ln",
"layernorm_embedding",
]
}
},
}
return freeze(trainable) | Keep only trainable parameters | trainable_params | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
def init_embeddings(model, params):
"""Reinitialize trainable embeddings"""
# Must match params in trainable_params() above
trainable_keypaths = [
"lm_head.kernel",
"model.decoder.embed_positions.embedding",
"model.decoder.embed_tokens.embedding",
"model.decoder.final_ln.bias",
"model.decoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.scale",
]
# Note: using private _missing_keys
init_keys = {tuple(k.split(".")) for k in trainable_keypaths}
model._missing_keys = init_keys
return model.init_weights(model.key, model.input_shape, params=params) | Reinitialize trainable embeddings | init_embeddings | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
def create_learning_rate_fn() -> Callable[[int], jnp.array]:
"""Create the learning rate function."""
warmup_fn = optax.linear_schedule(
init_value=0.0,
end_value=training_args.learning_rate,
transition_steps=training_args.warmup_steps + 1, # ensure not 0
)
last_boundary = training_args.warmup_steps
# offset step when resuming
if training_args.lr_offset:
warmup_fn = optax.join_schedules(
schedules=[optax.constant_schedule(0.0), warmup_fn],
boundaries=[training_args.lr_offset],
)
last_boundary += training_args.lr_offset
if training_args.lr_decay is None:
return warmup_fn
elif training_args.lr_decay == "linear":
assert (
num_train_steps is not None
), "linear decay requires knowing the dataset length"
decay_fn = optax.linear_schedule(
init_value=training_args.learning_rate,
end_value=0,
transition_steps=num_train_steps - training_args.warmup_steps,
)
elif training_args.lr_decay == "exponential":
decay_fn = optax.exponential_decay(
init_value=training_args.learning_rate,
transition_steps=training_args.lr_transition_steps,
decay_rate=training_args.lr_decay_rate,
staircase=training_args.lr_staircase,
)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, decay_fn],
boundaries=[last_boundary],
)
return schedule_fn | Create the learning rate function. | main.create_learning_rate_fn | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
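A minimal sketch of the warmup-then-linear-decay schedule assembled above, built directly with optax; the hyperparameters are illustrative, not the training defaults.

# Sketch: warmup + linear decay with optax (illustrative hyperparameters).
import optax

learning_rate, warmup_steps, total_steps = 1e-3, 100, 1000
warmup = optax.linear_schedule(0.0, learning_rate, transition_steps=warmup_steps + 1)
decay = optax.linear_schedule(learning_rate, 0.0, transition_steps=total_steps - warmup_steps)
schedule = optax.join_schedules([warmup, decay], boundaries=[warmup_steps])

for step in [0, 50, 100, 550, 1000]:
    print(step, float(schedule(step)))  # ramps up to ~1e-3, then decays to 0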
def maybe_fn(fn, val, zeros, freq):
"""Call fn only if it is a logging step"""
return jax.lax.cond(
state.step % freq == 0,
fn,
lambda _: zeros,
val,
) | Call fn only if it is a logging step | main.main.train_step.maybe_fn | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
def update_state_metrics(self, state):
"""Update internal state metrics (logged at each call to be used as x-axis)"""
self.state_dict = {
f'train/{k.split("_")[-1]}': state[k]
for k in ["step", "epoch", "train_time", "train_samples"]
}
# timing metrics
new_step = int(state["step"])
new_time = time.perf_counter()
if new_step > self.step:
# remove time for eval & save
delta_time = new_time - self.time - self.offset_time
self.offset_time = 0
time_per_step = delta_time / (new_step - self.step)
self.step = new_step
self.time = new_time
self.log_time("train_per_step", time_per_step, offset=False)
self.log_time("train_per_log", delta_time, offset=False) | Update internal state metrics (logged at each call to be used as x-axis) | main.update_state_metrics | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
def _iter_body(state):
"""One step of power iteration."""
i, new_v, s, s_v, unused_run_step = state
new_v = new_v / jnp.linalg.norm(new_v)
s_v = jnp.einsum("ij,j->i", matrix, new_v, precision=precision)
s_new = jnp.einsum("i,i->", new_v, s_v, precision=precision)
return (
i + 1,
s_v,
s_new,
s_v,
jnp.greater(jnp.abs(s_new - s), error_tolerance),
) | One step of power iteration. | power_iteration._iter_body | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def power_iteration(
matrix,
num_iters=100,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
r"""Power iteration algorithm.
The power iteration algorithm takes a symmetric PSD matrix `A`, and produces
a scalar `\lambda` , which is the greatest (in absolute value) eigenvalue
of `A`, and a vector v, which is the corresponding eigenvector of `A`.
References:
[Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration)
Args:
matrix: the symmetric PSD matrix.
num_iters: Number of iterations.
error_tolerance: Iterative exit condition.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
Returns:
eigen vector, eigen value
"""
matrix_size = matrix.shape[-1]
def _iter_condition(state):
i, unused_v, unused_s, unused_s_v, run_step = state
return jnp.logical_and(i < num_iters, run_step)
def _iter_body(state):
"""One step of power iteration."""
i, new_v, s, s_v, unused_run_step = state
new_v = new_v / jnp.linalg.norm(new_v)
s_v = jnp.einsum("ij,j->i", matrix, new_v, precision=precision)
s_new = jnp.einsum("i,i->", new_v, s_v, precision=precision)
return (
i + 1,
s_v,
s_new,
s_v,
jnp.greater(jnp.abs(s_new - s), error_tolerance),
)
# Figure out how to use step as seed for random.
v_0 = (
np.random.RandomState(1729).uniform(-1.0, 1.0, matrix_size).astype(matrix.dtype)
)
init_state = tuple([0, v_0, jnp.zeros([], dtype=matrix.dtype), v_0, True])
_, v_out, s_out, _, _ = lax.while_loop(_iter_condition, _iter_body, init_state)
v_out = v_out / jnp.linalg.norm(v_out)
return v_out, s_out | Power iteration algorithm.
The power iteration algorithm takes a symmetric PSD matrix `A`, and produces
a scalar `\lambda` , which is the greatest (in absolute value) eigenvalue
of `A`, and a vector v, which is the corresponding eigenvector of `A`.
References:
[Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration)
Args:
matrix: the symmetric PSD matrix.
num_iters: Number of iterations.
error_tolerance: Iterative exit condition.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
Returns:
eigen vector, eigen value | power_iteration | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0
def mat_power(
mat_m,
p,
precision=lax.Precision.HIGHEST,
):
"""A simple matrix power method. M^p where p can be TracedValue."""
power = jnp.eye(mat_m.shape[0], dtype=_MAT_INV_PTH_ROOT_DTYPE)
def _iter_condition(state):
i, _, _ = state
return i > 0
def _iter_body(state):
i, power, mat = state
power = jax.lax.cond(
i % 2 == 1,
lambda: jnp.matmul(mat, power, precision=precision),
lambda: power,
)
i //= 2
mat = jnp.matmul(mat, mat, precision=precision)
return i, power, mat
_, result, _ = lax.while_loop(_iter_condition, _iter_body, (p, power, mat_m))
return result | A simple matrix power method. M^p where p can be TracedValue. | mat_power | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
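The while_loop above is exponentiation by squaring; a NumPy sketch of the same scheme, checked against np.linalg.matrix_power.

# NumPy sketch of M^p via exponentiation by squaring.
import numpy as np

def mat_power_np(mat, p):
    power = np.eye(mat.shape[0])
    while p > 0:
        if p % 2 == 1:
            power = mat @ power
        p //= 2
        mat = mat @ mat
    return power

rng = np.random.default_rng(0)
M = rng.normal(size=(4, 4)) / 4.0  # scaled down to keep entries tame
for p in range(1, 8):
    assert np.allclose(mat_power_np(M, p), np.linalg.matrix_power(M, p))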
def _pth_root_difference(w, alpha, beta, p):
"""Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p)."""
a = w + alpha
b = w + beta
a_minus_b = alpha - beta
exp = -1 / p
def _stable_subtract(b, a_minus_b):
# Mathematically identical to the target expression, with (w+beta)^(-1/p)
# term factored out and w cancellation in the subtraction.
return (b**exp) * jnp.expm1(exp * jnp.log1p(a_minus_b / b))
return jnp.where(
# Choose the branch with the best log1p approximation.
jnp.abs(a_minus_b / b) < jnp.abs(a_minus_b / a),
-_stable_subtract(a, -a_minus_b),
_stable_subtract(b, a_minus_b),
) | Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p). | _pth_root_difference | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
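A small sketch of why the factored expm1/log1p form is used: in float32 the naive difference of two nearly equal pth roots loses essentially all of its significant digits, while the factored form stays close to a float64 reference. The values are illustrative and only one branch of the function is shown.

# Sketch: naive vs. factored (w+alpha)^(-1/p) - (w+beta)^(-1/p) in float32.
import numpy as np

p = 4
w32, a32, b32 = np.float32(1e3), np.float32(1e-3), np.float32(1.1e-3)
exp = np.float32(-1.0 / p)

naive32 = (w32 + a32) ** exp - (w32 + b32) ** exp
stable32 = ((w32 + b32) ** exp) * np.expm1(exp * np.log1p((a32 - b32) / (w32 + b32)))

ref = (1e3 + 1e-3) ** (-1 / p) - (1e3 + 1.1e-3) ** (-1 / p)  # float64 reference
print(abs(float(naive32) - ref), abs(float(stable32) - ref))  # naive error is orders of magnitude larger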
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
relative_matrix_epsilon=True,
lobpcg_topk_precondition=0,
lobpcg_max_iter=0,
):
"""Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses the Coupled newton iterations algorithm for
the computation of a matrix's inverse pth root.
References:
[Functions of Matrices, Theory and Computation,
Nicholas J Higham, Pg 184, Eq 7.18](
https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
Args:
matrix: the symmetric PSD matrix whose power is to be computed
p: exponent, for p a positive integer.
num_iters: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
relative_matrix_epsilon: Whether to use relative epsilon to the max eigen
value when computing inverse-pth root.
lobpcg_topk_precondition: If nonzero, specifies the number of top
eigenvectors to subtract out before performing LOBPCG. Note this makes
relative_matrix_epsilon essentially free.
lobpcg_max_iter: Maximum iteration count for LOBPCG, defaults to
`lobpcg_topk_precondition`.
Returns:
matrix^(-1/p) and the error
"""
# If the input is not square, materialize it from the concatenated form.
if matrix.shape[0] != matrix.shape[1]:
matrix = symmetric_matrices.materialize_matrix_from_concat(matrix)
assert matrix.shape[0] == matrix.shape[1]
# We use _MAT_INV_PTH_ROOT_DTYPE for the matrix inverse pth root.
# Switch to f64 if you have hardware that supports it. Enable the jax flag
# jax_enable_x64 for this to work.
matrix_size = matrix.shape[0]
orig_dtype = matrix.dtype
matrix = matrix.astype(_MAT_INV_PTH_ROOT_DTYPE)
alpha = jnp.asarray(-1.0 / p, _MAT_INV_PTH_ROOT_DTYPE)
identity = jnp.eye(matrix_size, dtype=_MAT_INV_PTH_ROOT_DTYPE)
original_matrix = matrix
if lobpcg_topk_precondition > 0:
# TODO(vladf): reuse previous top-k as the initial search directions
pad_shape = (matrix_size - lobpcg_topk_precondition, lobpcg_topk_precondition)
search_dirs = jnp.concatenate(
(jnp.eye(lobpcg_topk_precondition), jnp.zeros(pad_shape)), axis=0
)
eigvals, eigvecs, actual_iters = linalg.lobpcg_standard(
matrix,
search_dirs,
lobpcg_topk_precondition if lobpcg_max_iter == 0 else lobpcg_max_iter,
)
del actual_iters # TODO(vladf): return diagnostics dictionary
# The minimal eigenvalue among top-k becomes the maximal one in the whole
# matrix after deflation.
max_ev = jnp.min(eigvals)
deflation = eigvals - max_ev
scaled_vecs = eigvecs * jnp.sqrt(deflation)
# Deflate out top eigenvectors to reduce matrix condition number.
matrix -= scaled_vecs.dot(scaled_vecs.T, precision=jax.lax.Precision.HIGHEST)
# Only use power iteration if lobpcg wasn't already used to derive the
# top eigenvalue.
elif relative_matrix_epsilon:
_, max_ev = power_iteration(
matrix=matrix, num_iters=100, error_tolerance=1e-6, precision=precision
)
eigvals, eigvecs = None, None # Unused but required by pytype.
# Use absolute matrix epsilon scaling otherwise.
else:
max_ev = 1.0
eigvals, eigvecs = None, None # Unused but required by pytype.
ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-6)
def _iter_condition(state):
(i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, run_step) = state
error_above_threshold = jnp.logical_and(error > error_tolerance, run_step)
return jnp.logical_and(i < num_iters, error_above_threshold)
def _iter_body(state):
(i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state
mat_m_i = (1 - alpha) * identity + alpha * mat_m
new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision)
new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision)
new_error = jnp.max(jnp.abs(new_mat_m - identity))
# sometimes error increases after an iteration before decreasing and
# converging. 1.2 factor is used to bound the maximal allowed increase.
return (i + 1, new_mat_m, new_mat_h, mat_h, new_error, new_error < error * 1.2)
if matrix_size == 1:
resultant_mat_h = (matrix + ridge_epsilon) ** alpha
error = jnp.array(0, jnp.float32)
else:
damped_matrix = matrix + ridge_epsilon * identity
z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix))
new_mat_m_0 = damped_matrix * z
new_error = jnp.max(jnp.abs(new_mat_m_0 - identity))
new_mat_h_0 = identity * jnp.power(z, 1.0 / p)
init_state = tuple([0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True])
_, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop(
_iter_condition, _iter_body, init_state
)
error = jnp.max(jnp.abs(mat_m - identity)).astype(jnp.float32)
is_converged = jnp.asarray(convergence, old_mat_h.dtype)
resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h
resultant_mat_h = jnp.asarray(resultant_mat_h, orig_dtype)
if lobpcg_topk_precondition > 0:
# Since we deflated the top eigenvectors prior to p-th root inverse,
# the resultant matrix has larger eigenvalues associated with those
# same eigenvectors, which we need to now re-deflate.
#
# Note that _pth_root_difference returns positive values for this
# particular argument ordering as min(eigvals) <= eigvals for the
# jnp.sqrt below.
pth_diff = _pth_root_difference(ridge_epsilon, jnp.min(eigvals), eigvals, p)
scaled_vecs = eigvecs * jnp.sqrt(pth_diff)
resultant_mat_h = (
resultant_mat_h.astype(scaled_vecs.dtype)
- scaled_vecs.dot(scaled_vecs.T, precision=jax.lax.Precision.HIGHEST)
).astype(orig_dtype)
mat_m = jnp.matmul(
mat_power(resultant_mat_h, p),
original_matrix,
precision=jax.lax.Precision.HIGHEST,
)
error = jnp.max(jnp.abs(mat_m - identity)).astype(jnp.float32)
return resultant_mat_h, error | Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses the Coupled newton iterations algorithm for
the computation of a matrix's inverse pth root.
References:
[Functions of Matrices, Theory and Computation,
Nicholas J Higham, Pg 184, Eq 7.18](
https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
Args:
matrix: the symmetric PSD matrix whose power is to be computed
p: exponent, for p a positive integer.
num_iters: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
relative_matrix_epsilon: Whether to use relative epsilon to the max eigen
value when computing inverse-pth root.
lobpcg_topk_precondition: If nonzero, specifies the number of top
eigenvectors to subtract out before performing LOBPCG. Note this makes
relative_matrix_epsilon essentially free.
lobpcg_max_iter: Maximum iteration count for LOBPCG, defaults to
`lobpcg_topk_precondition`.
Returns:
matrix^(-1/p) and the error | matrix_inverse_pth_root | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
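For intuition, a sketch of the reference quantity the coupled Newton iteration above approximates: A^(-1/p) of the ridge-damped PSD matrix computed via eigendecomposition, together with the defining check (A^(-1/p))^p A ≈ I. This is a validation sketch with illustrative sizes, not the production code path.

# Sketch: eigendecomposition reference for A^(-1/p) on a damped PSD matrix.
import numpy as np

rng = np.random.default_rng(0)
G = rng.normal(size=(5, 5))
A = G @ G.T                      # symmetric PSD "statistics" matrix
p, ridge = 4, 1e-6

vals, vecs = np.linalg.eigh(A + ridge * np.eye(5))
inv_pth_root = (vecs * vals ** (-1.0 / p)) @ vecs.T

# Defining property: (A^(-1/p))^p @ A ~= I.
check = np.linalg.matrix_power(inv_pth_root, p) @ (A + ridge * np.eye(5))
assert np.allclose(check, np.eye(5), atol=1e-5)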
def merge_small_dims(shape_to_merge, max_dim):
"""Merge small dimensions.
If there are some small dimensions, we collapse them:
e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if max_dim = 1024
[1, 2, 768, 1, 2048] --> [2, 768, 2048]
Args:
shape_to_merge: Shape to merge small dimensions.
max_dim: Maximal dimension of output shape used in merging.
Returns:
Merged shape.
"""
if shape_to_merge and np.all(np.array(shape_to_merge) == 1):
return [1]
resulting_shape = []
product = 1
for d in shape_to_merge:
if product * d <= max_dim:
product *= d
else:
if product > 1:
resulting_shape.append(product)
product = d
if product > 1:
resulting_shape.append(product)
return resulting_shape | Merge small dimensions.
If there are some small dimensions, we collapse them:
e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if max_dim = 1024
[1, 2, 768, 1, 2048] --> [2, 768, 2048]
Args:
shape_to_merge: Shape to merge small dimensions.
max_dim: Maximal dimension of output shape used in merging.
Returns:
Merged shape. | merge_small_dims | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
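The two examples in the docstring can be checked with a pure-Python re-sketch of the merging rule (no JAX needed); this mirrors the logic above for illustration only.

# Pure-Python sketch of the merging rule, reproducing the docstring examples.
def merge_small_dims_py(shape, max_dim):
    if shape and all(d == 1 for d in shape):
        return [1]
    out, product = [], 1
    for d in shape:
        if product * d <= max_dim:
            product *= d
        else:
            if product > 1:
                out.append(product)
            product = d
    if product > 1:
        out.append(product)
    return out

assert merge_small_dims_py([1, 2, 512, 1, 2048, 1, 3, 4], 1024) == [1024, 2048, 12]
assert merge_small_dims_py([1, 2, 768, 1, 2048], 1024) == [2, 768, 2048]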
def pad_square_matrix(mat, max_size):
"""Pad a square matrix up to max_size.
Args:
mat: a matrix to pad.
max_size: matrix size requested.
Returns:
Given M returns [[M, 0], [0, I]]
"""
rows, cols = mat.shape
if rows != cols:
raise ValueError(
f"Must have rows == cols, instead got rows={rows}, cols={cols}"
)
if cols > max_size:
raise ValueError(
f"Must have cols <= max_size. Instead got cols={cols}, max_size={max_size}."
)
if rows == max_size:
return mat
pad_size = max_size - rows
zs1 = jnp.zeros([rows, pad_size], dtype=mat.dtype)
zs2 = jnp.zeros([pad_size, rows], dtype=mat.dtype)
eye = jnp.eye(pad_size, dtype=mat.dtype)
mat = jnp.concatenate([mat, zs1], 1)
mat = jnp.concatenate([mat, jnp.concatenate([zs2, eye], 1)], 0)
return mat | Pad a square matrix up to max_size.
Args:
mat: a matrix to pad.
max_size: matrix size requested.
Returns:
Given M returns [[M, 0], [0, I]] | pad_square_matrix | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
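A small NumPy illustration of the [[M, 0], [0, I]] padding layout produced above (illustrative sizes).

# NumPy illustration of the [[M, 0], [0, I]] block layout.
import numpy as np

M = np.array([[1.0, 2.0], [2.0, 3.0]])
max_size = 4
pad = max_size - M.shape[0]
padded = np.block([
    [M, np.zeros((M.shape[0], pad))],
    [np.zeros((pad, M.shape[0])), np.eye(pad)],
])
# padded is 4x4: M in the top-left block, identity in the bottom-right block.
assert padded.shape == (max_size, max_size)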
def make_sliced_padding(
symmetric_block_size,
num_blocks,
starting_block,
dtype,
):
"""Returns padding for symmetric block matrix.
Specifically, the padding is given as concatenated rectangular matrices
representing the lower-triangular rows below the starting block. For example,
if we want to pad the symmetric matrix
M = [[A, B^T]
[B, C]],
the desired output (in terms of the full matrix) with num_blocks = 4 is
M_padded = [[A, B^T, 0, 0]
[B, C, 0, 0]
[0, 0, I, 0]
[0, 0, 0, I]].
We would represent M as the block matrix mat = [A, B, C]. In this form, the
additional padding to provide has form [0, 0, I, 0, 0, 0, I] (only the lower
triangular parts in the third and fourth rows).
Args:
symmetric_block_size: The size of each block.
num_blocks: The total number of blocks.
starting_block: The block where to start the padding.
dtype: The type to use for the blocks.
"""
if starting_block == num_blocks:
return jnp.zeros(shape=(symmetric_block_size, 0), dtype=dtype)
blocks = []
for i in range(starting_block, num_blocks):
blocks.append(
jnp.zeros(
shape=(symmetric_block_size, symmetric_block_size * i), dtype=dtype
)
)
blocks.append(jnp.eye(symmetric_block_size, dtype=dtype))
return jnp.concatenate(blocks, axis=-1) | Returns padding for symmetric block matrix.
Specifically, the padding is given as concatenated rectangular matrices
representing the lower-triangular rows below the starting block. For example,
if we want to pad the symmetric matrix
M = [[A, B^T]
[B, C]],
the desired output (in terms of the full matrix) with num_blocks = 4 is
M_padded = [[A, B^T, 0, 0]
[B, C, 0, 0]
[0, 0, I, 0]
[0, 0, 0, I]].
We would represent M as the block matrix mat = [A, B, C]. In this form, the
additional padding to provide has form [0, 0, I, 0, 0, 0, I] (only the lower
triangular parts in the third and fourth rows).
Args:
symmetric_block_size: The size of each block.
num_blocks: The total number of blocks.
starting_block: The block where to start the padding.
dtype: The type to use for the blocks. | make_sliced_padding | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def pad_block_symmetric_matrix(
mat,
symmetric_block_size,
max_num_blocks,
):
"""Returns the padded blocked symmetric matrix.
The size of the padded matrix will be:
[symmetric_block_size, symmetric_block_size * max_num_blocks]
The input matrix can either:
- Be square with size less or equal to symmetric_block_size. In this case,
mat will first be padded to a square matrix of size symmetric_block_size,
and then be padded again up to the full size of the blocked matrix.
- Be a rectangle with number of rows equal to block size.
In this case, number of columns must be a multiple of number of rows, and
the ratio must correspond to a block representation of a symmetric matrix.
That is, the ratio must have form x * (x + 1) / 2. Here, x represents the
number of block rows represented by the matrix.
Args:
mat: The input block matrix.
symmetric_block_size: The size of blocks.
max_num_blocks: The largest number of blocks to pad to.
"""
rows, cols = mat.shape
if rows > symmetric_block_size:
raise ValueError(
"Must have rows <= symmetric_block_size. Instead got "
f"rows={rows}, symmetric_block_size={symmetric_block_size}."
)
if rows > cols:
raise ValueError(
f"Must have rows <= cols, instead got rows={rows}, cols={cols}."
)
if cols > symmetric_block_size * max_num_blocks:
raise ValueError(
"Must have cols <= symmetric_block_size * max_num_blocks "
f"Instead got cols={cols}, "
f"symmetric_block_size={symmetric_block_size}, "
f"max_num_blocks={max_num_blocks}."
)
if rows < symmetric_block_size:
mat = pad_square_matrix(mat, max_size=symmetric_block_size)
# Update rows and cols after possibly padding in pad_square_matrix.
rows, cols = mat.shape
assert rows == symmetric_block_size
assert cols % rows == 0
filled_blocks = cols // rows
padding_blocks = make_sliced_padding(
symmetric_block_size=symmetric_block_size,
num_blocks=symmetric_matrices.num_blocks_from_total_blocks(max_num_blocks),
starting_block=symmetric_matrices.num_blocks_from_total_blocks(filled_blocks),
dtype=mat.dtype,
)
return jnp.concatenate([mat, padding_blocks], axis=-1) | Returns the padded blocked symmetric matrix.
The size of the padded matrix will be:
[symmetric_block_size, symmetric_block_size * max_num_blocks]
The input matrix can either:
- Be square with size less or equal to symmetric_block_size. In this case,
mat will first be padded to a square matrix of size symmetric_block_size,
and then be padded again up to the full size of the blocked matrix.
- Be a rectangle with number of rows equal to block size.
In this case, number of columns must be a multiple of number of rows, and
the ratio must correspond to a block representation of a symmetric matrix.
That is, the ratio must have form x * (x + 1) / 2. Here, x represents the
number of block rows represented by the matrix.
Args:
mat: The input block matrix.
symmetric_block_size: The size of blocks.
max_num_blocks: The largest number of blocks to pad to. | pad_block_symmetric_matrix | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def pad_vector(vec, max_size):
"""Pad a vector to a max_size.
Args:
vec: a vector to pad.
max_size: matrix size requested.
Returns:
Given V returns [V, 0]
"""
size = vec.shape[0]
assert size <= max_size
if size == max_size:
return vec
pad_size = max_size - size
zs1 = jnp.zeros([pad_size], dtype=vec.dtype)
return jnp.concatenate([vec, zs1], 0) | Pad a vector to a max_size.
Args:
vec: a vector to pad.
max_size: matrix size requested.
Returns:
Given V returns [V, 0] | pad_vector | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def efficient_cond(predicate, compute_fn, init_state, *args, **kwargs):
"""Avoids wasteful buffer allocation with XLA."""
def _iter_body(unused_state):
results = compute_fn(*args, **kwargs)
return tuple([False] + list(results))
def _iter_condition(state):
return state[0]
results = jax.lax.while_loop(
_iter_condition, _iter_body, tuple([predicate] + init_state)
)
return tuple(results[1:]) | Avoids wasteful buffer allocation with XLA. | efficient_cond | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
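A sketch of the run-at-most-once while_loop pattern used above: the body executes only when the predicate is true, otherwise the initial state is returned unchanged. The computation and shapes are illustrative, not the optimizer's actual wiring.

# Sketch of the run-at-most-once while_loop pattern (illustrative computation).
import jax
import jax.numpy as jnp

def expensive_update(x):
    return [jnp.sin(x) * 2.0]

def run_if(predicate, x, init_state):
    def body(state):
        return tuple([False] + expensive_update(x))
    def cond(state):
        return state[0]
    return jax.lax.while_loop(cond, body, tuple([predicate] + init_state))[1:]

x = jnp.arange(3.0)
print(run_if(True, x, [jnp.zeros(3)]))   # -> (2*sin(x),)
print(run_if(False, x, [jnp.zeros(3)]))  # -> (zeros,)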
def partition(self, tensor):
"""Partition tensor into blocks."""
assert tensor.shape == self._shape
tensors = [tensor]
for i, indices in self._splits:
tensors_local = []
for t in tensors:
tensors_local.extend(jnp.split(t, indices_or_sections=indices, axis=i))
tensors = tensors_local
return tensors | Partition tensor into blocks. | partition | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def merge_partitions(self, partitions):
"""Merge partitions back to original shape."""
for i, indices in reversed(self._splits):
n = len(indices) + 1
partial_merged_tensors = []
ind = 0
while ind < len(partitions):
partial_merged_tensors.append(
jnp.concatenate(partitions[ind : ind + n], axis=i)
)
ind += n
partitions = partial_merged_tensors
assert len(partitions) == 1
return partitions[0] | Merge partitions back to original shape. | merge_partitions | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def gram_weighted_update(old_stats, g, axis, w1, w2, precision=None):
"""Updated statistics via weighted average with new Gram matrix.
Returns w₁ R + w₂ Gᵀ G where R is `old_stats` and G is the matrix whose
columns are the flattened slices of the tensor `g` along the given `axis`.
(So, `old_stats` and the returned matrix have dimensions n x n where
n = `g.shape[axis]`).
Args:
old_stats: Old statistics.
g: Gradient tensor.
axis: Axis along which to slice `g`.
w1: Scalar weight for old statistics.
w2: Scalar weight for new Gram matrix.
precision: Optional precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise)
b) lax.Precision.HIGH (increased precision, slower)
c) lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
Weighted average of old and new statistics.
"""
axes = [i for i in range(g.ndim) if i != axis]
gram_matrix = jnp.tensordot(g, g, axes=(axes, axes), precision=precision)
return w1 * old_stats + w2 * gram_matrix | Updated statistics via weighted average with new Gram matrix.
Returns w₁ R + w₂ Gᵀ G where R is `old_stats` and G is the matrix whose
columns are the flattened slices of the tensor `g` along the given `axis`.
(So, `old_stats` and the returned matrix have dimensions n x n where
n = `g.shape[axis]`).
Args:
old_stats: Old statistics.
g: Gradient tensor.
axis: Axis along which to slice `g`.
w1: Scalar weight for old statistics.
w2: Scalar weight for new Gram matrix.
precision: Optional precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise)
b) lax.Precision.HIGH (increased precision, slower)
c) lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
Weighted average of old and new statistics. | gram_weighted_update | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
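The tensordot in this update is equivalent to flattening every dimension except `axis` and forming G Gᵀ; a NumPy check with illustrative shapes, followed by one weighted update step.

# Sketch: tensordot over the non-axis dims equals G @ G.T after flattening.
import numpy as np

rng = np.random.default_rng(0)
g = rng.normal(size=(3, 4, 5))
axis = 1

axes = [i for i in range(g.ndim) if i != axis]
gram = np.tensordot(g, g, axes=(axes, axes))            # shape (4, 4)

G = np.moveaxis(g, axis, 0).reshape(g.shape[axis], -1)  # (4, 15)
assert np.allclose(gram, G @ G.T)

# One weighted update step: w1 * old_stats + w2 * gram.
old_stats = np.eye(4)
new_stats = 0.99 * old_stats + 0.01 * gram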
def __init__(
self,
param,
block_size,
merge_small_dims_block_size,
best_effort_shape_interpretation,
preconditioner_type=PreconditionerType.ALL,
):
"""Initializes the preconditioner.
Args:
param: parameter to precondition.
block_size: Block size used to split param.
merge_small_dims_block_size: Block size for merging dims.
best_effort_shape_interpretation: Whether to collapse/merge dims together.
preconditioner_type: Type of preconditioner to use.
"""
self._original_shape = param.shape
self._transformed_shape = param.shape
if best_effort_shape_interpretation:
self._transformed_shape = merge_small_dims(
self._original_shape, merge_small_dims_block_size
)
reshaped_param = jnp.reshape(param, self._transformed_shape)
self._partitioner = BlockPartitioner(reshaped_param, block_size)
self._preconditioner_type = preconditioner_type | Initializes the preconditioner.
Args:
param: parameter to precondition.
block_size: Block size used to split param.
merge_small_dims_block_size: Block size for merging dims.
best_effort_shape_interpretation: Whether to collapse/merge dims together.
preconditioner_type: Type of preconditioner to use. | __init__ | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def updated_statistics_from_grad(
self,
stats,
grad,
w1,
w2,
to_float=None,
from_float=None,
precision=None,
):
"""Update statistics from gradients.
Args:
stats: Old statistics or its Cholesky factor if `cholesky` is True.
grad: Gradient to compute statistics from.
w1: Weight for old statistics.
w2: Weight for new statistics.
to_float: Optional function for converting stats to floating point.
from_float: Optional function for converting from floating point.
precision: Optional precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise)
b) lax.Precision.HIGH (increased precision, slower)
c) lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
A list of updated gradient statistics for each partition.
"""
to_float = to_float if to_float is not None else (lambda x: x)
from_float = from_float if from_float is not None else (lambda x: x)
update = functools.partial(gram_weighted_update, precision=precision)
reshaped_grad = jnp.reshape(grad, self._transformed_shape)
partitioned_grads = self._partitioner.partition(reshaped_grad)
new_stats = []
index = 0
for g in partitioned_grads:
should_preconditioned_dims = self.should_precondition_dims()
num_preconditioners = sum(should_preconditioned_dims)
for axis in range(num_preconditioners):
new_stat = update(to_float(stats[index]), g, axis, w1, w2)
new_stats.append(from_float(new_stat))
index += 1
return new_stats | Update statistics from gradients.
Args:
stats: Old statistics or its Cholesky factor if `cholesky` is True.
grad: Gradient to compute statistics from.
w1: Weight for old statistics.
w2: Weight for new statistics.
to_float: Optional function for converting stats to floating point.
from_float: Optional function for converting from floating point.
precision: Optional precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise)
b) lax.Precision.HIGH (increased precision, slower)
c) lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
A list of updated gradient statistics for each partition. | updated_statistics_from_grad | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def should_precondition_dims(self):
"""A vector containing indicator indicating if the dim is preconditioned."""
split_sizes = self._partitioner.split_sizes()
rank = len(split_sizes)
if self._preconditioner_type == PreconditionerType.ALL or rank <= 1:
return [True] * rank
else:
return [True] * (rank - 1) + [False] | A vector of indicators specifying whether each dim is preconditioned. | should_precondition_dims | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0
def shapes_for_preconditioners(self):
"""Returns shape from statistics."""
split_sizes = self._partitioner.split_sizes()
rank = len(split_sizes)
# We ignore preconditioner types if rank == 1
preconditioner_shapes = []
for t in itertools.product(*split_sizes):
if self._preconditioner_type == PreconditionerType.ALL or rank <= 1:
preconditioner_shapes.extend([[d, d] for d in t])
else:
preconditioner_shapes.extend([[d, d] for d in t[:-1]])
return preconditioner_shapes | Returns shape from statistics. | shapes_for_preconditioners | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def exponent_for_preconditioner(self):
"""Returns exponent to use for inverse-pth root M^{-1/p}."""
should_preconditioned_dims = self.should_precondition_dims()
num_preconditioners = sum(should_preconditioned_dims)
return 2 * num_preconditioners | Returns exponent to use for inverse-pth root M^{-1/p}. | exponent_for_preconditioner | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def preconditioned_grad(self, grad, preconditioners):
"""Precondition the gradient.
Args:
grad: A gradient tensor to precondition.
preconditioners: A list of preconditioners to apply.
Returns:
A preconditioned gradient.
"""
reshaped_grad = jnp.reshape(grad, self._transformed_shape)
partitioned_grads = self._partitioner.partition(reshaped_grad)
preconditioned_partitioned_grads = []
for i, g in enumerate(partitioned_grads):
should_preconditioned_dims = self.should_precondition_dims()
num_preconditioners = sum(should_preconditioned_dims)
preconditioners_for_grad = preconditioners[
i * num_preconditioners : (i + 1) * num_preconditioners
]
precond_g = g
rank = len(g.shape)
for j, precondition in enumerate(should_preconditioned_dims):
if precondition:
precond_g = jnp.tensordot(
precond_g, preconditioners_for_grad[j], axes=[[0], [0]]
)
else:
precond_g = jnp.transpose(precond_g, axes=(*range(1, rank), 0))
preconditioned_partitioned_grads.append(precond_g)
merged_grad = self._partitioner.merge_partitions(
preconditioned_partitioned_grads
)
return jnp.reshape(merged_grad, self._original_shape) | Precondition the gradient.
Args:
grad: A gradient tensor to precondition.
preconditioners: A list of preconditioners to apply.
Returns:
A preconditioned gradient. | preconditioned_grad | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
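For a 2-D block, each tensordot over axis 0 contracts the leading dimension and rotates the remaining dimensions left, so after all preconditioners are applied the original shape is restored. A NumPy sketch with symmetric L (rows) and R (cols), showing the result equals L @ g @ R; shapes and matrices are illustrative.

# Sketch: repeated tensordot over axis 0 computes L @ g @ R for a 2-D block.
import numpy as np

rng = np.random.default_rng(0)
g = rng.normal(size=(3, 5))
A = rng.normal(size=(3, 3)); L = A + A.T   # illustrative symmetric preconditioners
B = rng.normal(size=(5, 5)); R = B + B.T

out = g
for P in (L, R):
    out = np.tensordot(out, P, axes=[[0], [0]])  # contract leading dim; dims rotate left

assert out.shape == g.shape
assert np.allclose(out, L @ g @ R)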
def _convert_to_parameter_stats(global_stats, local_stat, convert_statistics=True):
"""Creates parameter stats from sharded stats."""
index_start = int(local_stat.index_start)
index_end = int(len(local_stat.sizes)) + index_start
statistics = global_stats.statistics[index_start:index_end, :, :]
preconditioners = global_stats.preconditioners[index_start:index_end, :, :]
new_statistics = []
new_preconditioners = []
for i, size in enumerate(local_stat.sizes):
new_statistics.append(statistics[i][:size, :size])
new_preconditioners.append(preconditioners[i][:size, :size])
if not convert_statistics:
new_statistics = None
return ParameterStats(
local_stat.diagonal_statistics,
new_statistics,
new_preconditioners,
local_stat.diagonal_momentum,
local_stat.momentum,
local_stat.training_metrics,
) | Creates parameter stats from sharded stats. | _convert_to_parameter_stats | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def _convert_from_parameter_stats(parameter_stats, local_stats):
"""Creates sharded stats from paramter stats."""
return LocalShardedParameterStats(
parameter_stats.diagonal_statistics,
parameter_stats.diagonal_momentum,
parameter_stats.momentum,
parameter_stats.training_metrics,
local_stats.index_start,
local_stats.sizes,
) | Creates sharded stats from parameter stats. | _convert_from_parameter_stats | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0
def _add_error_into_local_stats(local_stats, errors, inverse_failure_threshold):
"""Adds errors back into local statistics."""
new_local_stats = []
for local_stat in local_stats:
if local_stat.sizes:
index_start = int(local_stat.index_start)
index_end = int(len(local_stat.sizes)) + index_start
per_stat_error = errors[index_start:index_end]
else:
per_stat_error = jnp.array(0, jnp.float32)
if local_stat.sizes:
per_stat_error = jnp.where(
jnp.logical_and(
per_stat_error > 0.0, per_stat_error != inverse_failure_threshold
),
per_stat_error,
local_stat.training_metrics.inverse_pth_root_errors,
)
new_local_stats.append(
LocalShardedParameterStats(
local_stat.diagonal_statistics,
local_stat.diagonal_momentum,
local_stat.momentum,
TrainingMetrics(per_stat_error),
local_stat.index_start,
local_stat.sizes,
)
)
return new_local_stats | Adds errors back into local statistics. | _add_error_into_local_stats | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def batch(x, num_devices):
"""Batch `x` so that so that leading axis is num_devices."""
n = len(x)
b = int(n / num_devices)
return jnp.stack([jnp.stack(x[idx : idx + b]) for idx in range(0, n, b)]) | Batch `x` so that the leading axis is num_devices. | batch | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0
def unbatch(batched_values):
"""Unbatch values across leading axis and return a list of elements."""
b1, b2 = batched_values.shape[0], batched_values.shape[1]
results = []
for v_array in jnp.split(batched_values, indices_or_sections=b1, axis=0):
v_array = jnp.squeeze(v_array)
# b2 = batches (number of preconditioner computation) per core.
if b2 > 1:
for v in jnp.split(v_array, indices_or_sections=b2, axis=0):
results.append(jnp.squeeze(v))
else:
results.append(v_array)
return results | Unbatch values across leading axis and return a list of elements. | unbatch | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
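A quick NumPy round trip of the batch/unbatch pair above: four 2x2 statistics split across two "devices" and recovered in order (illustrative sizes).

# NumPy sketch: batch four 2x2 statistics over 2 devices, then unbatch them.
import numpy as np

mats = [np.full((2, 2), float(i)) for i in range(4)]
num_devices = 2

b = len(mats) // num_devices
batched = np.stack([np.stack(mats[i:i + b]) for i in range(0, len(mats), b)])
assert batched.shape == (2, 2, 2, 2)   # (devices, per-device, 2, 2)

flat = []
for chunk in np.split(batched, num_devices, axis=0):
    chunk = np.squeeze(chunk, axis=0)
    flat.extend(np.split(chunk, b, axis=0))
flat = [np.squeeze(m, axis=0) for m in flat]
assert all(np.array_equal(a, c) for a, c in zip(mats, flat))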
def _graft_type_has_diagonal_statistics():
"""Returns True if using diagonal firt order method for grafting."""
return graft_type != GraftingType.SGD and graft_type != GraftingType.SQRT_N | Returns True if using a diagonal first-order method for grafting. | distributed_shampoo._graft_type_has_diagonal_statistics | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0
def preconditioner_from_params(param):
"""Returns a Preconditioner object for given param."""
return Preconditioner(
param,
block_size,
merge_small_dims_block_size,
best_effort_shape_interpretation,
precondtioner_type,
) | Returns a Preconditioner object for given param. | distributed_shampoo.preconditioner_from_params | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def sharded_init_fn(params):
"""Returns optimizer state (for PJIT mode).
Args:
params: the parameters that should be updated.
"""
params_flat, treedef = jax.tree_flatten(params)
# Find max size to pad to.
max_size = 0
for param in params_flat:
preconditioner = preconditioner_from_params(param)
if not _skip_preconditioning(param):
shapes = preconditioner.shapes_for_preconditioners()
sizes = [s[0] for s in shapes]
max_size = max(max(sizes), max_size)
padded_statistics = []
padded_preconditioners = []
local_stats_flat = []
exponents = []
for param in params_flat:
preconditioner = preconditioner_from_params(param)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
statistics = []
preconditioners = []
index_start = len(padded_statistics)
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
statistics = [
matrix_epsilon * jnp.eye(max_size, dtype=jnp.float32)
for s in shapes
]
preconditioners = [jnp.eye(max_size, dtype=jnp.float32) for s in shapes]
padded_statistics.extend(statistics)
padded_preconditioners.extend(preconditioners)
exponent = (
preconditioner.exponent_for_preconditioner()
if exponent_override == 0
else exponent_override
)
exponents.extend([exponent] * len(shapes))
diagonal_statistics = _quantize_diagonal_statistics(jnp.zeros_like(param))
diagonal_momentum = _quantize_momentum(jnp.zeros_like(param))
momentum = _quantize_momentum(jnp.zeros_like(param))
local_stats_flat.append(
LocalShardedParameterStats(
diagonal_statistics,
diagonal_momentum,
momentum,
init_training_metrics(len(sizes)),
index_start,
sizes,
)
)
local_stats = jax.tree_unflatten(treedef, local_stats_flat)
to_pad = -len(padded_statistics) % num_devices_for_pjit
if max_size == 0:
to_pad = num_devices_for_pjit
max_size = block_size
stat_dtype = jnp.float32
else:
stat_dtype = padded_statistics[0].dtype
# Pad the statistics and preconditioner matrices to be a multiple of
# num devices.
# TODO(rohananil): Relax to only the size of the mesh axis where the dim
# is split on.
padded_statistics.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)]
)
padded_preconditioners.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
global_stats = GlobalShardedParameterStats(
jnp.stack(padded_statistics),
jnp.stack(padded_preconditioners),
jnp.stack(exponents),
)
return ShampooState(
count=jnp.zeros([], jnp.int32),
stats=ShardedShampooStats(global_stats, local_stats),
) | Returns optimizer state (for PJIT mode).
Args:
params: the parameters that should be updated. | distributed_shampoo.sharded_init_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def _remove_leading_sharding_annotation(pspec):
"""Mapping from N-d to (N-1)-d, used for quantization, factoring etc."""
# None and PSpec(None) are valid PSpecs.
if pspec and len(pspec) > 1:
return pjit.PartitionSpec(*pspec[1:])
else:
return [] | Mapping from N-d to (N-1)-d, used for quantization, factoring etc. | distributed_shampoo._remove_leading_sharding_annotation | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def sharded_init_partition_spec_fn(
params, params_partition_spec, partition_spec_for_statistics
):
"""Returns a parallel state tree with PartitionSpec associated with state.
Args:
params: A pytree with params.
params_partition_spec: A pytree with PartitionSpec for params.
partition_spec_for_statistics: PartitionSpec for the statistics.
"""
# Parallel lists of spec, and params.
param_pspec_flat, _ = jax.tree_flatten(
params_partition_spec, is_leaf=lambda x: x is None
)
params_flat, treedef = jax.tree_flatten(params)
assert param_pspec_flat
assert params_flat
# Step is replicated across cores.
# None means cores.
local_stats_flat = []
num_statistics = 0
for param, param_pspec in zip(params_flat, param_pspec_flat):
param_clone = jnp.zeros(param.shape, dtype=param.dtype)
preconditioner = preconditioner_from_params(param_clone)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
index_start = num_statistics
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
num_statistics += len(shapes)
qdtype = quantized_dtype_for_momentum_buffers(param)
m1_pspec = param_pspec
m2_pspec = param_pspec
m1_scale_pspec = []
m2_scale_pspec = []
if qdtype != jnp.float32:
m1_scale_pspec = _remove_leading_sharding_annotation(m1_pspec)
m2_scale_pspec = _remove_leading_sharding_annotation(m2_pspec)
local_stats_flat.append(
LocalShardedParameterStats(
QuantizedValue(
param_pspec, [], [], jnp.float32, False, list(param.shape)
),
QuantizedValue(
m1_pspec, [], m1_scale_pspec, qdtype, False, list(param.shape)
),
QuantizedValue(
m2_pspec, [], m2_scale_pspec, qdtype, False, list(param.shape)
),
init_training_metrics_pspec(),
index_start,
sizes,
)
)
local_stats = jax.tree_unflatten(treedef, local_stats_flat)
global_stats = GlobalShardedParameterStats(
partition_spec_for_statistics,
partition_spec_for_statistics,
pjit.PartitionSpec(),
)
count_pspec = pjit.PartitionSpec()
return ShampooState(
count=count_pspec, stats=ShardedShampooStats(global_stats, local_stats)
) | Returns a parallel state tree with PartitionSpec associated with state.
Args:
params: A pytree with params.
params_partition_spec: A pytree with PartitionSpec for params.
partition_spec_for_statistics: PartitionSpec for the statistics. | distributed_shampoo.sharded_init_partition_spec_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def sharded_init_shape_and_dtype_fn(params):
"""Returns a parallel state tree with shape, dtype associated with state.
Args:
params: A pytree with params.
"""
# Parallel lists of spec, and params.
params_flat, treedef = jax.tree_flatten(params)
assert params_flat
# Step is replicated across cores.
# None means cores.
local_stats_flat = []
num_statistics = 0
for param in params_flat:
param_clone = jnp.zeros(param.shape, dtype=param.dtype)
preconditioner = preconditioner_from_params(param_clone)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
index_start = num_statistics
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
num_statistics += len(shapes)
qdtype = quantized_dtype_for_momentum_buffers(param)
m1_shape_and_dtype = [list(param.shape), param.dtype]
m2_shape_and_dtype = [list(param.shape), param.dtype]
m1_scale_shape_and_dtype = []
m2_scale_shape_and_dtype = []
if qdtype != jnp.float32:
m1_scale_shape_and_dtype = [list(param.shape)[1:], qdtype]
m2_scale_shape_and_dtype = [list(param.shape)[1:], qdtype]
diagonal_statistics_shape_and_dtype = [list(param.shape), param.dtype]
local_stats_flat.append(
LocalShardedParameterStats(
QuantizedValue(
diagonal_statistics_shape_and_dtype,
[],
[],
jnp.float32,
False,
list(param.shape),
),
QuantizedValue(
m1_shape_and_dtype,
[],
m1_scale_shape_and_dtype,
qdtype,
False,
list(param.shape),
),
QuantizedValue(
m2_shape_and_dtype,
[],
m2_scale_shape_and_dtype,
qdtype,
False,
list(param.shape),
),
init_training_metrics_shapes(len(sizes)),
index_start,
sizes,
)
)
local_stats = jax.tree_unflatten(treedef, local_stats_flat)
max_statistics_size = _max_statistics_size_from_params(params_flat)
to_pad = -num_statistics % num_devices_for_pjit
num_statistics += to_pad
if num_statistics == 0:
num_statistics = num_devices_for_pjit
max_statistics_size = block_size
statistics_shape = [num_statistics, max_statistics_size, max_statistics_size]
global_stats = GlobalShardedParameterStats(
[statistics_shape, jnp.float32],
[statistics_shape, jnp.float32],
[[num_statistics], jnp.int32],
)
return ShampooState(
count=[[], jnp.float32],
stats=ShardedShampooStats(global_stats, local_stats),
) | Returns a parallel state tree with shape, dtype associated with state.
Args:
params: A pytree with params. | distributed_shampoo.sharded_init_shape_and_dtype_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def sharded_update_fn(grads, state, params):
"""Transform the input gradient and update all statistics in sharded mode.
Args:
grads: the gradient tensors for the parameters.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
params_flat, treedef = jax.tree_flatten(params)
grads_flat = treedef.flatten_up_to(grads)
global_stats = state.stats.global_stats
local_stats_flat = treedef.flatten_up_to(state.stats.local_stats)
stats_flat = [
_convert_to_parameter_stats(global_stats, local_stat)
for local_stat in local_stats_flat
]
new_stats_flat = jax.tree_map(
lambda g, s, p: _compute_stats(g, s, p, state.count),
grads_flat,
stats_flat,
params_flat,
)
outputs = jax.tree_map(
lambda g, s, p: _transform_grad(g, s, p, state.count),
grads_flat,
new_stats_flat,
params_flat,
)
updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())
updates = jax.tree_unflatten(treedef, updates_flat)
# Create new local_stats
new_local_stats_flat = [
_convert_from_parameter_stats(new_stat, local_stat)
for new_stat, local_stat in zip(new_stats_flat, local_stats_flat)
]
max_size = global_stats.statistics.shape[1]
new_padded_statistics = []
for stat in new_stats_flat:
new_padded_statistics.extend(
[pad_square_matrix(stat, max_size) for stat in stat.statistics]
)
# Create global stats
# TODO(rohananil): Preconditioner is not updated every step, so cost of
# stack/pad can be obviated away.
# Pad the statistics and preconditioner matrices to be a multiple of
# num devices.
# TODO(rohananil): Relax to only the size of the mesh axis where the dim
# is split on.
to_pad = -len(new_padded_statistics) % num_devices_for_pjit
if not new_padded_statistics:
to_pad = num_devices_for_pjit
stat_dtype = jnp.float32
else:
stat_dtype = new_padded_statistics[0].dtype
new_padded_statistics.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)]
)
new_stacked_padded_statistics = jnp.stack(new_padded_statistics)
new_stacked_padded_statistics = pjit.with_sharding_constraint(
new_stacked_padded_statistics, statistics_partition_spec
)
def _internal_inverse_pth_root_all():
preconditioners, errors = _matrix_inverse_pth_root_pjit(
new_stacked_padded_statistics,
global_stats.exponents,
statistics_partition_spec,
)
return preconditioners, errors
if preconditioning_compute_steps == 1:
new_preconditioners, errors = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
preconditioners_init = new_stacked_padded_statistics
n = new_stacked_padded_statistics.shape[0]
errors_init = jnp.ones([n], jnp.float32) * inverse_failure_threshold
init_state = [preconditioners_init, errors_init]
perform_step = state.count % preconditioning_compute_steps == 0
new_preconditioners, errors = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state
)
new_local_stats_flat = _add_error_into_local_stats(
new_local_stats_flat, errors, inverse_failure_threshold
)
new_local_stats = jax.tree_unflatten(treedef, new_local_stats_flat)
errors = errors.reshape((-1, 1, 1))
predicate = jnp.logical_or(
jnp.isnan(errors), errors >= inverse_failure_threshold
).astype(new_preconditioners.dtype)
# TODO(rohananil): Check for numerical instabilities.
new_conditional_preconditioners = (
predicate * global_stats.preconditioners
+ (1.0 - predicate) * new_preconditioners
)
new_global_stats = GlobalShardedParameterStats(
new_stacked_padded_statistics,
new_conditional_preconditioners,
global_stats.exponents,
)
new_shampoo_state = ShampooState(
count=state.count + 1,
stats=ShardedShampooStats(new_global_stats, new_local_stats),
)
return updates, new_shampoo_state | Transform the input gradient and update all statistics in sharded mode.
Args:
grads: the gradient tensors for the parameters.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state. | distributed_shampoo.sharded_update_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def init_fn(params):
"""Initialise the optimiser's state."""
def _init(param):
preconditioner = preconditioner_from_params(param)
statistics = []
preconditioners = []
if not _skip_preconditioning(param):
shapes = preconditioner.shapes_for_preconditioners()
statistics = [
matrix_epsilon * jnp.eye(s[0], dtype=jnp.float32) for s in shapes
]
preconditioners = [jnp.eye(s[0], dtype=jnp.float32) for s in shapes]
diagonal_statistics = []
if _graft_type_has_diagonal_statistics():
diagonal_statistics = jnp.zeros_like(param)
diagonal_momentum = _quantize_momentum(jnp.zeros_like(param))
momentum = _quantize_momentum(jnp.zeros_like(param))
return ParameterStats(
_quantize_diagonal_statistics(diagonal_statistics),
_maybe_quantize_statistics(statistics),
_maybe_quantize_preconditioners(preconditioners),
diagonal_momentum,
momentum,
init_training_metrics(len(statistics)),
)
return ShampooState(
count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params)
) | Initialise the optimiser's state. | distributed_shampoo.init_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
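For intuition on what init_fn builds per parameter: Shampoo keeps one square statistic and one preconditioner per tensor dimension, initialized to an epsilon-scaled identity and the identity respectively. A simplified sketch, assuming no blocking or merging of dimensions (which shapes_for_preconditioners also handles in the real code); init_kronecker_factors is a hypothetical name.

import jax.numpy as jnp

def init_kronecker_factors(param, matrix_epsilon=1e-6):
    # One square factor per tensor dimension; large-dim blocking is ignored here.
    shapes = [(d, d) for d in param.shape]
    statistics = [matrix_epsilon * jnp.eye(s[0], dtype=jnp.float32) for s in shapes]
    preconditioners = [jnp.eye(s[0], dtype=jnp.float32) for s in shapes]
    return statistics, preconditioners

stats, precs = init_kronecker_factors(jnp.zeros((8, 16)))
print([s.shape for s in stats])  # [(8, 8), (16, 16)]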
def _compute_stats(grad, state, param, step):
"""Compute per-parameter statistics."""
preconditioner = preconditioner_from_params(param)
new_statistics = [[]] * len(state.statistics)
w1 = beta2
w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)
if not _skip_preconditioning(param):
def compute_updated_statistics():
return preconditioner.updated_statistics_from_grad(
state.statistics,
grad,
w1=w1,
w2=w2,
to_float=_to_float,
from_float=lambda x: _maybe_quantize_statistics([x])[0],
precision=tensordot_precision,
)
if statistics_compute_steps > 1:
perform_step = step % statistics_compute_steps == 0
init_state = state.statistics
new_statistics = list(
efficient_cond(perform_step, compute_updated_statistics, init_state)
)
else:
new_statistics = compute_updated_statistics()
return ParameterStats(
state.diagonal_statistics,
new_statistics,
state.preconditioners,
state.diagonal_momentum,
state.momentum,
state.training_metrics,
) | Compute per-parameter statistics. | distributed_shampoo._compute_stats | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
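The statistics update performed by updated_statistics_from_grad is, for a 2-D gradient, an exponential moving average of the two Kronecker factors. A sketch with the same w1/w2 convention as above (w2 collapses to 1 when beta2 == 1, i.e. a plain running sum); the function name update_statistics_2d is illustrative.

import jax.numpy as jnp

def update_statistics_2d(L, R, grad, beta2):
    # EMA of the left and right Kronecker factors for a 2-D gradient.
    w1 = beta2
    w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)
    new_L = w1 * L + w2 * grad @ grad.T
    new_R = w1 * R + w2 * grad.T @ grad
    return new_L, new_R

g = jnp.ones((4, 3))
L0, R0 = jnp.zeros((4, 4)), jnp.zeros((3, 3))
L1, R1 = update_statistics_2d(L0, R0, g, beta2=0.999)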
def _pmap_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
):
"""Computes preconditioners for given statistics in states in PMAP mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state, used to reconstruct
the output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioners.
Returns:
New optimizer states after computing the preconditioner.
"""
if batch_axis_name:
num_devices = lax.psum(1, batch_axis_name)
else:
num_devices = 1
num_statistics = len(statistics)
# Pad statistics and exponents to next multiple of num_devices.
packed_statistics = [pad_square_matrix(stat, max_size) for stat in statistics]
to_pad = -num_statistics % num_devices
packed_statistics.extend(
[jnp.eye(max_size, dtype=packed_statistics[0].dtype) for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
if not packed_statistics:
return states
all_statistics = batch(packed_statistics, num_devices)
all_exponents = batch(exponents, num_devices)
def _internal_inverse_pth_root_all():
if batch_axis_name:
current_replica = lax.axis_index(batch_axis_name)
preconditioners, errors = _matrix_inverse_pth_root_vmap(
all_statistics[current_replica], all_exponents[current_replica]
)
preconditioners = jax.lax.all_gather(preconditioners, batch_axis_name)
errors = jax.lax.all_gather(errors, batch_axis_name)
preconditioners_flat = unbatch(preconditioners)
errors_flat = unbatch(errors)
else:
preconditioners, errors = _matrix_inverse_pth_root_vmap(
all_statistics[0], all_exponents[0]
)
preconditioners_flat = unbatch(jnp.stack([preconditioners]))
errors_flat = unbatch(jnp.stack([errors]))
return preconditioners_flat, errors_flat
if preconditioning_compute_steps == 1:
preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
preconditioners_init = packed_statistics
errors_init = [inverse_failure_threshold] * len(packed_statistics)
init_state = [preconditioners_init, errors_init]
perform_step = step % preconditioning_compute_steps == 0
preconditioners_flat, errors_flat = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state
)
def _skip(error):
condition = jnp.logical_or(
jnp.isnan(error), error >= inverse_failure_threshold
)
return condition.astype(error.dtype)
def _select_preconditioner(error, new_p, old_p):
return lax.cond(
_skip(error), lambda _: old_p, lambda _: new_p, operand=None
)
new_preconditioners_flat = []
new_errors_flat = []
for p, shape, prev_p, error in zip(
preconditioners_flat, original_shapes, prev_preconditioners, errors_flat
):
new_preconditioners_flat.append(
_select_preconditioner(error, p[: shape[0], : shape[1]], prev_p)
)
new_errors_flat.append(error)
assert len(states) == len(num_statistics_per_state)
assert len(new_preconditioners_flat) == num_statistics
assert len(new_errors_flat) == num_statistics
# Add back empty preconditioners so that we can set the optimizer state.
preconditioners_for_states = []
idx = 0
errors_for_states = []
for num_statistics, state in zip(num_statistics_per_state, states):
if num_statistics == 0:
preconditioners_for_states.append([])
errors_for_states.append(jnp.array(0, jnp.float32))
else:
preconditioners_for_state = new_preconditioners_flat[
idx : idx + num_statistics
]
assert len(state.statistics) == len(preconditioners_for_state)
preconditioners_for_states.append(preconditioners_for_state)
errors_for_state = jnp.stack(
new_errors_flat[idx : idx + num_statistics]
)
assert len(state.statistics) == len(errors_for_state)
errors_for_states.append(errors_for_state)
idx += num_statistics
new_states = []
for state, new_preconditioners, new_errors in zip(
states, preconditioners_for_states, errors_for_states
):
if state.statistics:
new_errors = jnp.where(
jnp.logical_and(
new_errors > 0.0, new_errors != inverse_failure_threshold
),
new_errors,
state.training_metrics.inverse_pth_root_errors,
)
new_training_metrics = TrainingMetrics(new_errors)
new_states.append(
ParameterStats(
state.diagonal_statistics,
state.statistics,
new_preconditioners,
state.diagonal_momentum,
state.momentum,
new_training_metrics,
)
)
return new_states | Computes preconditioners for given statistics in states in PMAP mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state, used to reconstruct
the output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioners.
Returns:
New optimizer states after computing the preconditioner. | distributed_shampoo._pmap_compute_preconditioners | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
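The PMAP path above pads the list of statistics to a multiple of the device count and then splits it so each replica inverts an equal share. A rough analogue of that padding plus the batch()/unbatch() reshaping, assuming all matrices are already padded to max_size (which pad_square_matrix does in the real code); pad_and_batch is a hypothetical helper.

import jax.numpy as jnp

def pad_and_batch(mats, num_devices, max_size):
    # Pad with identity matrices to a multiple of num_devices, then reshape to
    # [num_devices, per_device, max_size, max_size].
    to_pad = -len(mats) % num_devices
    mats = list(mats) + [jnp.eye(max_size, dtype=mats[0].dtype) for _ in range(to_pad)]
    stacked = jnp.stack(mats)
    return stacked.reshape((num_devices, -1, max_size, max_size))

batched = pad_and_batch([jnp.eye(4)] * 5, num_devices=4, max_size=4)
print(batched.shape)  # (4, 2, 4, 4)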
def _pmap_quantized_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
):
"""Computes preconditioners for given statistics in states in PMAP mode.
For quantization, each statistic is represented by three values: the
quantized matrix, a diagonal, and bucket sizes; we run inverse pth-roots
without ever recreating the original matrix in f32.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state, used to reconstruct
the output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioners.
Returns:
New optimizer states after computing the preconditioner.
"""
num_devices = lax.psum(1, batch_axis_name)
num_statistics = len(statistics)
quantized_dtype = quantized_dtype_for_second_moment_statistics_buffers()
# Complexity here comes from shapes needing to be statically known, and from
# our custom quantization type requiring a different kind of packing.
# Parallel tensors:
# quantized [dxd]
# diagonals [d] f32
# bucket_sizes [d] f32
packed_quantized_statistics = [
pad_square_matrix(stat.quantized, max_size) for stat in statistics
]
packed_quantized_diagonals = [
pad_vector(stat.diagonal, max_size) for stat in statistics
]
packed_quantized_bucket_sizes = [
pad_vector(stat.bucket_size, max_size) for stat in statistics
]
to_pad = -num_statistics % num_devices
padded_eye = jnp.eye(max_size, dtype=jnp.float32)
quantized_eye = QuantizedValue.from_float_value(
padded_eye, quantized_dtype, True
)
packed_quantized_statistics.extend(
[quantized_eye.quantized for _ in range(to_pad)]
)
packed_quantized_diagonals.extend(
[quantized_eye.diagonal for _ in range(to_pad)]
)
packed_quantized_bucket_sizes.extend(
[quantized_eye.bucket_size for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
if not packed_quantized_statistics:
return states
all_quantized_statistics = batch(packed_quantized_statistics, num_devices)
all_quantized_diagonals = batch(packed_quantized_diagonals, num_devices)
all_quantized_bucket_sizes = batch(packed_quantized_bucket_sizes, num_devices)
all_exponents = batch(exponents, num_devices)
def _internal_inverse_pth_root_all():
current_replica = lax.axis_index(batch_axis_name)
(
quantized_preconditioners,
quantized_diagonals,
quantized_bucket_sizes,
errors,
) = _quantized_matrix_inverse_pth_root_vmap(
all_quantized_statistics[current_replica],
all_quantized_diagonals[current_replica],
all_quantized_bucket_sizes[current_replica],
all_exponents[current_replica],
)
quantized_preconditioners = jax.lax.all_gather(
quantized_preconditioners, batch_axis_name
)
quantized_diagonals = jax.lax.all_gather(
quantized_diagonals, batch_axis_name
)
quantized_bucket_sizes = jax.lax.all_gather(
quantized_bucket_sizes, batch_axis_name
)
errors = jax.lax.all_gather(errors, batch_axis_name)
quantized_preconditioners_flat = unbatch(quantized_preconditioners)
quantized_diagonals_flat = unbatch(quantized_diagonals)
quantized_bucket_sizes_flat = unbatch(quantized_bucket_sizes)
errors_flat = unbatch(errors)
return (
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
errors_flat,
)
if preconditioning_compute_steps == 1:
(
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
errors_flat,
) = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
quantized_preconditioners_init = packed_quantized_statistics
quantized_diagonals_init = packed_quantized_diagonals
quantized_bucket_sizes_init = packed_quantized_bucket_sizes
errors_init = [inverse_failure_threshold] * len(
quantized_preconditioners_init
)
init_state = [
quantized_preconditioners_init,
quantized_diagonals_init,
quantized_bucket_sizes_init,
errors_init,
]
perform_step = step % preconditioning_compute_steps == 0
(
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
errors_flat,
) = efficient_cond(perform_step, _internal_inverse_pth_root_all, init_state)
def _skip(error):
condition = jnp.logical_or(
jnp.isnan(error), error >= inverse_failure_threshold
)
return condition.astype(error.dtype)
def _select_preconditioner(error, new_p, old_p):
return lax.cond(
_skip(error), lambda _: old_p, lambda _: new_p, operand=None
)
new_quantized_preconditioners_flat = []
new_quantized_diagonals_flat = []
new_quantized_bucket_sizes_flat = []
new_errors_flat = []
for p, d, b, shape, prev_p, error in zip(
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
original_shapes,
prev_preconditioners,
errors_flat,
):
new_quantized_preconditioners_flat.append(
_select_preconditioner(
error, p[: shape[0], : shape[1]], prev_p.quantized
)
)
new_quantized_diagonals_flat.append(
_select_preconditioner(error, d[: shape[0]], prev_p.diagonal)
)
new_quantized_bucket_sizes_flat.append(
_select_preconditioner(error, b[: shape[0]], prev_p.bucket_size)
)
new_errors_flat.append(error)
assert len(states) == len(num_statistics_per_state)
assert len(new_quantized_preconditioners_flat) == num_statistics
assert len(new_quantized_diagonals_flat) == num_statistics
assert len(new_quantized_bucket_sizes_flat) == num_statistics
# Add back empty preconditioners so that we can set the optimizer state.
preconditioners_for_states = []
errors_for_states = []
idx = 0
for num_statistics, state in zip(num_statistics_per_state, states):
if num_statistics == 0:
preconditioners_for_states.append([])
errors_for_states.append(jnp.array(0, jnp.float32))
else:
quantized_preconditioners_for_state = (
new_quantized_preconditioners_flat[idx : idx + num_statistics]
)
quantized_diagonals_for_state = new_quantized_diagonals_flat[
idx : idx + num_statistics
]
quantized_bucket_sizes_for_state = new_quantized_bucket_sizes_flat[
idx : idx + num_statistics
]
errors_for_state = jnp.stack(
new_errors_flat[idx : idx + num_statistics]
)
assert len(state.statistics) == len(quantized_preconditioners_for_state)
assert len(state.statistics) == len(quantized_diagonals_for_state)
assert len(state.statistics) == len(quantized_bucket_sizes_for_state)
assert len(state.statistics) == len(errors_for_state)
quantized_preconditioners = []
for qv, qd, qb in zip(
quantized_preconditioners_for_state,
quantized_diagonals_for_state,
quantized_bucket_sizes_for_state,
):
quantized_preconditioners.append(
QuantizedValue(qv, qd, qb, qv.dtype, True, list(qv.shape))
)
preconditioners_for_states.append(quantized_preconditioners)
errors_for_states.append(errors_for_state)
idx += num_statistics
new_states = []
for state, new_preconditioners, new_errors in zip(
states, preconditioners_for_states, errors_for_states
):
if state.statistics:
new_errors = jnp.where(
jnp.logical_and(
new_errors > 0.0, new_errors != inverse_failure_threshold
),
new_errors,
state.training_metrics.inverse_pth_root_errors,
)
new_training_metrics = TrainingMetrics(new_errors)
new_states.append(
ParameterStats(
state.diagonal_statistics,
state.statistics,
new_preconditioners,
state.diagonal_momentum,
state.momentum,
new_training_metrics,
)
)
return new_states | Computes preconditioners for given statistics in states in PMAP mode.
For quantization, each statistic is represented by three values: the
quantized matrix, a diagonal, and bucket sizes; we run inverse pth-roots
without ever recreating the original matrix in f32.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state, used to reconstruct
the output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioners.
Returns:
New optimizer states after computing the preconditioner. | distributed_shampoo._pmap_quantized_compute_preconditioners | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
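For context on the quantized representation handled above: the general idea is to store low-precision integer values plus a per-bucket floating-point scale ("bucket size"), so the f32 matrix never has to be materialized. A minimal sketch of symmetric per-column linear quantization; this is only the general idea, the actual QuantizedValue class uses its own packing and optional diagonals, and quantize_symmetric is a hypothetical name.

import jax.numpy as jnp

def quantize_symmetric(x, bits=8):
    # Per-column symmetric quantization: int values plus a per-column scale.
    max_q = 2.0 ** (bits - 1) - 1.0
    bucket_size = jnp.maximum(jnp.max(jnp.abs(x), axis=0), 1e-30) / max_q
    quantized = jnp.round(x / bucket_size).astype(jnp.int8)
    return quantized, bucket_size

def dequantize(quantized, bucket_size):
    return quantized.astype(jnp.float32) * bucket_size

q, b = quantize_symmetric(jnp.array([[0.5, -1.0], [1.5, 2.0]]))
print(dequantize(q, b))  # approximately the original matrix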
def _pjit_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
):
"""Computes preconditioners for given statistics in states in PJIT mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state, used to reconstruct
the output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioners.
Returns:
New optimizer states after computing the preconditioner.
"""
num_statistics = len(statistics)
to_pad = -num_statistics % num_devices_for_pjit
padded_statistics = [pad_square_matrix(stat, max_size) for stat in statistics]
padded_statistics.extend(
[jnp.eye(max_size, dtype=padded_statistics[0].dtype) for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
all_statistics = jnp.stack(padded_statistics)
all_exponents = jnp.stack(exponents)
def _internal_inverse_pth_root_all():
preconditioners, errors = _matrix_inverse_pth_root_pjit(
all_statistics, all_exponents
)
b1 = preconditioners.shape[0]
def split(batched_values):
return [
jnp.squeeze(v)
for v in jnp.split(batched_values, indices_or_sections=b1, axis=0)
]
return split(preconditioners), split(errors)
if preconditioning_compute_steps == 1:
preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
preconditioners_init = padded_statistics
errors_init = [inverse_failure_threshold] * len(padded_statistics)
init_state = [preconditioners_init, errors_init]
perform_step = step % preconditioning_compute_steps == 0
preconditioners_flat, errors_flat = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state
)
def _skip(error):
condition = jnp.logical_or(
jnp.isnan(error), error >= inverse_failure_threshold
)
return condition.astype(error.dtype)
def _select_preconditioner(error, new_p, old_p):
return lax.cond(
_skip(error), lambda _: old_p, lambda _: new_p, operand=None
)
new_preconditioners_flat = []
new_errors_flat = []
for p, shape, prev_p, error in zip(
preconditioners_flat, original_shapes, prev_preconditioners, errors_flat
):
new_preconditioners_flat.append(
_select_preconditioner(error, p[: shape[0], : shape[1]], prev_p)
)
new_errors_flat.append(error)
assert len(states) == len(num_statistics_per_state)
assert len(new_preconditioners_flat) == num_statistics
# Add back empty preconditioners so that we can set the optimizer state.
preconditioners_for_states = []
errors_for_states = []
idx = 0
for num_statistics, state in zip(num_statistics_per_state, states):
if num_statistics == 0:
preconditioners_for_states.append([])
errors_for_states.append(jnp.array(0, jnp.float32))
else:
preconditioners_for_state = new_preconditioners_flat[
idx : idx + num_statistics
]
assert len(state.statistics) == len(preconditioners_for_state)
preconditioners_for_states.append(preconditioners_for_state)
errors_for_state = jnp.stack(
new_errors_flat[idx : idx + num_statistics]
)
assert len(state.statistics) == len(errors_for_state)
errors_for_states.append(errors_for_state)
idx += num_statistics
new_states = []
for state, new_preconditioners, new_errors in zip(
states, preconditioners_for_states, errors_for_states
):
if state.statistics:
new_errors = jnp.where(
jnp.logical_and(
new_errors > 0.0, new_errors != inverse_failure_threshold
),
new_errors,
state.training_metrics.inverse_pth_root_errors,
)
new_training_metrics = TrainingMetrics(new_errors)
new_states.append(
ParameterStats(
state.diagonal_statistics,
state.statistics,
new_preconditioners,
state.diagonal_momentum,
state.momentum,
new_training_metrics,
)
)
return new_states | Computes preconditioners for given statistics in states in PJIT mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state, used to reconstruct
the output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioners.
Returns:
New optimizer states after computing the preconditioner. | distributed_shampoo._pjit_compute_preconditioners | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
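All three preconditioner-computation paths amortize the expensive inverse pth-root through efficient_cond: run it only when step % preconditioning_compute_steps == 0, otherwise pass the init state through unchanged. A sketch of that idea using lax.cond; the real efficient_cond in this codebase is implemented differently, and every_k_steps is a hypothetical name.

import jax
from jax import lax
import jax.numpy as jnp

def every_k_steps(step, k, expensive_fn, carry):
    # Run expensive_fn only when step % k == 0; otherwise return the carry.
    return lax.cond(step % k == 0,
                    lambda c: expensive_fn(),
                    lambda c: c,
                    carry)

carry = jnp.zeros((3,))
out = every_k_steps(jnp.asarray(7), 5, lambda: jnp.ones((3,)), carry)
print(out)  # zeros: step 7 is not a multiple of 5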
def _compute_preconditioners(states, params, step):
"""Computes preconditioners for given statistics in states.
Args:
states: A list of optimizer states.
params: A list of params.
step: Current step number
Returns:
New optimizer states after computing the preconditioner.
"""
statistics = []
num_statistics_per_state = []
original_shapes = []
exponents = []
max_size = 0
prev_preconditioners = []
for state, param in zip(states, params):
num_statistics = len(state.statistics)
num_statistics_per_state.append(num_statistics)
original_shapes_for_state = []
if num_statistics > 0:
preconditioner = preconditioner_from_params(param)
for statistic in state.statistics:
exponents.append(
preconditioner.exponent_for_preconditioner()
if exponent_override == 0
else exponent_override
)
original_shapes_for_state.append(statistic.shape)
max_size = max(max_size, statistic.shape[0])
statistics.extend(state.statistics)
prev_preconditioners.extend(state.preconditioners)
original_shapes.extend(original_shapes_for_state)
if not shard_optimizer_states:
# Quantization is only enabled if batch_axis_name is not set.
quantized_dtype = quantized_dtype_for_second_moment_statistics_buffers()
if quantized_dtype == jnp.float32:
return _pmap_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
)
else:
return _pmap_quantized_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
)
else:
return _pjit_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
) | Computes preconditioners for given statistics in states.
Args:
states: A list of optimizer states.
params: A list of params.
step: Current step number
Returns:
New optimizer states after computing the preconditioner. | distributed_shampoo._compute_preconditioners | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
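On the exponents gathered above: the usual Shampoo convention is to apply the inverse (2k)-th root where k is the number of Kronecker factors of the parameter, unless exponent_override is nonzero. A sketch of that choice under the stated assumption (it ignores blocking and merged dimensions, which exponent_for_preconditioner also accounts for); preconditioner_exponent is an illustrative name.

def preconditioner_exponent(num_factors, exponent_override=0):
    # Inverse (2 * num_factors)-th root, unless explicitly overridden.
    return exponent_override if exponent_override != 0 else 2 * num_factors

print(preconditioner_exponent(2))                       # 4 for a matrix parameter
print(preconditioner_exponent(2, exponent_override=6))  # 6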
def _transform_grad(grad, state, param, step):
"""Transform per-parameter gradients."""
preconditioner = preconditioner_from_params(param)
sgd_update = grad
new_diagonal_statistics = state.diagonal_statistics.to_float()
if (
graft_type == GraftingType.ADAGRAD
or graft_type == GraftingType.ADAGRAD_NORMALIZED
):
scaled_grad = grad
if graft_type == GraftingType.ADAGRAD_NORMALIZED:
scaled_grad = grad / (jnp.linalg.norm(grad) + 1e-16)
new_diagonal_statistics = state.diagonal_statistics.to_float() + jnp.square(
scaled_grad
)
adagrad_update = scaled_grad / (
jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon
)
grafting_update = adagrad_update
elif (
graft_type == GraftingType.RMSPROP
or graft_type == GraftingType.RMSPROP_NORMALIZED
):
scaled_grad = grad
if graft_type == GraftingType.RMSPROP_NORMALIZED:
scaled_grad = grad / (jnp.linalg.norm(grad) + 1e-16)
w1 = beta2
w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)
new_diagonal_statistics = (
w1 * state.diagonal_statistics.to_float() + w2 * jnp.square(scaled_grad)
)
rmsprop_update = scaled_grad / (
jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon
)
if clip_by_scaled_gradient_norm:
scaled_grad_norm = jnp.linalg.norm(rmsprop_update) / (
jnp.sqrt(float(rmsprop_update.size))
)
clipping_denom = jnp.maximum(
1.0, scaled_grad_norm / clip_by_scaled_gradient_norm
)
rmsprop_update /= clipping_denom
grafting_update = rmsprop_update
elif graft_type == GraftingType.SGD:
grafting_update = sgd_update
else:
grafting_update = jnp.ones_like(sgd_update) * jnp.sign(sgd_update)
lr = learning_rate
if callable(learning_rate):
lr = learning_rate(step)
preconditioner_multiplier = lr if not decoupled_learning_rate else 1.0
grafting_update = grafting_update * preconditioner_multiplier
precond_grad = grad
if not _skip_preconditioning(param):
precond_grad = preconditioner.preconditioned_grad(
precond_grad, _maybe_dequantize_preconditioners(state.preconditioners)
)
else:
precond_grad = grafting_update
grafting_update_norm = jnp.linalg.norm(grafting_update)
precond_grad_norm = jnp.linalg.norm(precond_grad)
multiplier = grafting_update_norm / (precond_grad_norm + 1e-16)
shampoo_update = precond_grad * multiplier
shampoo_update_with_wd = shampoo_update
grafting_update_with_wd = grafting_update
if weight_decay != 0 and not decoupled_weight_decay:
shampoo_update_with_wd = shampoo_update + weight_decay * param
grafting_update_with_wd = grafting_update + weight_decay * param
w = (1.0 - beta1) if moving_average_for_momentum else 1.0
shampoo_update_with_wd_momentum = (
state.momentum.to_float() * beta1 + w * shampoo_update_with_wd
)
grafting_update_with_wd_momentum = (
state.diagonal_momentum.to_float() * beta1 + w * grafting_update_with_wd
)
run_shampoo = (step >= start_preconditioning_step).astype(
grafting_update_with_wd_momentum.dtype
)
momentum_update = (
run_shampoo * shampoo_update_with_wd_momentum
+ (1.0 - run_shampoo) * grafting_update_with_wd_momentum
)
wd_update = (
run_shampoo * shampoo_update_with_wd
+ (1.0 - run_shampoo) * grafting_update_with_wd
)
nesterov_momentum_update = momentum_update
if nesterov:
nesterov_momentum_update = w * wd_update + beta1 * momentum_update
if weight_decay != 0 and decoupled_weight_decay:
nesterov_momentum_update = (
nesterov_momentum_update + lr * weight_decay * param
)
momentum_multiplier = lr if decoupled_learning_rate else 1.0
transformed_update = -1.0 * momentum_multiplier * nesterov_momentum_update
new_diagonal_momentum = grafting_update_with_wd_momentum
new_momentum = shampoo_update_with_wd_momentum
param_stats = ParameterStats(
_quantize_diagonal_statistics(new_diagonal_statistics),
state.statistics,
state.preconditioners,
_quantize_momentum(new_diagonal_momentum),
_quantize_momentum(new_momentum),
state.training_metrics,
)
return transformed_update, param_stats | Transform per-parameter gradients. | distributed_shampoo._transform_grad | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
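The grafting step in _transform_grad keeps the direction of the Shampoo-preconditioned gradient but borrows the magnitude of the grafted (SGD/RMSProp/Adagrad) update, via the same norm-ratio multiplier computed above. A small self-contained sketch; graft is an illustrative name.

import jax.numpy as jnp

def graft(precond_grad, grafting_update, eps=1e-16):
    # Layer-wise grafting: rescale the preconditioned gradient so its norm
    # matches the grafted update's norm.
    multiplier = jnp.linalg.norm(grafting_update) / (jnp.linalg.norm(precond_grad) + eps)
    return precond_grad * multiplier

shampoo_dir = jnp.array([3.0, 4.0])      # norm 5
rmsprop_like = jnp.array([0.1, 0.0])     # norm 0.1
print(graft(shampoo_dir, rmsprop_like))  # ~[0.06, 0.08], norm ~0.1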
def update_fn(grads, state, params):
"""Transform the input gradient and update all statistics.
Args:
grads: the gradient tensors for the parameters
and any custom gradients for preconditioners.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
params_flat, treedef = jax.tree_flatten(params)
stats_flat = treedef.flatten_up_to(state.stats)
grads_flat = treedef.flatten_up_to(grads)
stats_grads = grads_flat
new_stats_flat = jax.tree_map(
lambda g, s, p: _compute_stats(g, s, p, state.count),
stats_grads,
stats_flat,
params_flat,
)
new_stats_flat = _compute_preconditioners(
new_stats_flat, params_flat, state.count
)
outputs = jax.tree_map(
lambda g, s, p: _transform_grad(g, s, p, state.count),
grads_flat,
new_stats_flat,
params_flat,
)
updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())
updates = jax.tree_unflatten(treedef, updates_flat)
new_stats = jax.tree_unflatten(treedef, new_stats_flat)
new_state = ShampooState(count=state.count + 1, stats=new_stats)
return updates, new_state | Transform the input gradient and update all statistics.
Args:
grads: the gradient tensors for the parameters
and any custom gradients for preconditioners.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state. | distributed_shampoo.update_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
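The init_fn/update_fn pair above is exposed through the file's top-level distributed_shampoo(...) factory as an optax-style gradient transformation. A rough usage sketch; the import path, argument values, and the exact factory signature are assumptions based on how this file is typically used, not shown here.

import jax
import jax.numpy as jnp
import optax

# from scalable_shampoo.distributed_shampoo import distributed_shampoo  # path depends on project layout
tx = distributed_shampoo(learning_rate=1e-3, block_size=128)  # illustrative arguments

params = {"w": jnp.ones((8, 16))}
opt_state = tx.init(params)

def loss_fn(p):
    return jnp.sum(p["w"] ** 2)

grads = jax.grad(loss_fn)(params)
updates, opt_state = tx.update(grads, opt_state, params)  # calls update_fn above
params = optax.apply_updates(params, updates)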