# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import torch


def get_forward_hook(name, trainer, rank, logger, dump_to_file=False):
    """
    A forward hook that dumps the norms of all module inputs and outputs. It is called every time after
    forward() has computed an output. Only floating-point input/output tensor norms are computed.

    For more details about forward hooks, see
    https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_forward_hook.html

    Args:
        name: module name
        trainer: PTL trainer
        rank: worker rank
        logger: PTL log function
        dump_to_file: whether to dump the collected norms to a file on disk
    """
    # The dump file and CSV header state are shared by all invocations of the hook.
    fp = None
    header = False
    if dump_to_file:
        os.makedirs('debug_info', exist_ok=True)
        fp = open(f'debug_info/forward_{name}_rank{rank}.txt', 'w')

    def forward_hook(module, inputs, outputs):
        nonlocal header
        nonlocal fp
        if trainer.training:
            values = []
            headers = []
            # Norm of every floating-point input tensor.
            for n, i in enumerate(inputs):
                if isinstance(i, torch.Tensor) and (
                    i.dtype == torch.float or i.dtype == torch.half or i.dtype == torch.bfloat16
                ):
                    if not header:
                        headers.append('input')
                    input_norm = i.data.norm()
                    values.append(f'{input_norm}')
                    logger(f'debug_info_forward/{name}_rank{rank}_input{n}', input_norm)
            # Norm of every floating-point output tensor.
            if isinstance(outputs, tuple):
                for n, i in enumerate(outputs):
                    if isinstance(i, torch.Tensor) and (
                        i.dtype == torch.float or i.dtype == torch.half or i.dtype == torch.bfloat16
                    ):
                        if not header:
                            headers.append('output')
                        output_norm = i.data.norm()
                        values.append(f'{output_norm}')
                        logger(f'debug_info_forward/{name}_rank{rank}_output{n}', output_norm)
            else:
                headers.append('output')
                values.append(f'{outputs.data.norm()}')
            values.append(f'{trainer.global_step}')
            # Only touch the dump file when dumping was requested.
            if dump_to_file:
                if not header:
                    headers.append('step')
                    fp.write(','.join(headers) + '\n')
                    header = True
                fp.write(','.join(values) + '\n')
                fp.flush()

    return forward_hook
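

# A minimal sketch of attaching the forward hook to a single submodule by hand
# instead of going through register_debug_hooks() below. The `model.decoder`
# submodule and the `pl_module` LightningModule are hypothetical names used
# only for illustration:
#
#     hook = get_forward_hook('decoder', pl_module.trainer, rank=0, logger=pl_module.log, dump_to_file=True)
#     model.decoder.register_forward_hook(hook)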


def get_backward_hook(name, trainer, rank, logger, dump_to_file=False):
    """
    A backward hook that dumps the norms of all module input and output gradients. It is called every time
    the gradients with respect to the module inputs are computed. Only floating-point gradient tensor norms
    are computed.

    For more details about backward hooks, see
    https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_full_backward_hook.html

    Args:
        name: module name
        trainer: PTL trainer
        rank: worker rank
        logger: PTL log function
        dump_to_file: whether to dump the collected norms to a file on disk
    """
    # The dump file and CSV header state are shared by all invocations of the hook.
    fp = None
    header = False
    if dump_to_file:
        os.makedirs('debug_info', exist_ok=True)
        fp = open(f'debug_info/backward_{name}_rank{rank}.txt', 'w')

    def backward_hook(module, inputs, outputs):
        nonlocal header
        nonlocal fp
        if trainer.training:
            values = []
            headers = []
            # Norm of every floating-point input gradient.
            for n, i in enumerate(inputs):
                if isinstance(i, torch.Tensor) and (
                    i.dtype == torch.float or i.dtype == torch.half or i.dtype == torch.bfloat16
                ):
                    if not header:
                        headers.append('input')
                    input_norm = i.data.norm()
                    values.append(f'{input_norm}')
                    logger(f'debug_info_backward/{name}_rank{rank}_input{n}', input_norm)
            # Norm of every floating-point output gradient.
            if isinstance(outputs, tuple):
                for n, i in enumerate(outputs):
                    if isinstance(i, torch.Tensor) and (
                        i.dtype == torch.float or i.dtype == torch.half or i.dtype == torch.bfloat16
                    ):
                        if not header:
                            headers.append('output')
                        output_norm = i.data.norm()
                        values.append(f'{output_norm}')
                        logger(f'debug_info_backward/{name}_rank{rank}_output{n}', output_norm)
            else:
                headers.append('output')
                values.append(f'{outputs.data.norm()}')
            values.append(f'{trainer.global_step}')
            # Only touch the dump file when dumping was requested.
            if dump_to_file:
                if not header:
                    headers.append('step')
                    fp.write(','.join(headers) + '\n')
                    header = True
                fp.write(','.join(values) + '\n')
                fp.flush()

    return backward_hook


def get_tensor_hook(module, name, trainer, rank, logger, dump_to_file=False):
    """
    A tensor hook that dumps the parameter weight norm and gradient norm at the end of each backward step.

    For more details about tensor hooks, see
    https://pytorch.org/docs/stable/generated/torch.Tensor.register_hook.html

    Args:
        module: the model module
        name: parameter name
        trainer: PTL trainer
        rank: worker rank
        logger: PTL log function
        dump_to_file: whether to dump the collected norms to a CSV file on disk
    """
    # The dump file and CSV header state are shared by all invocations of the hook.
    fp = None
    header = False
    if dump_to_file:
        os.makedirs('debug_info', exist_ok=True)
        fp = open(f'debug_info/tensor_{name}_rank{rank}.csv', 'w')

    def tensor_hook(grad):
        nonlocal header
        nonlocal fp
        values = []
        headers = []
        # Norm of the parameter itself and of its incoming gradient.
        weight = module.get_parameter(name)
        weight_norm = weight.data.norm()
        grad_norm = grad.data.norm()
        logger(f'debug_info_tensors/{name}_rank{rank}_grad_norm', grad_norm)
        logger(f'debug_info_tensors/{name}_rank{rank}_weight_norm', weight_norm)
        values.append(f'{weight_norm}')
        values.append(f'{grad_norm}')
        values.append(f'{trainer.global_step}')
        # Only touch the dump file when dumping was requested.
        if dump_to_file:
            if not header:
                headers.append('weight')
                headers.append('grad')
                headers.append('step')
                fp.write(','.join(headers) + '\n')
                header = True
            fp.write(','.join(values) + '\n')
            fp.flush()
        return grad

    return tensor_hook


def register_debug_hooks(module, trainer, logger, dump_to_file=False):
    """
    Register debug hooks on a module. They can
        1. track the module forward step input/output norms
        2. track the module backward step input/output grad norms
        3. track the parameter weight norms and grad norms.
    """
    # Default to rank 0 when torch.distributed is not initialized.
    rank = 0
    if torch.distributed.is_initialized():
        rank = torch.distributed.get_rank()

    # Parameter-level hooks: weight norm and grad norm at the end of each backward step.
    for name, tensor in module.named_parameters():
        if name != '':
            tensor.register_hook(get_tensor_hook(module, name, trainer, rank, logger, dump_to_file))

    # Module-level hooks: input/output norms in forward and grad norms in backward.
    for name, layer in module.named_modules():
        if name != '':
            layer.register_forward_hook(get_forward_hook(name, trainer, rank, logger, dump_to_file))
            layer.register_full_backward_hook(get_backward_hook(name, trainer, rank, logger, dump_to_file))
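

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the module API above):
# it wires the debug hooks into a small torch model using stand-in
# `_FakeTrainer` and `_print_logger` objects, so the norm tracking can be
# exercised without a full PyTorch Lightning run. Every name defined inside
# this block is an assumption made for the self-contained demo.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    class _FakeTrainer:
        """Stand-in exposing the two attributes the hooks read."""

        training = True
        global_step = 0

    def _print_logger(key, value):
        # Stand-in for the PTL `self.log` function: just print the norm.
        print(f'{key}: {value}')

    model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
    register_debug_hooks(model, _FakeTrainer(), _print_logger, dump_to_file=True)

    # One forward/backward pass; norms are printed and also dumped under ./debug_info/.
    x = torch.randn(3, 4, requires_grad=True)
    model(x).sum().backward()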