name: megatron_bert
restore_from_path: null # used when starting from a .nemo file
trainer:
  devices: 2
  num_nodes: 1
  accelerator: gpu
  precision: 16
  logger: False # logger provided by exp_manager
  enable_checkpointing: False
  replace_sampler_ddp: False
  max_epochs: -1 # PTL default. In practice we don't usually train for more than 1 epoch.
  max_steps: 100000 # consumed_samples = global_step * micro_batch_size * data_parallel_size * accumulate_grad_batches
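  # E.g. with this file's defaults (micro_batch_size=4, accumulate_grad_batches=1, and data_parallel_size=2
  # from 2 GPUs with tensor/pipeline parallel sizes of 1), 100000 steps consume 100000 * 4 * 2 * 1 = 800000 samples.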
  log_every_n_steps: 10
  val_check_interval: 100
  limit_val_batches: 50
  limit_test_batches: 500
  accumulate_grad_batches: 1
  gradient_clip_val: 1.0
  benchmark: False
exp_manager:
  explicit_log_dir: null
  exp_dir: null
  name: megatron_bert
  create_wandb_logger: False
  wandb_logger_kwargs:
    project: null
    name: null
  resume_if_exists: True
  resume_ignore_no_checkpoint: True
  create_checkpoint_callback: True
  checkpoint_callback_params:
    monitor: val_loss
    save_top_k: 10
    mode: min
    always_save_nemo: False # Saves a .nemo file during validation; not implemented for model-parallel models.
    filename: 'megatron_bert--{val_loss:.2f}-{step}-{consumed_samples}'
    model_parallel_size: ${multiply:${model.tensor_model_parallel_size}, ${model.pipeline_model_parallel_size}}
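    # With the model defaults below (tensor_model_parallel_size=1, pipeline_model_parallel_size=1) this
    # resolves to 1 * 1 = 1; 'multiply' is a custom OmegaConf resolver registered by NeMo.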
model:
  # model parallelism
  micro_batch_size: 4
  global_batch_size: 8
  tensor_model_parallel_size: 1
  pipeline_model_parallel_size: 1
  virtual_pipeline_model_parallel_size: null
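  # Note: global_batch_size must be an integer multiple of micro_batch_size * data_parallel_size, where
  # data_parallel_size = (devices * num_nodes) / (tensor_model_parallel_size * pipeline_model_parallel_size).
  # Here data_parallel_size = (2 * 1) / (1 * 1) = 2, so 8 / (4 * 2) = 1 micro-batch is accumulated per global step.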
  # model architecture
  encoder_seq_length: 512
  max_position_embeddings: ${.encoder_seq_length}
  num_layers: 12
  hidden_size: 768
  ffn_hidden_size: 3072 # Transformer FFN hidden size. Usually 4 * hidden_size.
  num_attention_heads: 12
  init_method_std: 0.02 # Standard deviation of the zero-mean normal distribution used for weight initialization.
  hidden_dropout: 0.1 # Dropout probability for hidden state transformer.
  kv_channels: null # Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if null.
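  # With the defaults above, a null kv_channels resolves to 768 // 12 = 64 channels per attention head.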
  apply_query_key_layer_scaling: True # scale Q * K^T by 1 / layer-number.
  layernorm_epsilon: 1e-5
  make_vocab_size_divisible_by: 128 # Pad the vocab size to be divisible by this value for computation efficiency.
  pre_process: True # add embedding
  post_process: True # add pooler
  bert_binary_head: True # BERT binary head
  tokenizer:
    library: 'megatron'
    type: 'BertWordPieceLowerCase'
    model: null
    vocab_file: null
    merge_file: null
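    # For the 'BertWordPieceLowerCase' type, vocab_file should point to a WordPiece vocab.txt;
    # 'model' (sentencepiece) and 'merge_file' (BPE merges) are not used by this tokenizer type.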
  # precision
  native_amp_init_scale: 4294967296 # 2 ** 32
  native_amp_growth_interval: 1000
  fp32_residual_connection: False # Move residual connections to fp32
  fp16_lm_cross_entropy: False # Move the cross entropy unreduced loss calculation for lm head to fp16
  # Megatron O2-style half-precision
  megatron_amp_O2: False # Enable O2-level automatic mixed precision using main parameters
  grad_allreduce_chunk_size_mb: 125
  grad_div_ar_fusion: False
  # miscellaneous
  seed: 1234
  use_cpu_initialization: False # Init weights on the CPU (slow for large models)
  onnx_safe: False # Use work-arounds for known problems with Torch ONNX exporter.
  gradient_as_bucket_view: True # PyTorch DDP argument. Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory)
  ## Activation Checkpointing
  # NeMo Megatron supports 'selective' activation checkpointing, where only the memory-intensive part of attention is checkpointed.
  # These memory-intensive activations are also less compute-intensive, which makes activation checkpointing more efficient for LLMs (20B+).
  # See "Reducing Activation Recomputation in Large Transformer Models" (https://arxiv.org/abs/2205.05198) for more details.
  # 'full' will checkpoint the entire transformer layer.
  activations_checkpoint_granularity: null # 'selective' or 'full'
  activations_checkpoint_method: null # 'uniform', 'block'
  # 'uniform' divides the transformer layers into evenly sized chunks and checkpoints the input activation
  # of each chunk at the specified granularity. When used with 'selective', 'uniform' checkpoints all attention blocks in the model.
  # 'block' checkpoints the specified number of layers per pipeline stage at the specified granularity.
  activations_checkpoint_num_layers: null
  # When using 'uniform', this creates groups of transformer layers to checkpoint. Usually set to 1. Increase to save more memory.
  # When using 'block', this checkpoints the first activations_checkpoint_num_layers layers per pipeline stage.
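  # A minimal sketch (assuming the other defaults above are unchanged): checkpointing every transformer layer in full would use
  #   activations_checkpoint_granularity: full
  #   activations_checkpoint_method: uniform
  #   activations_checkpoint_num_layers: 1
  # trading extra recompute in the backward pass for the largest activation-memory savings.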
  num_micro_batches_with_partial_activation_checkpoints: null
  # This feature is valid only when used with pipeline-model-parallelism.
  # When an integer value is provided, it sets the number of micro-batches where only a partial number of Transformer layers get checkpointed
  # and recomputed within a window of micro-batches. The remaining micro-batches in the window checkpoint all Transformer layers. The window size is
  # set by the maximum number of outstanding micro-batch backpropagations, which varies across pipeline stages. The number of partial layers to checkpoint
  # per micro-batch is set by 'activations_checkpoint_num_layers' with 'activations_checkpoint_method' set to 'block'.
  # This feature enables using activation checkpointing for only a fraction of the micro-batches, up to the point of full GPU memory usage.
  activations_checkpoint_layers_per_pipeline: null
  # This feature is valid only when used with pipeline-model-parallelism.
  # When an integer value is provided (rounded down if a float is given), it sets the number of Transformer layers to skip checkpointing at later
  # pipeline stages. For example, an 'activations_checkpoint_layers_per_pipeline' of 3 makes pipeline stage 1 checkpoint 3 fewer layers than
  # stage 0, stage 2 checkpoint 6 fewer layers than stage 0, and so on. This is possible because later pipeline stages
  # use less GPU memory with fewer outstanding micro-batch backpropagations. Used with 'num_micro_batches_with_partial_activation_checkpoints',
  # this feature removes most of the activation checkpoints at the last pipeline stage, which is the critical execution path.
  sequence_parallel: False
  data:
    # Path to data must be specified by the user.
    # It can be overridden from the CLI: "model.data.data_prefix=[.5,/raid/data/pile/my-gpt3_00_text_document,.5,/raid/data/pile/my-gpt3_01_text_document]",
    # or see the example below:
    # data_prefix:
    #   - .5
    #   - /raid/data/pile/my-gpt3_00_text_document
    #   - .5
    #   - /raid/data/pile/my-gpt3_01_text_document
    data_prefix: ???
    index_mapping_dir: null # path to save index mapping .npy files, by default will save in the same location as data_prefix
    data_impl: mmap
    splits_string: 900,50,50
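    # The comma-separated weights are normalized, so 900,50,50 corresponds to a 90% / 5% / 5% train/validation/test split.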
    seq_length: ${model.encoder_seq_length}
    skip_warmup: True
    num_workers: 0
    dataloader_type: single # cyclic
    reset_position_ids: False # Reset position ids after end-of-document token
    reset_attention_mask: False # Reset attention mask after end-of-document token
    eod_mask_loss: False # Mask the loss for end-of-document tokens
    masked_lm_prob: 0.15 # Probability of replacing a token with [MASK].
    short_seq_prob: 0.1 # Probability of producing a short sequence.
  optim:
    name: fused_adam
    lr: 2e-4
    weight_decay: 0.01
    betas:
      - 0.9
      - 0.98
    sched:
      name: CosineAnnealing
      warmup_steps: 500
      constant_steps: 50000
      min_lr: 2e-5
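      # Rough picture of this schedule: the LR warms up linearly for warmup_steps and then decays along a cosine
      # curve toward min_lr; constant_steps sets the length of the constant (hold) phase in NeMo's CosineAnnealing scheduler.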