trainer:
  devices: 1
  num_nodes: 1
  accelerator: gpu
  logger: False # logger provided by exp_manager
  precision: 16 # 16, 32, or bf16
tensor_model_parallel_size: 1
pipeline_model_parallel_size: 1
pipeline_model_parallel_split_rank: 0 # only used for encoder-decoder models: pipeline rank at which the encoder ends and the decoder begins
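# Illustratively (assuming standard Megatron-style pipeline parallelism): with
# pipeline_model_parallel_size: 4 and a split rank of 2, pipeline stages 0-1
# would hold the encoder and stages 2-3 the decoder.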
checkpoint_dir: null # checkpoint file directory, used to load the PTL checkpoint generated during training
checkpoint_name: null # PTL checkpoint file name, only used for PTL checkpoint loading
hparams_file: null # model configuration file, only used for PTL checkpoint loading; the file is named `hparams.yaml` and can be found in the PyTorch Lightning experiment results directory
model_type: t5
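# A minimal sketch of how the checkpoint fields above might be filled in for a
# Lightning run managed by exp_manager (all paths and file names below are
# hypothetical placeholders, not values from this repository):
#
#   checkpoint_dir: /results/megatron_t5/checkpoints
#   checkpoint_name: "megatron_t5--val_loss=1.25-step=100000-last.ckpt"
#   hparams_file: /results/megatron_t5/version_0/hparams.yaml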