# NeMo: scripts/nlp_language_modeling/conf/prompt_learning_ckpt_to_nemo.yaml
trainer:
  devices: 1
  num_nodes: 1
  accelerator: gpu
  logger: False # logger provided by exp_manager
  precision: 16 # 16, 32, or bf16

tensor_model_parallel_size: 1
pipeline_model_parallel_size: 1
pipeline_model_parallel_split_rank: 0 # rank at which the pipeline splits between encoder and decoder; only relevant for encoder-decoder models such as T5
checkpoint_dir: null # directory containing the PyTorch Lightning (PTL) checkpoint saved during prompt-learning training
checkpoint_name: null # PTL checkpoint file name, only used for PTL checkpoint loading
hparams_file: null # model configuration file, only used when loading a PTL checkpoint; it is written as `hparams.yaml` under the PyTorch Lightning experiment results directory
model_type: t5 # type of the base model (e.g. gpt or t5)
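
# Example invocation (a sketch, not taken from the repo: it assumes the
# companion conversion script is named prompt_learning_ckpt_to_nemo.py and,
# like other NeMo scripts, reads this config via Hydra, so any field above
# can be overridden on the command line; the paths and checkpoint name below
# are placeholders):
#
#   python scripts/nlp_language_modeling/prompt_learning_ckpt_to_nemo.py \
#     checkpoint_dir=/results/prompt_learning/checkpoints \
#     checkpoint_name='megatron_t5_prompt_learn--val_loss=1.23-step=1000.ckpt' \
#     hparams_file=/results/prompt_learning/version_0/hparams.yaml \
#     model_type=t5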