# NeMo/examples/nlp/language_modeling/conf/megatron_t5_prompt_learning_inference.yaml
trainer:
  devices: 1
  num_nodes: 1
  accelerator: gpu
  logger: False # logger provided by exp_manager
  precision: 16 # 16, 32, or bf16

data:
  test_ds: ??? # path(s) to the test dataset file(s)
  num_workers: 1
  global_batch_size: 8
  micro_batch_size: 8
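  # Note: under the Megatron batching scheme, global_batch_size =
  # micro_batch_size * data_parallel_size * (micro-batches per step);
  # with a single GPU and no model parallelism, the two sizes should match, as here.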

tensor_model_parallel_size: 1
pipeline_model_parallel_size: 1
pipeline_model_parallel_split_rank: 0 # used for encoder-decoder models
language_model_path: ??? # path to a pretrained T5 .nemo file
virtual_prompt_model_file: ??? # path to a MegatronT5PromptLearningModel .nemo file
pred_file_path: ??? # path where all model predictions will be written to a text file
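
# Usage sketch. Assumptions: the eval script name and Hydra-style CLI overrides
# below follow the NeMo examples layout, and all file paths are illustrative
# placeholders for the ??? values above, not real values:
#
#   python examples/nlp/language_modeling/megatron_t5_prompt_learning_eval.py \
#       --config-path=conf \
#       --config-name=megatron_t5_prompt_learning_inference \
#       virtual_prompt_model_file=/path/to/prompt_model.nemo \
#       language_model_path=/path/to/t5.nemo \
#       data.test_ds=[/path/to/test.jsonl] \
#       pred_file_path=/path/to/predictions.txt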