import tempfile
import unittest

import torch
from parameterized import parameterized
from transformers import AutoModelForSeq2SeqLM, AutoModelForTokenClassification

from peft import LoraConfig, TaskType, get_peft_model

from .testing_common import PeftCommonTester, PeftTestConfigManager


PEFT_ENCODER_DECODER_MODELS_TO_TEST = [
    "ybelkada/tiny-random-T5ForConditionalGeneration-calibrated",
    "hf-internal-testing/tiny-random-BartForConditionalGeneration",
]

FULL_GRID = {"model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "task_type": "SEQ_2_SEQ_LM"}
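# This grid is expanded by PeftTestConfigManager.get_grid_parameters into
# (test_name, model_id, config_cls, config_kwargs) tuples, one per PEFT method
# and model id, matching the signatures of the parameterized tests below.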


class PeftEncoderDecoderModelTester(unittest.TestCase, PeftCommonTester):
    r"""
    Test if the PeftModel behaves as expected. This includes:
    - test if the model has the expected methods

    We use parameterized.expand so that each model is tested individually, which makes debugging easier.
    """

    transformers_class = AutoModelForSeq2SeqLM

    def prepare_inputs_for_testing(self):
        input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
        decoder_input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
        attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)

        input_dict = {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
        }

        return input_dict

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
        self._test_model_attr(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs):
        self._test_adapter_name(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
        self._test_prepare_for_training(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
        self._test_save_pretrained(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_save_pretrained_pickle(self, test_name, model_id, config_cls, config_kwargs):
        self._test_save_pretrained(model_id, config_cls, config_kwargs, safe_serialization=False)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs):
        self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_save_pretrained_selected_adapters_pickle(self, test_name, model_id, config_cls, config_kwargs):
        self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs, safe_serialization=False)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs):
        self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "ia3_kwargs": {"init_ia3_weights": [False]},
                "vera_kwargs": {"init_weights": [False]},
                "task_type": "SEQ_2_SEQ_LM",
            },
        )
    )
    def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
        self._test_merge_layers(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "task_type": "SEQ_2_SEQ_LM",
            },
        )
    )
    def test_mixed_adapter_batches(self, test_name, model_id, config_cls, config_kwargs):
        self._test_mixed_adapter_batches(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_generate(self, test_name, model_id, config_cls, config_kwargs):
        self._test_generate(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_generate_pos_args(self, test_name, model_id, config_cls, config_kwargs):
        # positional arguments are not supported for PeftModelForSeq2SeqLM, so this should raise
        self._test_generate_pos_args(model_id, config_cls, config_kwargs, raises_err=True)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs):
        self._test_generate_half_prec(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_prefix_tuning_half_prec_conversion(self, test_name, model_id, config_cls, config_kwargs):
        self._test_prefix_tuning_half_prec_conversion(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_training_encoder_decoders(self, test_name, model_id, config_cls, config_kwargs):
        self._test_training(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_training_encoder_decoders_layer_indexing(self, test_name, model_id, config_cls, config_kwargs):
        self._test_training_layer_indexing(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_training_encoder_decoders_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs):
        self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs):
        self._test_inference_safetensors(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs):
        self._test_peft_model_device_map(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs):
        self._test_delete_adapter(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs):
        self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs):
        self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "adalora_kwargs": {"init_lora_weights": [False]},
                "ia3_kwargs": {"init_ia3_weights": [False]},
                "boft_kwargs": {"init_weights": [False]},
                "vera_kwargs": {"init_weights": [False]},
                "task_type": "SEQ_2_SEQ_LM",
            },
        )
    )
    def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs):
        self._test_unload_adapter(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "ia3_kwargs": {"init_ia3_weights": [False]},
                "task_type": "SEQ_2_SEQ_LM",
            },
        )
    )
    def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs):
        self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs):
        self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "adalora_kwargs": {"init_lora_weights": [False]},
                "ia3_kwargs": {"init_ia3_weights": [False]},
                "boft_kwargs": {"init_weights": [False]},
                "vera_kwargs": {"init_weights": [False]},
                "task_type": "SEQ_2_SEQ_LM",
            },
        )
    )
    def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs):
        self._test_disable_adapter(model_id, config_cls, config_kwargs)


class PeftEncoderDecoderCustomModelTester(unittest.TestCase):
    """
    A custom class for any custom tests related to encoder-decoder models.
    """

    def test_save_shared_tensors(self):
        model_id = "hf-internal-testing/tiny-random-RobertaModel"
        peft_config = LoraConfig(
            task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all"
        )
        model = AutoModelForTokenClassification.from_pretrained(model_id, num_labels=11)
        model = get_peft_model(model, peft_config)
        with tempfile.TemporaryDirectory() as tmp_dir:
            # saving with safe_serialization=True exercises safetensors' shared-tensor
            # handling and should not raise
            model.save_pretrained(tmp_dir, safe_serialization=True)
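
    def test_save_shared_tensors_roundtrip(self):
        # Minimal round-trip sketch: assumes the adapter saved above with
        # safe_serialization=True can be restored via PeftModel.from_pretrained and
        # then reproduces the original logits. The test name, input ids, and
        # tolerance are illustrative choices, not taken from the original suite.
        from peft import PeftModel  # local import to keep the sketch self-contained

        model_id = "hf-internal-testing/tiny-random-RobertaModel"
        peft_config = LoraConfig(
            task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all"
        )
        model = AutoModelForTokenClassification.from_pretrained(model_id, num_labels=11)
        model = get_peft_model(model, peft_config)
        model.eval()  # disable LoRA dropout so both forward passes are deterministic
        input_ids = torch.tensor([[1, 2, 3, 4]])
        with torch.no_grad():
            logits_before = model(input_ids=input_ids).logits
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, safe_serialization=True)
            # reload the adapter onto a fresh copy of the base model; for TOKEN_CLS,
            # PEFT saves the classifier head as a module_to_save, so the randomly
            # initialized head of the fresh base model is overwritten on load
            base_model = AutoModelForTokenClassification.from_pretrained(model_id, num_labels=11)
            loaded = PeftModel.from_pretrained(base_model, tmp_dir)
            loaded.eval()
            with torch.no_grad():
                logits_after = loaded(input_ids=input_ids).logits
        assert torch.allclose(logits_before, logits_after, atol=1e-6)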