#!/usr/bin/env python3
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import torch

from peft import LoraConfig, get_peft_model_state_dict, inject_adapter_in_model
from peft.utils import ModulesToSaveWrapper


class DummyModel(torch.nn.Module):
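    """Tiny model with an embedding, a hidden linear layer, and an LM head."""
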
    def __init__(self):
        super().__init__()
        self.embedding = torch.nn.Embedding(10, 10)
        self.linear = torch.nn.Linear(10, 10)
        self.lm_head = torch.nn.Linear(10, 10)

    def forward(self, input_ids):
        x = self.embedding(input_ids)
        x = self.linear(x)
        x = self.lm_head(x)
        return x


class TestPeft(unittest.TestCase):
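    """Tests for the low-level PEFT API: adapter injection and state-dict extraction."""
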
    def setUp(self):
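        # Build a fresh DummyModel and inject LoRA adapters into its `linear` module.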
        self.model = DummyModel()

        lora_config = LoraConfig(
            lora_alpha=16,
            lora_dropout=0.1,
            r=64,
            bias="none",
            target_modules=["linear"],
        )

        self.model = inject_adapter_in_model(lora_config, self.model)

    def test_inject_adapter_in_model(self):
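        # A forward pass must run on the adapted model, and the targeted module
        # must expose the injected LoRA A/B matrices.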
        dummy_inputs = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]])
        _ = self.model(dummy_inputs)

        for name, module in self.model.named_modules():
            if name == "linear":
                assert hasattr(module, "lora_A")
                assert hasattr(module, "lora_B")

    def test_get_peft_model_state_dict(self):
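        # With no modules_to_save configured, the PEFT state dict should contain
        # only LoRA parameters.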
        peft_state_dict = get_peft_model_state_dict(self.model)

        for key in peft_state_dict.keys():
            assert "lora" in key

    def test_modules_to_save(self):
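        # Rebuild the model with modules_to_save=["embedding"]: the embedding must be
        # wrapped in ModulesToSaveWrapper and its weights included in the PEFT state dict.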
        self.model = DummyModel()

        lora_config = LoraConfig(
            lora_alpha=16,
            lora_dropout=0.1,
            r=64,
            bias="none",
            target_modules=["linear"],
            modules_to_save=["embedding"],
        )

        self.model = inject_adapter_in_model(lora_config, self.model)

        for name, module in self.model.named_modules():
            if name == "linear":
                assert hasattr(module, "lora_A")
                assert hasattr(module, "lora_B")
            elif name == "embedding":
                assert isinstance(module, ModulesToSaveWrapper)

        state_dict = get_peft_model_state_dict(self.model)

        assert "embedding.weight" in state_dict.keys()

        assert hasattr(self.model.embedding, "weight")
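

if __name__ == "__main__":
    # Allow running this test module directly with `python <this file>`.
    unittest.main()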