# custom_model.py

from transformers import PreTrainedModel, PretrainedConfig
import torch
import torch.nn as nn

class CustomConfig(PretrainedConfig):
    model_type = "custom_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_labels=2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_labels = num_labels

class CustomModel(PreTrainedModel):
    config_class = CustomConfig

    def __init__(self, config):
        super().__init__(config)
        self.embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        # batch_first=True so each layer accepts (batch, seq_len, hidden_size)
        # tensors, matching the shape produced by the embedding layer.
        self.layers = nn.ModuleList(
            [
                nn.TransformerEncoderLayer(
                    d_model=config.hidden_size,
                    nhead=config.num_attention_heads,
                    batch_first=True,
                )
                for _ in range(config.num_hidden_layers)
            ]
        )
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply any final processing (PreTrainedModel helper).
        self.post_init()

    def forward(self, input_ids):
        # input_ids: (batch_size, seq_len) -> embeddings: (batch_size, seq_len, hidden_size)
        x = self.embedding(input_ids)
        for layer in self.layers:
            x = layer(x)
        # Mean-pool over the sequence dimension and classify the pooled representation.
        logits = self.classifier(x.mean(dim=1))
        return logits
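

# --- Usage sketch ---
# A minimal, illustrative example of instantiating the model, running a forward
# pass on random token ids, and round-tripping it through
# save_pretrained/from_pretrained. The small config values and the directory
# name "./custom_model_ckpt" are assumptions for demonstration only.
if __name__ == "__main__":
    config = CustomConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
    model = CustomModel(config)

    # Random batch of token ids: (batch_size=2, seq_len=16).
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    logits = model(input_ids)
    print(logits.shape)  # torch.Size([2, 2]) -> (batch_size, num_labels)

    # Standard Hugging Face serialization helpers work because CustomModel
    # subclasses PreTrainedModel and declares config_class = CustomConfig.
    model.save_pretrained("./custom_model_ckpt")
    reloaded = CustomModel.from_pretrained("./custom_model_ckpt")

    # Optionally, the classes can be registered with the Auto* factories so the
    # "custom_model" model_type is discoverable (shown commented out):
    # from transformers import AutoConfig, AutoModel
    # AutoConfig.register("custom_model", CustomConfig)
    # AutoModel.register(CustomConfig, CustomModel)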