## Usage

The code below reconstructs the model architecture and loads a trained checkpoint. First, the imports and the hyperparameters the checkpoint was trained with:
```python
import numpy as np
import torch
import torch.nn as nn

# Hyperparameters (must match the values used at training time)
batch_size = 16
embed_dim = 256
num_heads = 4
ff_dim = 512
num_layers = 2
noise_prob = 0.3  # token-corruption rate used during training
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
```
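The exact corruption scheme used in training is not shown here. As context for `noise_prob`, a minimal sketch of one common variant (replacing each token with a random vocabulary id with probability `noise_prob`) might look like this; the function name and details are illustrative, not this model's actual training code:

```python
def add_noise(tokens, vocab_size, noise_prob=0.3):
    # tokens: (batch, seq_len) LongTensor of token ids.
    # With probability noise_prob, replace a token with a random vocabulary id.
    mask = torch.rand_like(tokens, dtype=torch.float) < noise_prob
    random_tokens = torch.randint(0, vocab_size, tokens.shape, device=tokens.device)
    return torch.where(mask, random_tokens, tokens)
```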
The model uses fixed sinusoidal positional encodings:

```python
class PositionalEncoding(nn.Module):
    """Standard sinusoidal positional encoding."""

    def __init__(self, d_model, max_len=5000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Registered as a buffer: moves with the module's device but is not trained.
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        # x: (batch, seq_len, d_model); add the encoding for the first seq_len positions.
        return x + self.pe[:, :x.size(1)]
```
Each encoder block applies multi-head self-attention followed by a position-wise feed-forward network, each with a residual connection and post-layer normalization (`batch_first=True` keeps all tensors in `(batch, seq_len, embed_dim)` layout, matching the embedding output):

```python
class TransformerBlock(nn.Module):
    """Post-norm encoder block: self-attention + feed-forward, each with a residual."""

    def __init__(self, embed_dim, num_heads, ff_dim):
        super().__init__()
        # batch_first=True so the attention accepts (batch, seq_len, embed_dim)
        # rather than PyTorch's default (seq_len, batch, embed_dim).
        self.attention = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.ReLU(),
            nn.Linear(ff_dim, embed_dim)
        )
        self.norm2 = nn.LayerNorm(embed_dim)

    def forward(self, x):
        attn_output, _ = self.attention(x, x, x)
        x = self.norm1(x + attn_output)
        ff_output = self.ff(x)
        return self.norm2(x + ff_output)
```
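A quick shape check of a single block (illustrative only):

```python
block = TransformerBlock(embed_dim, num_heads, ff_dim)
x = torch.randn(batch_size, 10, embed_dim)  # (batch, seq_len, embed_dim)
print(block(x).shape)                       # torch.Size([16, 10, 256])
```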
class DenoisingTransformer(nn.Module):
def __init__(self, vocab_size, embed_dim, num_heads, ff_dim, num_layers):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embed_dim)
self.positional_encoding = PositionalEncoding(embed_dim)
self.transformer_blocks = nn.ModuleList([
TransformerBlock(embed_dim, num_heads, ff_dim) for _ in range(num_layers)
])
self.fc = nn.Linear(embed_dim, vocab_size)
def forward(self, x):
x = self.embedding(x)
x = self.positional_encoding(x)
for block in self.transformer_blocks:
x = block(x)
return self.fc(x)
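`load_model` below assumes the checkpoint is a dict with the keys `hyperparameters`, `model_state_dict`, `word2idx`, and `idx2word`. As a sketch, such a checkpoint would be written at training time roughly like this (the vocabulary mappings come from whatever tokenizer was used; the snippet is illustrative, not the actual training script):

```python
# Illustrative only: shows the checkpoint layout load_model expects.
torch.save({
    'model_state_dict': model.state_dict(),
    'hyperparameters': {
        'vocab_size': len(word2idx),
        'embed_dim': embed_dim,
        'num_heads': num_heads,
        'ff_dim': ff_dim,
        'num_layers': num_layers,
    },
    'word2idx': word2idx,  # token -> id mapping built during training
    'idx2word': idx2word,  # id -> token mapping
}, 'denoising_transformer.pth')
```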
Finally, `load_model` rebuilds the model from the stored hyperparameters and restores its weights:

```python
def load_model(path, device='cpu'):
    # If loading fails on newer PyTorch versions, try torch.load(..., weights_only=False).
    checkpoint = torch.load(path, map_location=device)
    hp = checkpoint['hyperparameters']
    model = DenoisingTransformer(
        hp['vocab_size'],
        hp['embed_dim'],
        hp['num_heads'],
        hp['ff_dim'],
        hp['num_layers']
    ).to(device)
    model.load_state_dict(checkpoint['model_state_dict'])
    return model, checkpoint['word2idx'], checkpoint['idx2word']

loaded_model, word2idx, idx2word = load_model('denoising_transformer.pth', device=device)
print("Model loaded successfully!")
print(f"Model device: {next(loaded_model.parameters()).device}")
```