"""
GPT model:

- the initial stem consists of a combination of token encoding and a positional encoding
- the meat of it is a uniform sequence of Transformer blocks
- each Transformer block is a sequential combination of a self-attention block and a 1-hidden-layer MLP block
- all blocks feed into a central residual pathway similar to resnets
- the final decoder is a linear projection into a vanilla Softmax classifier
"""

import math, json

import torch
import torch.nn as nn
from torch.nn import functional as F


class GPTConfig:
    """ base GPT config, params common to all GPT versions """
    embd_pdrop = 0.1
    resid_pdrop = 0.1
    attn_pdrop = 0.1

    def __init__(self, vocab_size, block_size, **kwargs):
        self.vocab_size = vocab_size
        self.block_size = block_size
        for k, v in kwargs.items():
            setattr(self, k, v)


class GPT1Config(GPTConfig):
    """ GPT-1 like network roughly 125M params """
    n_layer = 12
    n_head = 12
    n_embd = 768


class CausalSelfAttention(nn.Module):
    """
    A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention here, but an explicit
    implementation is included to show that there is nothing too scary going on.
    """

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0

        # key, query, value projections for all heads
        self.key = nn.Linear(config.n_embd, config.n_embd)
        self.query = nn.Linear(config.n_embd, config.n_embd)
        self.value = nn.Linear(config.n_embd, config.n_embd)

        # regularization
        self.attn_drop = nn.Dropout(config.attn_pdrop)
        self.resid_drop = nn.Dropout(config.resid_pdrop)

        # output projection
        self.proj = nn.Linear(config.n_embd, config.n_embd)

        # when properties are used, one extra property token is prepended to the
        # sequence, so the causal mask must cover block_size + 1 positions
        num = int(bool(config.num_props))
        self.register_buffer("mask", torch.tril(torch.ones(config.block_size + num, config.block_size + num))
                                     .view(1, 1, config.block_size + num, config.block_size + num))

        self.n_head = config.n_head

    def forward(self, x, layer_past=None):
        B, T, C = x.size()

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2)    # (B, nh, T, hs)
        q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)

        # causal self-attention: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        att = att.masked_fill(self.mask[:, :, :T, :T] == 0, float('-inf'))
        att = F.softmax(att, dim=-1)
        attn_save = att  # keep the (pre-dropout) attention weights for visualization
        att = self.attn_drop(att)
        y = att @ v  # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C)  # re-assemble all head outputs side by side

        # output projection
        y = self.resid_drop(self.proj(y))
        return y, attn_save


class Block(nn.Module):
    """ an unassuming Transformer block """

    def __init__(self, config):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.n_embd)
        self.ln2 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.mlp = nn.Sequential(
            nn.Linear(config.n_embd, 4 * config.n_embd),
            nn.GELU(),
            nn.Linear(4 * config.n_embd, config.n_embd),
            nn.Dropout(config.resid_pdrop),
        )

    def forward(self, x):
        y, attn = self.attn(self.ln1(x))
        x = x + y
        x = x + self.mlp(self.ln2(x))
        return x, attn


class GPT(nn.Module):
    """ the full GPT language model, with a context size of block_size """

    def __init__(self, config):
        super().__init__()

        self.config = config

        # input embedding stem
        self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
        self.type_emb = nn.Embedding(2, config.n_embd)
        if config.num_props:
            self.prop_nn = nn.Linear(config.num_props, config.n_embd)

        self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
        self.drop = nn.Dropout(config.embd_pdrop)

        # transformer blocks; a ModuleList rather than nn.Sequential, since each Block
        # returns an (x, attn) tuple and forward iterates over the blocks manually
        self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])

        # decoder head
        self.ln_f = nn.LayerNorm(config.n_embd)
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        self.block_size = config.block_size

        # optional LSTM, enabled via config.lstm
        if config.lstm:
            self.lstm = nn.LSTM(input_size=config.n_embd, hidden_size=config.n_embd,
                                num_layers=config.lstm_layers, dropout=0.3, bidirectional=False)

        self.apply(self._init_weights)

    def get_block_size(self):
        return self.block_size

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def configure_optimizers(self, train_config):
        """
        This long function is unfortunately doing something very simple and is being very defensive:
        We are separating out all parameters of the model into two buckets: those that will experience
        weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the PyTorch optimizer object.
        """
        decay = set()
        no_decay = set()
        whitelist_weight_modules = (torch.nn.Linear, torch.nn.LSTM)
        blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
        for mn, m in self.named_modules():
            for pn, p in m.named_parameters():
                fpn = '%s.%s' % (mn, pn) if mn else pn  # full param name

                if 'bias' in pn:
                    # all biases will not be decayed (this also catches LSTM biases such as 'bias_ih_l0')
                    no_decay.add(fpn)
                elif 'weight' in pn and isinstance(m, whitelist_weight_modules):
                    # weights of whitelisted modules will be weight decayed
                    decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                    # weights of blacklisted modules will NOT be weight decayed
                    no_decay.add(fpn)

        # special case the position embedding parameter in the root GPT module as not decayed
        no_decay.add('pos_emb')

        # validate that we considered every parameter
        param_dict = {pn: p for pn, p in self.named_parameters()}
        inter_params = decay & no_decay
        union_params = decay | no_decay
        assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
        assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
                                                           % (str(param_dict.keys() - union_params), )

        # create the pytorch optimizer object
        optim_groups = [
            {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        ]
        optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
        return optimizer

    def forward(self, idx, targets=None, prop=None):
        b, t = idx.size()
        assert t <= self.block_size, "Cannot forward, model block size is exhausted."

        if self.config.num_props:
            assert prop.size(-1) == self.config.num_props, "num_props should equal the last dimension of the property vector"

        token_embeddings = self.tok_emb(idx)          # each index maps to a (learnable) vector
        position_embeddings = self.pos_emb[:, :t, :]  # each position maps to a (learnable) vector
        # token type 1 marks ordinary sequence tokens; type 0 is reserved for the property token
        type_embeddings = self.type_emb(torch.ones((b, t), dtype=torch.long, device=idx.device))
        x = self.drop(token_embeddings + position_embeddings + type_embeddings)

        embed = x

        if self.config.num_props:
            # prepend a single property token built from the property vector
            type_embd = self.type_emb(torch.zeros((b, 1), dtype=torch.long, device=idx.device))
            if prop.ndim == 2:
                p = self.prop_nn(prop.unsqueeze(1))
            else:
                p = self.prop_nn(prop)
            p += type_embd
            x = torch.cat([p, x], 1)

        # transformer blocks, collecting the attention map from each layer
        attn_maps = []
        for layer in self.blocks:
            x, attn = layer(x)
            attn_maps.append(attn)

        x = self.ln_f(x)
        logits = self.head(x)

        # drop the logit position of the prepended property token, if any
        num = int(bool(self.config.num_props))
        logits = logits[:, num:, :]

        # if we are given some desired targets also calculate the loss
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.view(-1))

        return logits, loss, attn_maps, embed

    @torch.no_grad()
    def sample(self, x, steps, temperature=1.0, do_sample=False, top_k=None, top_p=None, prop=None):
        """
        Take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
        the sequence, feeding the predictions back into the model each time. Clearly the sampling
        has quadratic complexity unlike an RNN that is only linear, and has a finite context window
        of block_size, unlike an RNN that has an infinite context window.

        Most likely you'll want to make sure to be in model.eval() mode of operation for this.
        """

        def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
            """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
                Args:
                    logits: logits distribution shape (batch size x vocabulary size)
                    top_k > 0: keep only top k tokens with highest probability (top-k filtering).
                    top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                        Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
                From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
            """
            top_k = min(top_k, logits.size(-1))  # safety check
            if top_k > 0:
                # remove all tokens with a probability less than the last token of the top-k
                indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
                logits[indices_to_remove] = filter_value

            if top_p > 0.0:
                sorted_logits, sorted_indices = torch.sort(logits, descending=True)
                cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

                # remove tokens with cumulative probability above the threshold
                sorted_indices_to_remove = cumulative_probs > top_p
                # shift the mask to the right to keep the first token above the threshold as well
                sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                sorted_indices_to_remove[..., 0] = 0

                # scatter the sorted mask back to the original indexing
                indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove)
                logits[indices_to_remove] = filter_value
            return logits

        for k in range(steps):
            # crop the context to the last block_size tokens if it grows too long
            x_cond = x if x.size(1) <= self.block_size else x[:, -self.block_size:]
            logits, _, _, _ = self(x_cond, prop=prop)

            # pluck the logits at the final step and scale by temperature
            logits = logits[:, -1, :] / temperature

            # optionally crop the distribution with top-k and/or nucleus (top-p) filtering
            # (None is mapped to the "disabled" values expected by the helper above)
            logits = top_k_top_p_filtering(logits,
                                           top_k=top_k if top_k is not None else 0,
                                           top_p=top_p if top_p is not None else 0.0)

            # convert logits to (normalized) probabilities
            probs = F.softmax(logits, dim=-1)

            # either sample from the distribution or take the most likely token
            if do_sample:
                x_next = torch.multinomial(probs, num_samples=1)
            else:
                _, x_next = torch.topk(probs, k=1, dim=-1)

            # append the chosen token to the running sequence
            x = torch.cat((x, x_next), dim=1)

        # the first column of the conditioning sequence is dropped before returning
        return x[:, 1:]
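

if __name__ == "__main__":
    # Minimal smoke test -- a sketch, not part of the original module. It assumes the
    # optional flags num_props and lstm are simply passed as config kwargs (they are
    # read above but not defined on GPTConfig itself); all sizes here are arbitrary.
    config = GPTConfig(vocab_size=100, block_size=32,
                       n_layer=2, n_head=2, n_embd=64,
                       num_props=0, lstm=False)
    model = GPT(config)
    model.eval()

    idx = torch.randint(0, config.vocab_size, (4, 16))
    targets = torch.randint(0, config.vocab_size, (4, 16))
    logits, loss, attn_maps, embed = model(idx, targets=targets)
    print(logits.shape, loss.item(), len(attn_maps), embed.shape)

    # sample 8 new tokens from a 1-token conditioning prefix
    out = model.sample(idx[:, :1], steps=8, do_sample=True, top_k=10)
    print(out.shape)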