{ "cells": [ { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "view-in-github" }, "source": [ "\"Open" ] }, { "cell_type": "markdown", "metadata": { "id": "RzcuJMXHZMc7" }, "source": [ "A MinGPT + Lightning + xFormers example Code from Sean Naren (@seannaren)\n", "This is an hommage to https://github.com/karpathy/minGPT\n", "\n", "\n", "See https://github.com/facebookresearch/xformers/blob/main/examples/microGPT.py\n", "for a matching script\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "gJlGTQ-lYERT", "outputId": "52c1c057-6106-422c-9bb9-7fc4ef8262ce" }, "outputs": [], "source": [ "!pip install --pre torch\n", "!pip install xformers pytorch_lightning numpy" ] }, { "cell_type": "markdown", "metadata": { "id": "zMQWysh7Y6nC" }, "source": [ "Now check all our dependencies. If Triton is not compatible with the GPU or the CUDA runtime served by Colab, please make sure that it's not installed in the above" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "fphnY4yrY9z_", "outputId": "744c8bcc-9f71-42bb-b3c2-381622fa8a8b" }, "outputs": [], "source": [ "import math\n", "import os\n", "\n", "import pytorch_lightning as pl\n", "import torch\n", "import torch.nn as nn\n", "from pytorch_lightning import Trainer, seed_everything\n", "from pytorch_lightning.utilities import rank_zero_info\n", "from torch.nn import functional as F\n", "from torch.utils.data import DataLoader, Dataset, RandomSampler\n", "\n", "from xformers.factory.model_factory import xFormer, xFormerConfig" ] }, { "cell_type": "markdown", "metadata": { "id": "zXTliQkHZI-6" }, "source": [ "Let's first define our GPT-like model. Please note that all the parameters in the config dictionnary can be changed more or less at will, but the attention mechanism needs to be compatible with causality constraints. 
"We'll be using PyTorch Lightning to neatly organize the training steps." ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "id": "dQcFhB_jZXaH" }, "outputs": [], "source": [ "class GPT(pl.LightningModule):\n", "    \"\"\" the full GPT language model, with a context size of block_size \"\"\"\n", "\n", "    def __init__(\n", "        self,\n", "        vocab_size,\n", "        weight_decay=0.1,\n", "        betas=(0.9, 0.95),\n", "        learning_rate=1e-4,\n", "        n_embd=512,\n", "        block_size=128,\n", "        n_layer=8,\n", "        n_head=4,\n", "        resid_pdrop=0.1,\n", "        attn_pdrop=0.1,\n", "        mlp_pdrop=0.1,\n", "        attention=\"scaled_dot_product\",\n", "        hidden_layer_multiplier=4,\n", "        warmup_tokens=20,\n", "        final_tokens=1000,\n", "    ):\n", "        super().__init__()\n", "\n", "        # auto creates self.hparams from the method signature\n", "        self.save_hyperparameters()\n", "\n", "        # A list of the encoder or decoder blocks which constitute the Transformer.\n", "        xformer_config = [\n", "            {\n", "                \"block_type\": \"encoder\",\n", "                \"num_layers\": self.hparams.n_layer,\n", "                \"dim_model\": self.hparams.n_embd,\n", "                \"residual_norm_style\": \"pre\",\n", "                \"position_encoding_config\": {\n", "                    \"name\": \"vocab\",\n", "                    \"seq_len\": self.hparams.block_size,\n", "                    \"vocab_size\": self.hparams.vocab_size,\n", "                },\n", "                \"multi_head_config\": {\n", "                    \"num_heads\": self.hparams.n_head,\n", "                    \"residual_dropout\": self.hparams.resid_pdrop,\n", "                    \"use_rotary_embeddings\": True,\n", "                    \"attention\": {\n", "                        \"name\": self.hparams.attention,\n", "                        \"dropout\": self.hparams.attn_pdrop,\n", "                        \"causal\": True,\n", "                        \"seq_len\": self.hparams.block_size,\n", "                    },\n", "                },\n", "                \"feedforward_config\": {\n", "                    \"name\": \"MLP\",\n", "                    \"dropout\": self.hparams.mlp_pdrop,\n", "                    \"activation\": \"gelu\",\n", "                    \"hidden_layer_multiplier\": self.hparams.hidden_layer_multiplier,\n", "                },\n", "            }\n", "        ]\n", "\n", "        config = xFormerConfig(xformer_config)\n", "        self.model = xFormer.from_config(config)\n", "\n", "        # decoder head\n", "        self.ln_f = nn.LayerNorm(self.hparams.n_embd)\n", "        self.head = nn.Linear(self.hparams.n_embd, self.hparams.vocab_size, bias=False)\n", "\n", "        self.block_size = self.hparams.block_size\n", "        self.apply(self._init_weights)\n", "\n", "        self._tokens_seen = 0\n", "\n", "    def _init_weights(self, module):\n", "        if isinstance(module, (nn.Linear, nn.Embedding)):\n", "            module.weight.data.normal_(mean=0.0, std=0.02)\n", "            if isinstance(module, nn.Linear) and module.bias is not None:\n", "                module.bias.data.zero_()\n", "        elif isinstance(module, nn.LayerNorm):\n", "            module.bias.data.zero_()\n", "            module.weight.data.fill_(1.0)\n", "\n", "        # Reset the token counter\n", "        self._tokens_seen = 0\n", "\n", "    def get_block_size(self):\n", "        return self.block_size\n", "\n", "    def configure_optimizers(self):\n", "        # Create the optimizer and the training schedule:\n", "        # - Handle the per-param weight decay\n", "        no_decay = [\"bias\", \"LayerNorm.weight\"]\n", "        params_decay = [\n", "            p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay)\n", "        ]\n", "        params_nodecay = [\n", "            p for n, p in self.named_parameters() if any(nd in n for nd in no_decay)\n", "        ]\n", "        optim_groups = [\n", "            {\"params\": params_decay, \"weight_decay\": self.hparams.weight_decay},\n", "            {\"params\": params_nodecay, \"weight_decay\": 0.0},\n", "        ]\n", "\n", "        # - Start with a warm up, ramp up then cosine\n", "        optimizer = torch.optim.AdamW(\n", "            optim_groups, lr=self.hparams.learning_rate, betas=self.hparams.betas\n", "        )\n", "\n",
"        def update_lr(*_):\n", "            config = self.hparams\n", "\n", "            if self._tokens_seen < config.warmup_tokens:\n", "                # linear warmup\n", "                lr_mult = float(self._tokens_seen) / float(max(1, config.warmup_tokens))\n", "                lr_mult = max(lr_mult, 1e-2)  # could be that we've not seen any yet\n", "            else:\n", "                # cosine learning rate decay\n", "                progress = float(self._tokens_seen - config.warmup_tokens) / float(\n", "                    max(1, config.final_tokens - config.warmup_tokens)\n", "                )\n", "                lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))\n", "\n", "            return lr_mult\n", "\n", "        lr_scheduler = {\n", "            \"scheduler\": torch.optim.lr_scheduler.LambdaLR(\n", "                optimizer,\n", "                lr_lambda=[update_lr, update_lr],\n", "            ),\n", "            \"name\": \"learning_rate\",\n", "            \"interval\": \"step\",  # The unit of the scheduler's step size\n", "            \"frequency\": 1,  # The frequency of the scheduler\n", "        }\n", "        return [optimizer], [lr_scheduler]\n", "\n", "    def forward(self, src):\n", "        # predict the next tokens (in latent space)\n", "        prediction = self.model(src)\n", "\n", "        # translate the predictions into tokens\n", "        prediction = self.ln_f(prediction)\n", "        logits = self.head(prediction)\n", "\n", "        return logits\n", "\n", "    def training_step(self, batch, _):\n", "        src, targets = batch\n", "\n", "        # Update the tokens we've seen (tracked for LR scheduling)\n", "        self._tokens_seen += (src >= 0).numel()\n", "\n", "        # same action as inference\n", "        logits = self(src)\n", "\n", "        # if we are given some desired targets also calculate the loss\n", "        loss = None\n", "        if targets is not None:\n", "            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))\n", "\n", "        self.logger.log_metrics(\n", "            {\n", "                \"train_loss\": loss.mean(),\n", "                \"learning_rate\": self.lr_schedulers().get_last_lr()[0],\n", "            },\n", "            step=self.trainer.global_step,\n", "        )\n", "\n", "        return loss" ] }, { "cell_type": "markdown", "metadata": { "id": "99vrWKt0Z17x" }, "source": [ "Now let's define our dataset. This comes straight from MinGPT: the idea is to serve a sequence of characters (of size `block_size`) starting from any position in the text." ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "id": "UXQ-JeOQZ6wR" }, "outputs": [], "source": [ "class CharDataset(Dataset):\n", "    def __init__(self, data, block_size):\n", "        chars = list(set(data))\n", "        data_size, vocab_size = len(data), len(chars)\n", "        rank_zero_info(\"data has %d characters, %d unique.\" % (data_size, vocab_size))\n", "\n", "        self.stoi = {ch: i for i, ch in enumerate(chars)}\n", "        self.itos = {i: ch for i, ch in enumerate(chars)}\n", "        self.block_size = block_size\n", "        self.vocab_size = vocab_size\n", "        self.data = data\n", "\n", "    def __len__(self):\n", "        return len(self.data) - self.block_size\n", "\n", "    def __getitem__(self, i):\n", "        chunk = self.data[i : i + self.block_size + 1]\n", "        dix = [self.stoi[s] for s in chunk]\n", "\n", "        # src and target are off by one, we want the model to predict the next character\n", "        x = torch.tensor(dix[:-1], dtype=torch.long)\n", "        y = torch.tensor(dix[1:], dtype=torch.long)\n", "        return x, y\n", "\n", "    def to_tokens(self, message, device):\n", "        return torch.tensor([self.stoi[s] for s in message], dtype=torch.long)[\n", "            None, ...\n", "        ].to(device)\n", "\n", "    def from_tokens(self, tokens):\n", "        return \"\".join([self.itos[int(i)] for i in tokens])" ] }, { "cell_type": "markdown", "metadata": { "id": "oPyC2ig0Z86m" }, "source": [ "Now, let's plan ahead: how can we probe our model? ",
"Given the training objective (guess the next character), a natural way is to sample from the model given an initial prompt. The predictions are then appended to the prompt, and we keep asking the model for new predictions over a rolling window. Note that, contrary to the training phase, this is sequential: we only predict one character at a time, then repeat." ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "id": "LUsApc3NaDCP" }, "outputs": [], "source": [ "@torch.no_grad()\n", "def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):\n", "    \"\"\"\n", "    take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in\n", "    the sequence, feeding the predictions back into the model each time. Clearly the sampling\n", "    has quadratic complexity unlike an RNN that is only linear, and has a finite context window\n", "    of block_size, unlike an RNN that has an infinite context window.\n", "    \"\"\"\n", "    block_size = model.get_block_size()\n", "    model.eval()\n", "\n", "    # CREDITS: https://github.com/karpathy/minGPT/blob/master/mingpt/utils.py\n", "    def top_k_logits(logits, k):\n", "        v, _ = torch.topk(logits, k)\n", "        out = logits.clone()\n", "        out[out < v[:, [-1]]] = -float(\"Inf\")\n", "        return out\n", "\n", "    for _ in range(steps):\n", "        x_cond = (\n", "            x if x.size(1) <= block_size else x[:, -block_size:]\n", "        )  # crop context if needed\n", "        logits = model(x_cond)\n", "\n", "        # pluck the logits at the final step and scale by temperature\n", "        logits = logits[:, -1, :] / temperature\n", "\n", "        # optionally crop probabilities to only the top k options\n", "        if top_k is not None:\n", "            logits = top_k_logits(logits, top_k)\n", "\n", "        # apply softmax to convert to probabilities\n", "        probs = F.softmax(logits, dim=-1)\n", "\n", "        # sample from the distribution or take the most likely\n", "        if sample:\n", "            ix = torch.multinomial(probs, num_samples=1)\n", "        else:\n", "            _, ix = torch.topk(probs, k=1, dim=-1)\n", "\n", "        # append to the sequence and continue\n", "        x = torch.cat((x, ix), dim=1)\n", "\n", "    return x[0]  # escape the batch dimension" ] }, { "cell_type": "markdown", "metadata": { "id": "wn3-zIdIaa5Z" }, "source": [ "Ok, good to go, we're equipped! Let's train a model.\n",
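"\n", "One detail worth noting: the cell below emulates a large effective batch size (`REF_BATCH`) on a modest GPU by accumulating gradients over smaller micro-batches (`BATCH`), via Lightning's `accumulate_grad_batches` argument. A minimal sketch of the arithmetic (the intermediate name is only for illustration):\n", "\n", "```python\n", "REF_BATCH = 512  # effective batch size we want to emulate\n", "BATCH = 32  # micro-batch size that fits in GPU memory\n", "\n", "# Number of forward/backward passes accumulated before each optimizer step\n", "accumulation_steps = REF_BATCH // BATCH\n", "assert accumulation_steps * BATCH == REF_BATCH  # 16 * 32 == 512\n", "print(accumulation_steps)\n", "```\n", "\n",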
"Feel free to alter the parameters to get a feel for what works and what doesn't.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 1000 }, "id": "O4iejUaQgQcE", "outputId": "ad4e0f5e-442f-45ad-98ad-7d97e25fb6e9" }, "outputs": [], "source": [ "seed_everything(42)\n", "REF_BATCH = 512\n", "BATCH = 32  # adjust depending on the available memory on your machine\n", "WORKERS = 2\n", "EPOCHS = 1\n", "BLOCK = 128\n", "WARMUP = 20\n", "LR = 6e-4\n", "LAYERS = 4\n", "\n", "if not os.path.exists(\"input.txt\"):\n", "    os.system(\n", "        \"wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt\"\n", "    )\n", "\n", "text = open(\"input.txt\", \"r\").read()\n", "train_dataset = CharDataset(\n", "    text, BLOCK\n", ")  # one line of poem is roughly 50 characters\n", "random_sampler = RandomSampler(train_dataset)\n", "train_loader = DataLoader(\n", "    train_dataset,\n", "    sampler=random_sampler,\n", "    batch_size=BATCH,\n", "    num_workers=WORKERS,\n", "    pin_memory=True,\n", ")\n", "\n", "model = GPT(\n", "    vocab_size=train_dataset.vocab_size,\n", "    block_size=train_dataset.block_size,\n", "    attention=\"scaled_dot_product\",\n", "    warmup_tokens=REF_BATCH * WARMUP,\n", "    learning_rate=LR,\n", "    final_tokens=EPOCHS * len(train_dataset) * BLOCK,\n", "    n_layer=LAYERS,\n", ")\n", "\n", "trainer = Trainer(\n", "    devices=1, accelerator=\"gpu\",\n", "    max_epochs=EPOCHS,\n", "    precision=16,\n", "    gradient_clip_val=1,\n", "    log_every_n_steps=1,\n", "    detect_anomaly=True,\n", "    accumulate_grad_batches=REF_BATCH // BATCH,  # emulate the reference batch size\n", ")\n", "\n", "trainer.fit(model, train_loader)" ] }, { "cell_type": "markdown", "metadata": { "id": "bCST-B0REwOD" }, "source": [ "Alright, this worked! Let's see what we got.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "w3CDPX9fEycb" }, "outputs": [], "source": [ "context = \"Friends of my soul\"  # Prime with something\n", "x = train_dataset.to_tokens(context, model.device)\n", "y = sample(model, x, steps=1000, temperature=1.0, sample=True, top_k=10)\n", "\n", "print(train_dataset.from_tokens(y))" ] } ], "metadata": { "accelerator": "GPU", "colab": { "authorship_tag": "ABX9TyPCTlvaFXBycBYZdtu3jSU7", "collapsed_sections": [], "include_colab_link": true, "name": "xformers_mingpt.ipynb", "provenance": [] }, "kernelspec": { "display_name": "Python 3.8.0 ('xformers')", "language": "python", "name": "python3" }, "language_info": { "name": "python", "version": "3.8.0" } }, "nbformat": 4, "nbformat_minor": 0 }