kishkath committed
Commit b7683e4 · verified · 1 Parent(s): 1bb0753

Upload 4 files

Files changed (4):
  1. README.md +23 -14
  2. app.py +70 -1
  3. requirements.txt +4 -0
  4. train.py +287 -0
README.md CHANGED
@@ -1,14 +1,23 @@
- ---
- title: GPT2
- emoji: 💻
- colorFrom: gray
- colorTo: yellow
- sdk: gradio
- sdk_version: 5.13.1
- app_file: app.py
- pinned: false
- license: apache-2.0
- short_description: GPT-2 model with 124M trained with shakespeare text data
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Shakespeare Text Generator
+
+ This is a Shakespeare-style text generator trained on Shakespeare's works. The model is based on the GPT architecture and generates text in the style of Shakespeare's writings.
+
+ ## How to Use
+
+ 1. Enter a prompt in the text box
+ 2. Adjust the generation parameters if desired:
+    - Max Length: Controls the maximum length of generated text
+    - Temperature: Controls randomness (higher = more random)
+    - Top K: Controls diversity of word choices
+ 3. Click "Submit" to generate text
+
+ ## Examples
+
+ Try some of the example prompts provided below the interface!
+
+ ## Model Details
+
+ This model is a small GPT model trained on Shakespeare's texts. It uses the following architecture:
+ - 12 layers
+ - 768 embedding dimension
+ - 12 attention heads
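The architecture listed under "Model Details" matches the defaults of the `GPTConfig` dataclass added in train.py below. A minimal sketch of building that model, assuming train.py is importable from the working directory (no checkpoint is needed just to construct it):

```python
# Minimal sketch: instantiate the model described in "Model Details" using this repo's train.py.
# Assumption: train.py is on the Python path; constructing the model does not require a checkpoint.
from train import GPT, GPTConfig

config = GPTConfig()  # defaults: n_layer=12, n_head=12, n_embd=768, block_size=1024, vocab_size=50257
model = GPT(config)
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.0f}M parameters")  # roughly 124M, the GPT-2 small footprint
```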
app.py CHANGED
@@ -1 +1,70 @@
- ads
+ import torch
+ import gradio as gr
+ import tiktoken
+ from torch.nn import functional as F
+ from train import GPT, GPTConfig
+
+ # Initialize model globally to avoid reloading
+ model = None
+
+ def initialize_model():
+     global model
+     if model is None:
+         model = GPT(GPTConfig())
+         model.load_state_dict(torch.load('model_state_dict.pth', map_location=torch.device('cpu')))
+         model.eval()
+     return model
+
+ def generate_shakespeare(prompt, max_length=100, temperature=0.8, top_k=50):
+     """Generate Shakespeare-style text from a prompt"""
+     # Initialize model if not already done
+     model = initialize_model()
+
+     # Encode the prompt
+     enc = tiktoken.get_encoding('gpt2')
+     prompt_tokens = enc.encode(prompt)
+     x = torch.tensor(prompt_tokens).unsqueeze(0)
+
+     with torch.no_grad():
+         while x.size(1) < max_length:
+             # Get predictions
+             logits, _ = model(x)
+             logits = logits[:, -1, :] / temperature
+
+             # Sample from the top-k most likely tokens
+             probs = F.softmax(logits, dim=-1)
+             topk_probs, topk_indices = torch.topk(probs, int(top_k), dim=-1)
+             ix = torch.multinomial(topk_probs, num_samples=1)
+             next_token = torch.gather(topk_indices, -1, ix)
+
+             # Append to the sequence
+             x = torch.cat((x, next_token), dim=1)
+
+             # Stop if we generate a newline
+             if next_token.item() == enc.encode('\n')[0]:
+                 break
+
+     # Decode the generated text
+     generated_tokens = x[0].tolist()
+     generated_text = enc.decode(generated_tokens)
+
+     return generated_text
+
+ # Create Gradio interface
+ demo = gr.Interface(
+     fn=generate_shakespeare,
+     inputs=[
+         gr.Textbox(label="Enter your prompt", placeholder="Enter some Shakespeare-style text..."),
+         gr.Slider(minimum=10, maximum=200, value=100, step=1, label="Max Length"),
+         gr.Slider(minimum=0.1, maximum=2.0, value=0.8, step=0.1, label="Temperature"),
+         gr.Slider(minimum=1, maximum=100, value=50, step=1, label="Top K")
+     ],
+     outputs=gr.Textbox(label="Generated Text"),
+     title="Shakespeare Text Generator",
+     description="Generate Shakespeare-style text based on your prompt using a fine-tuned GPT model.",
+     examples=[
+         ["To be, or not to be,", 100, 0.8, 50],
+         ["All the world's a stage,", 100, 0.8, 50],
+         ["Romeo, Romeo,", 100, 0.8, 50]
+     ]
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
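For a quick local smoke test without launching the Gradio UI, the generation function can be called directly. A minimal sketch, assuming `model_state_dict.pth` is present in the working directory alongside app.py:

```python
# Minimal sketch: call the generator directly instead of going through the Gradio interface.
# Assumption: model_state_dict.pth (loaded by initialize_model) exists in the working directory.
from app import generate_shakespeare

sample = generate_shakespeare("To be, or not to be,", max_length=80, temperature=0.9)
print(sample)
```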
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch
+ gradio
+ tiktoken
+ transformers
train.py ADDED
@@ -0,0 +1,287 @@
+ # Solving for residual std scaling issue
+ import os
+ import math
+ import time
+ import inspect
+ from dataclasses import dataclass
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+
+
+ class CausalSelfAttention(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.n_embd % config.n_head == 0
+         # key, query, value projections for all heads, but in a batch
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
+         # output projection
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd)
+         self.c_proj.NANOGPT_SCALE_INIT = 1
+         # regularization
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size)).view(1, 1, config.block_size, config.block_size))
+
+     def forward(self, x):
+         B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+         # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+         # nh is "number of heads", hs is "head size", and C (number of channels) = nh * hs
+         # e.g. in GPT-2 (124M), n_head=12, hs=64, so nh*hs=C=768 channels in the Transformer
+         qkv = self.c_attn(x)
+         q, k, v = qkv.split(self.n_embd, dim=2)
+         k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+
+         att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+         att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))
+         att = F.softmax(att, dim=-1)
+         y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+
+         y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+         # output projection
+         y = self.c_proj(y)
+         return y
+
+
+ class MLP(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
+         self.gelu = nn.GELU(approximate='tanh')
+         self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
+         self.c_proj.NANOGPT_SCALE_INIT = 1
+
+     def forward(self, x):
+         x = self.c_fc(x)
+         x = self.gelu(x)
+         x = self.c_proj(x)
+         return x
+
+
+ class Block(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.ln_1 = nn.LayerNorm(config.n_embd)
+         self.attn = CausalSelfAttention(config)
+         self.ln_2 = nn.LayerNorm(config.n_embd)
+         self.mlp = MLP(config)
+
+     def forward(self, x):
+         x = x + self.attn(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+
+ @dataclass
+ class GPTConfig:
+     block_size: int = 1024  # max sequence length
+     vocab_size: int = 50257 # number of tokens: 50,000 BPE merges + 256 bytes tokens + 1 <|endoftext|> token
+     n_layer: int = 12       # number of layers
+     n_head: int = 12        # number of heads
+     n_embd: int = 768       # embedding dimension
+
+
+ class GPT(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+
+         self.transformer = nn.ModuleDict(dict(
+             wte = nn.Embedding(config.vocab_size, config.n_embd),
+             wpe = nn.Embedding(config.block_size, config.n_embd),
+             h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+             ln_f = nn.LayerNorm(config.n_embd),
+         ))
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+         # weight sharing
+         self.transformer.wte.weight = self.lm_head.weight
+
+         # weight initialization
+         self.apply(self._init_weights)
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             std = 0.02
+             if hasattr(module, 'NANOGPT_SCALE_INIT'):
+                 std *= (2 * self.config.n_layer) ** -0.5
+             torch.nn.init.normal_(module.weight, mean=0.0, std=std)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+     def forward(self, idx, targets=None):
+         # idx is of shape (B, T)
+         B, T = idx.size()
+         assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"
+         # forward the token and position embeddings
+         pos = torch.arange(0, T, dtype=torch.long, device=idx.device) # shape (T)
+         pos_emb = self.transformer.wpe(pos) # position embeddings of shape (T, n_embd)
+         tok_emb = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd)
+         x = tok_emb + pos_emb
+         # forward the blocks of the transformer
+         for block in self.transformer.h:
+             x = block(x)
+         # forward the final layernorm and the classifier
+         x = self.transformer.ln_f(x)
+         logits = self.lm_head(x) # (B, T, vocab_size)
+         loss = None
+         if targets is not None:
+             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
+         return logits, loss
+
+     @classmethod
+     def from_pretrained(cls, model_type):
+         """Loads pretrained GPT-2 model weights from huggingface"""
+         assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
+         from transformers import GPT2LMHeadModel
+         print("loading weights from pretrained gpt: %s" % model_type)
+
+         # n_layer, n_head and n_embd are determined from model_type
+         config_args = {
+             'gpt2':        dict(n_layer=12, n_head=12, n_embd=768),  # 124M params
+             'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
+             'gpt2-large':  dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
+             'gpt2-xl':     dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
+         }[model_type]
+         config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
+         config_args['block_size'] = 1024  # always 1024 for GPT model checkpoints
+         # create a from-scratch initialized minGPT model
+         config = GPTConfig(**config_args)
+         model = GPT(config)
+         sd = model.state_dict()
+         sd_keys = sd.keys()
+         sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param
+
+         # init a huggingface/transformers model
+         model_hf = GPT2LMHeadModel.from_pretrained(model_type)
+         sd_hf = model_hf.state_dict()
+
+         # copy while ensuring all of the parameters are aligned and match in names and shapes
+         sd_keys_hf = sd_hf.keys()
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
+         transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
+         # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
+         # this means that we have to transpose these weights when we import them
+         assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
+         for k in sd_keys_hf:
+             if any(k.endswith(w) for w in transposed):
+                 # special treatment for the Conv1D weights we need to transpose
+                 assert sd_hf[k].shape[::-1] == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k].t())
+             else:
+                 # vanilla copy over the other parameters
+                 assert sd_hf[k].shape == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k])
+
+         return model
+
+ # model = GPT.from_pretrained('gpt2')
+
+
+ import tiktoken
+
+ class DataLoaderLite:
+     def __init__(self, B, T):
+         self.B = B
+         self.T = T
+
+         # at init load tokens from disk and store them in memory
+         with open('input.txt', 'r') as f:
+             text = f.read()
+         enc = tiktoken.get_encoding('gpt2')
+         tokens = enc.encode(text)
+         self.tokens = torch.tensor(tokens)
+         print(f'loaded {len(self.tokens)} tokens')
+         print(f'1 epoch = {len(self.tokens) // (B * T)} batches')
+
+         # state
+         self.current_position = 0
+
+     def next_batch(self):
+         B, T = self.B, self.T
+         buf = self.tokens[self.current_position: self.current_position + B * T + 1]
+         x = (buf[:-1]).view(B, T) # inputs
+         y = (buf[1:]).view(B, T)  # targets
+         # advance the position in the tensor
+         self.current_position += B * T
+         # if loading the next batch would be out of bounds, reset
+         if self.current_position + (B * T + 1) > len(self.tokens):
+             self.current_position = 0
+         return x, y
+
+
+ # The training script below only runs when this file is executed directly,
+ # so app.py can import GPT / GPTConfig without triggering training.
+ if __name__ == "__main__":
+     device = 'cpu'
+     if torch.cuda.is_available():
+         device = 'cuda'
+     elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+         device = "mps"
+     print(f"using device: {device}")
+
+     # SEED
+     torch.manual_seed(1337)
+     if torch.cuda.is_available():
+         torch.cuda.manual_seed(1337)
+
+     # STOP
+     num_return_sequences = 5
+     max_length = 30
+
+     model = GPT(GPTConfig())
+     model.to(device)
+
+     train_loader = DataLoaderLite(B=4, T=32)
+
+     # NEW CODE
+     optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)
+     for i in range(50):
+         x, y = train_loader.next_batch()
+         x, y = x.to(device), y.to(device)
+         optimizer.zero_grad()
+         logits, loss = model(x, y)
+         loss.backward()
+         optimizer.step()
+         print(f'step{i}, loss: {loss.item()}')
+
+     print(loss)
+     import sys; sys.exit(0)
+
+     # NOTE: the sampling code below is unreachable because of the sys.exit(0) above.
+     torch.manual_seed(42)
+     torch.cuda.manual_seed(42)
+     while x.size(1) < max_length:
+         # forward the model to get the logits
+         with torch.no_grad():
+             logits = model(x)[0] # (B, T, vocab_size)
+             # take the logits at the last position
+             logits = logits[:, -1, :] # (B, vocab_size)
+             # get the probabilities
+             probs = F.softmax(logits, dim=-1)
+             # do top-k sampling of 50 (huggingface pipeline default)
+             # topk_probs here becomes (5, 50), topk_indices is (5, 50)
+             topk_probs, topk_indices = torch.topk(probs, 50, dim=-1)
+             # select a token from the top-k probabilities
+             # note: multinomial does not demand the input to sum to 1
+             ix = torch.multinomial(topk_probs, 1) # (B, 1)
+             # gather the corresponding indices
+             xcol = torch.gather(topk_indices, -1, ix) # (B, 1)
+             # append to the sequence
+             x = torch.cat((x, xcol), dim=1)
+
+     # print the generated text
+     enc = tiktoken.get_encoding('gpt2')
+     for i in range(num_return_sequences):
+         tokens = x[i, :max_length].tolist()
+         decoded = enc.decode(tokens)
+         print(">", decoded)
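One gap worth noting: app.py loads `model_state_dict.pth`, but the training script above never writes that file. A minimal sketch of saving the checkpoint at the end of the training loop (before the `sys.exit(0)`), assuming the file name that app.py expects:

```python
# Minimal sketch: persist the trained weights under the name app.py loads.
# Assumption: the file name 'model_state_dict.pth' is taken from app.py; run this after the training loop.
torch.save(model.state_dict(), 'model_state_dict.pth')
```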