codewithdark committed on
Commit 94e12d2 · verified · 1 Parent(s): 465a232

Delete model.py

Files changed (1)
  1. model.py +0 -40
model.py DELETED
@@ -1,40 +0,0 @@
-from transformers import PretrainedConfig, PreTrainedModel
-
-class DiffusionConfig(PretrainedConfig):
-    """Configuration class for Diffusion-LLM model."""
-    model_type = "diffusionLM"
-
-    def __init__(
-        self,
-        vocab_size: int = 50257,
-        hidden_size: int = 768,
-        num_hidden_layers: int = 12,
-        num_attention_heads: int = 12,
-        intermediate_size: int = 3072,
-        hidden_dropout_prob: float = 0.1,
-        attention_probs_dropout_prob: float = 0.1,
-        max_position_embeddings: int = 1024,
-        initializer_range: float = 0.02,
-        layer_norm_eps: float = 1e-12,
-        pad_token_id: int = 0,
-        mask_token_id: int = 50256,
-        eos_token_id: int = 50256,
-        num_timesteps: int = 100,
-        time_embed_dim: int = 128,
-        **kwargs
-    ):
-        super().__init__(pad_token_id=pad_token_id, **kwargs)
-        self.vocab_size = vocab_size
-        self.hidden_size = hidden_size
-        self.num_hidden_layers = num_hidden_layers
-        self.num_attention_heads = num_attention_heads
-        self.intermediate_size = intermediate_size
-        self.hidden_dropout_prob = hidden_dropout_prob
-        self.attention_probs_dropout_prob = attention_probs_dropout_prob
-        self.max_position_embeddings = max_position_embeddings
-        self.initializer_range = initializer_range
-        self.layer_norm_eps = layer_norm_eps
-        self.mask_token_id = mask_token_id
-        self.eos_token_id = eos_token_id
-        self.num_timesteps = num_timesteps
-        self.time_embed_dim = time_embed_dim
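
For reference, the deleted file defined only this PretrainedConfig subclass. A minimal sketch of how such a config is typically instantiated and round-tripped follows, assuming the standard transformers config workflow; the trimmed argument list and the save directory name are illustrative, not taken from the original file:

from transformers import PretrainedConfig

class DiffusionConfig(PretrainedConfig):
    """Trimmed illustration of the deleted config class."""
    model_type = "diffusionLM"

    def __init__(self, vocab_size: int = 50257, num_timesteps: int = 100, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.num_timesteps = num_timesteps

# Standard PretrainedConfig round trip; "diffusionLM-config" is a hypothetical path.
config = DiffusionConfig(num_timesteps=50)        # override one default
config.save_pretrained("diffusionLM-config")      # writes config.json to that directory
reloaded = DiffusionConfig.from_pretrained("diffusionLM-config")
assert reloaded.num_timesteps == 50

Because custom fields such as num_timesteps are stored as plain attributes, they survive the save/load cycle through config.json, which is what makes a PretrainedConfig subclass like this the conventional way to version model hyperparameters on the Hub.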