davidberenstein1957 committed
Commit 1551da8 · verified · 1 Parent(s): 211c7ce

Add files using upload-large-folder tool

Files changed (2)
  1. README.md +1 -79
  2. tokenizer/tokenizer_config.json +8 -1
README.md CHANGED
@@ -68,85 +68,7 @@ The compression configuration of the model is stored in the `smash_config.json`
 The configuration of the model is stored in the `config.json` file.
 
 ```bash
-{
-  "config": {
-    "architectures": [
-      "Llama4ForCausalLM"
-    ],
-    "attention_bias": false,
-    "attention_chunk_size": 8192,
-    "attention_dropout": 0.0,
-    "attn_scale": 0.1,
-    "attn_temperature_tuning": 4,
-    "bos_token_id": 200000,
-    "cache_implementation": "hybrid",
-    "eos_token_id": [
-      200001,
-      200007,
-      200008
-    ],
-    "floor_scale": 8192,
-    "for_llm_compressor": false,
-    "head_dim": 8,
-    "hidden_act": "silu",
-    "hidden_size": 16,
-    "initializer_range": 0.02,
-    "interleave_moe_layer_step": 1,
-    "intermediate_size": 32,
-    "intermediate_size_mlp": 64,
-    "max_position_embeddings": 10485760,
-    "model_type": "llama4_text",
-    "moe_layers": [
-      0,
-      1,
-      2,
-      3,
-      4
-    ],
-    "no_rope_layers": [
-      1,
-      1,
-      1,
-      0,
-      1
-    ],
-    "num_attention_heads": 10,
-    "num_experts_per_tok": 1,
-    "num_hidden_layers": 5,
-    "num_key_value_heads": 2,
-    "num_local_experts": 4,
-    "output_router_logits": false,
-    "pad_token_id": 200018,
-    "rms_norm_eps": 1e-05,
-    "rope_scaling": {
-      "factor": 8.0,
-      "high_freq_factor": 4.0,
-      "low_freq_factor": 1.0,
-      "original_max_position_embeddings": 8192,
-      "rope_type": "llama3"
-    },
-    "rope_theta": 500000.0,
-    "router_aux_loss_coef": 0.001,
-    "router_jitter_noise": 0.0,
-    "tie_word_embeddings": false,
-    "torch_dtype": "bfloat16",
-    "transformers_version": "4.51.3",
-    "use_cache": true,
-    "use_qk_norm": true,
-    "vocab_size": 202048
-  },
-  "generation_config": {
-    "_from_model_config": true,
-    "bos_token_id": 200000,
-    "eos_token_id": [
-      200001,
-      200007,
-      200008
-    ],
-    "pad_token_id": 200018,
-    "transformers_version": "4.51.3"
-  }
-}
+{}
 ```
 
 ## 🌍 Join the Pruna AI community!
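The context line above notes that the model configuration lives in the `config.json` file, so the fields shown in the removed README block can still be read back from the repository itself. A minimal sketch in Python, assuming a local checkout of this repository (the directory name below is a hypothetical placeholder, and the expected values in the comments are taken from the removed block above):

```python
import json

# Hypothetical local checkout of this model repository; adjust the path as needed.
repo_dir = "./llama4-smashed-checkout"

# config.json stores the model configuration referenced in the README.
with open(f"{repo_dir}/config.json") as f:
    config = json.load(f)

# A few of the fields that appeared in the removed README block.
print(config.get("model_type"))         # expected: "llama4_text"
print(config.get("num_hidden_layers"))  # expected: 5
print(config.get("torch_dtype"))        # expected: "bfloat16"
```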
tokenizer/tokenizer_config.json CHANGED
@@ -9086,12 +9086,19 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|eot|>",
   "extra_special_tokens": {},
+  "max_length": 10485760,
   "model_input_names": [
     "input_ids",
     "attention_mask"
   ],
   "model_max_length": 10485760,
+  "pad_to_multiple_of": null,
   "pad_token": "<|finetune_right_pad_id|>",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
   "processor_class": "Llama4Processor",
-  "tokenizer_class": "PreTrainedTokenizer"
+  "stride": 0,
+  "tokenizer_class": "PreTrainedTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first"
 }
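The keys added to `tokenizer/tokenizer_config.json` pin down explicit padding and truncation defaults (right-side padding and truncation, `longest_first` truncation strategy, no stride). A minimal sketch that reads them back from the file, again assuming a hypothetical local checkout; when the tokenizer is loaded with `transformers`, the same settings surface as attributes such as `tokenizer.padding_side`, `tokenizer.truncation_side`, and `tokenizer.model_max_length`:

```python
import json

# Hypothetical local checkout of this model repository; adjust the path as needed.
repo_dir = "./llama4-smashed-checkout"

with open(f"{repo_dir}/tokenizer/tokenizer_config.json") as f:
    tok_cfg = json.load(f)

# Keys introduced in this commit, with the values shown in the diff above.
for key in (
    "max_length",
    "pad_to_multiple_of",
    "pad_token_type_id",
    "padding_side",
    "stride",
    "truncation_side",
    "truncation_strategy",
):
    print(f"{key} = {tok_cfg.get(key)}")
```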