Upload folder using huggingface_hub
- README.md +60 -0
- all_results.json +9 -0
- config.json +36 -0
- generation_config.json +6 -0
- llamaboard_config.yaml +77 -0
- model-00001-of-00003.safetensors +3 -0
- model-00002-of-00003.safetensors +3 -0
- model-00003-of-00003.safetensors +3 -0
- model.safetensors.index.json +280 -0
- running_log.txt +339 -0
- special_tokens_map.json +23 -0
- tokenizer.json +0 -0
- tokenizer_config.json +149 -0
- train_results.json +9 -0
- trainer_log.jsonl +70 -0
- trainer_state.json +595 -0
- training_args.bin +3 -0
- training_args.yaml +39 -0
- training_loss.png +0 -0
README.md
ADDED
@@ -0,0 +1,60 @@
+---
+library_name: transformers
+license: other
+base_model: deepseek-ai/deepseek-coder-7b-instruct-v1.5
+tags:
+- llama-factory
+- freeze
+- generated_from_trainer
+model-index:
+- name: deepseek-nlx-330k
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# deepseek-nlx-330k
+
+This model is a fine-tuned version of [deepseek-ai/deepseek-coder-7b-instruct-v1.5](https://huggingface.co/deepseek-ai/deepseek-coder-7b-instruct-v1.5) on the codes3_query_filtered_330k_nlx dataset.
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-05
+- train_batch_size: 16
+- eval_batch_size: 8
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 4
+- gradient_accumulation_steps: 8
+- total_train_batch_size: 512
+- total_eval_batch_size: 32
+- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+- lr_scheduler_type: cosine
+- num_epochs: 1.0
+
+### Training results
+
+
+
+### Framework versions
+
+- Transformers 4.48.2
+- Pytorch 2.5.1+cu124
+- Datasets 3.2.0
+- Tokenizers 0.21.0
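The card above leaves usage blank, so a brief loading sketch may help; it assumes the checkpoint is published under a placeholder repo id (`your-org/deepseek-nlx-330k` is hypothetical, not confirmed by this commit), while the architecture and special tokens come from the config and tokenizer files below. Note also that the total train batch size is consistent: 16 per device × 4 devices × 8 gradient-accumulation steps = 512.

```python
# Minimal usage sketch, assuming the checkpoint is published at a placeholder
# repo id; swap in the real one. Dtype mirrors the bfloat16 training config.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-org/deepseek-nlx-330k"  # hypothetical placeholder
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

# The tokenizer ships a chat template, so apply_chat_template builds the prompt.
messages = [{"role": "user", "content": "Write a function that checks whether a number is prime."}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
outputs = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```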
all_results.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "epoch": 0.9963898916967509,
+  "num_input_tokens_seen": 144703488,
+  "total_flos": 5.635565866281075e+18,
+  "train_loss": 0.5665888682655666,
+  "train_runtime": 10913.0245,
+  "train_samples_per_second": 3.247,
+  "train_steps_per_second": 0.006
+}
config.json
ADDED
@@ -0,0 +1,36 @@
+{
+  "_name_or_path": "deepseek-ai/deepseek-coder-7b-instruct-v1.5",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 100000,
+  "eos_token_id": 100015,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 30,
+  "num_key_value_heads": 32,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": {
+    "factor": 1.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 4096,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.48.2",
+  "use_cache": false,
+  "vocab_size": 102400
+}
generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 100000,
+  "eos_token_id": 100015,
+  "transformers_version": "4.48.2"
+}
llamaboard_config.yaml
ADDED
@@ -0,0 +1,77 @@
+top.booster: liger_kernel
+top.checkpoint_path: null
+top.finetuning_type: freeze
+top.model_name: DeepSeek-Coder-7B-Instruct
+top.quantization_bit: none
+top.quantization_method: bitsandbytes
+top.rope_scaling: llama3
+top.template: deepseekcoder
+train.additional_target: ''
+train.apollo_rank: 256
+train.apollo_scale: 1
+train.apollo_target: all
+train.apollo_update_interval: 200
+train.badam_mode: layer
+train.badam_switch_interval: 50
+train.badam_switch_mode: ascending
+train.badam_update_ratio: 0.05
+train.batch_size: 16
+train.compute_type: bf16
+train.create_new_adapter: false
+train.cutoff_len: 4096
+train.dataset:
+- codes3_query_filtered_330k_nlx
+train.dataset_dir: data
+train.ds_offload: false
+train.ds_stage: none
+train.extra_args: '{}'
+train.freeze_extra_modules: ''
+train.freeze_trainable_layers: 2
+train.freeze_trainable_modules: all
+train.galore_rank: 16
+train.galore_scale: 2
+train.galore_target: all
+train.galore_update_interval: 200
+train.gradient_accumulation_steps: 8
+train.learning_rate: 5e-5
+train.logging_steps: 1
+train.lora_alpha: 16
+train.lora_dropout: 0
+train.lora_rank: 8
+train.lora_target: ''
+train.loraplus_lr_ratio: 0
+train.lr_scheduler_type: cosine
+train.mask_history: false
+train.max_grad_norm: '1.0'
+train.max_samples: '50000000'
+train.neat_packing: true
+train.neftune_alpha: 0
+train.num_train_epochs: '1'
+train.packing: true
+train.ppo_score_norm: false
+train.ppo_whiten_rewards: false
+train.pref_beta: 0.1
+train.pref_ftx: 0
+train.pref_loss: sigmoid
+train.report_to:
+- none
+train.resize_vocab: false
+train.reward_model: null
+train.save_steps: 500
+train.swanlab_api_key: ''
+train.swanlab_mode: cloud
+train.swanlab_project: llamafactory
+train.swanlab_run_name: ''
+train.swanlab_workspace: ''
+train.train_on_prompt: false
+train.training_stage: Supervised Fine-Tuning
+train.use_apollo: true
+train.use_badam: false
+train.use_dora: false
+train.use_galore: false
+train.use_llama_pro: true
+train.use_pissa: false
+train.use_rslora: false
+train.use_swanlab: false
+train.val_size: 0
+train.warmup_steps: 0
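Two settings above interact: `train.freeze_trainable_layers: 2` together with `train.use_llama_pro: true`. The running log below reports `Set trainable layers: .14.,.29.`, i.e. the two trainable layers are spread evenly across the 30-layer stack rather than taken from the top. A sketch of that selection rule, inferred from the log output rather than copied from LLaMA-Factory's source:

```python
# Sketch of the even-spread layer selection implied by the log line
# "Set trainable layers: .14.,.29." (an inference, not LLaMA-Factory source).
num_hidden_layers = 30  # from config.json
k = 2                   # train.freeze_trainable_layers

stride = num_hidden_layers // k
trainable = [(i + 1) * stride - 1 for i in range(k)]
print(trainable)  # [14, 29]
```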
model-00001-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28aba7f82f5ce656674a3e922d68c0b7bd12d2f2e6da09e47fa72eec539c53c2
+size 4987202208
model-00002-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b5f404a4192d64823438ee820dd36fc5516f8e7a8357e50e200be30aef86bc0
+size 4980944400
model-00003-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2289ae058e1fc527bbf37c07cafa3dcee90e54f5e4597945a586a78a4e1509b0
+size 4662149984
model.safetensors.index.json
ADDED
@@ -0,0 +1,280 @@
+{
+  "metadata": {
+    "total_size": 14630264832
+  },
+  "weight_map": {
+    "lm_head.weight": "model-00003-of-00003.safetensors",
+    "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.norm.weight": "model-00003-of-00003.safetensors"
+  }
+}
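The index above is a plain tensor-name-to-shard map. A short sketch of how to sanity-check a local copy of it (standard-library json/collections only; the file name is as uploaded in this commit):

```python
# Sketch: inspect a sharded safetensors index and count tensors per shard.
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])          # 14630264832 bytes (~14.6 GB)
shards = Counter(index["weight_map"].values())  # shard file -> tensor count
for shard, count in sorted(shards.items()):
    print(f"{shard}: {count} tensors")
```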
running_log.txt
ADDED
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[INFO|2025-05-12 09:58:27] tokenization_utils_base.py:2034 >> loading file tokenizer.model from cache at None
|
2 |
+
|
3 |
+
[INFO|2025-05-12 09:58:27] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at /home/kiho/.cache/huggingface/hub/models--deepseek-ai--deepseek-coder-7b-instruct-v1.5/snapshots/2a050a4c59d687a85324d32e147517992117ed30/tokenizer.json
|
4 |
+
|
5 |
+
[INFO|2025-05-12 09:58:27] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at None
|
6 |
+
|
7 |
+
[INFO|2025-05-12 09:58:27] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at None
|
8 |
+
|
9 |
+
[INFO|2025-05-12 09:58:27] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at /home/kiho/.cache/huggingface/hub/models--deepseek-ai--deepseek-coder-7b-instruct-v1.5/snapshots/2a050a4c59d687a85324d32e147517992117ed30/tokenizer_config.json
|
10 |
+
|
11 |
+
[INFO|2025-05-12 09:58:27] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None
|
12 |
+
|
13 |
+
[INFO|2025-05-12 09:58:28] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
14 |
+
|
15 |
+
[INFO|2025-05-12 09:58:29] configuration_utils.py:696 >> loading configuration file config.json from cache at /home/kiho/.cache/huggingface/hub/models--deepseek-ai--deepseek-coder-7b-instruct-v1.5/snapshots/2a050a4c59d687a85324d32e147517992117ed30/config.json
|
16 |
+
|
17 |
+
[INFO|2025-05-12 09:58:29] configuration_utils.py:768 >> Model config LlamaConfig {
|
18 |
+
"_name_or_path": "deepseek-ai/deepseek-coder-7b-instruct-v1.5",
|
19 |
+
"architectures": [
|
20 |
+
"LlamaForCausalLM"
|
21 |
+
],
|
22 |
+
"attention_bias": false,
|
23 |
+
"attention_dropout": 0.0,
|
24 |
+
"bos_token_id": 100000,
|
25 |
+
"eos_token_id": 100015,
|
26 |
+
"head_dim": 128,
|
27 |
+
"hidden_act": "silu",
|
28 |
+
"hidden_size": 4096,
|
29 |
+
"initializer_range": 0.02,
|
30 |
+
"intermediate_size": 11008,
|
31 |
+
"max_position_embeddings": 4096,
|
32 |
+
"mlp_bias": false,
|
33 |
+
"model_type": "llama",
|
34 |
+
"num_attention_heads": 32,
|
35 |
+
"num_hidden_layers": 30,
|
36 |
+
"num_key_value_heads": 32,
|
37 |
+
"pretraining_tp": 1,
|
38 |
+
"rms_norm_eps": 1e-06,
|
39 |
+
"rope_scaling": null,
|
40 |
+
"rope_theta": 10000.0,
|
41 |
+
"tie_word_embeddings": false,
|
42 |
+
"torch_dtype": "bfloat16",
|
43 |
+
"transformers_version": "4.48.2",
|
44 |
+
"use_cache": true,
|
45 |
+
"vocab_size": 102400
|
46 |
+
}
|
47 |
+
|
48 |
+
|
49 |
+
[INFO|2025-05-12 09:58:29] tokenization_utils_base.py:2034 >> loading file tokenizer.model from cache at None
|
50 |
+
|
51 |
+
[INFO|2025-05-12 09:58:29] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at /home/kiho/.cache/huggingface/hub/models--deepseek-ai--deepseek-coder-7b-instruct-v1.5/snapshots/2a050a4c59d687a85324d32e147517992117ed30/tokenizer.json
|
52 |
+
|
53 |
+
[INFO|2025-05-12 09:58:29] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at None
|
54 |
+
|
55 |
+
[INFO|2025-05-12 09:58:29] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at None
|
56 |
+
|
57 |
+
[INFO|2025-05-12 09:58:29] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at /home/kiho/.cache/huggingface/hub/models--deepseek-ai--deepseek-coder-7b-instruct-v1.5/snapshots/2a050a4c59d687a85324d32e147517992117ed30/tokenizer_config.json
|
58 |
+
|
59 |
+
[INFO|2025-05-12 09:58:29] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None
|
60 |
+
|
61 |
+
[INFO|2025-05-12 09:58:30] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
62 |
+
|
63 |
+
[INFO|2025-05-12 09:58:30] logging.py:157 >> Loading dataset Codes3_query_filtered_330k_nlx.json...
|
64 |
+
|
65 |
+
[INFO|2025-05-12 09:59:23] configuration_utils.py:696 >> loading configuration file config.json from cache at /home/kiho/.cache/huggingface/hub/models--deepseek-ai--deepseek-coder-7b-instruct-v1.5/snapshots/2a050a4c59d687a85324d32e147517992117ed30/config.json
|
66 |
+
|
67 |
+
[INFO|2025-05-12 09:59:23] configuration_utils.py:768 >> Model config LlamaConfig {
|
68 |
+
"_name_or_path": "deepseek-ai/deepseek-coder-7b-instruct-v1.5",
|
69 |
+
"architectures": [
|
70 |
+
"LlamaForCausalLM"
|
71 |
+
],
|
72 |
+
"attention_bias": false,
|
73 |
+
"attention_dropout": 0.0,
|
74 |
+
"bos_token_id": 100000,
|
75 |
+
"eos_token_id": 100015,
|
76 |
+
"head_dim": 128,
|
77 |
+
"hidden_act": "silu",
|
78 |
+
"hidden_size": 4096,
|
79 |
+
"initializer_range": 0.02,
|
80 |
+
"intermediate_size": 11008,
|
81 |
+
"max_position_embeddings": 4096,
|
82 |
+
"mlp_bias": false,
|
83 |
+
"model_type": "llama",
|
84 |
+
"num_attention_heads": 32,
|
85 |
+
"num_hidden_layers": 30,
|
86 |
+
"num_key_value_heads": 32,
|
87 |
+
"pretraining_tp": 1,
|
88 |
+
"rms_norm_eps": 1e-06,
|
89 |
+
"rope_scaling": null,
|
90 |
+
"rope_theta": 10000.0,
|
91 |
+
"tie_word_embeddings": false,
|
92 |
+
"torch_dtype": "bfloat16",
|
93 |
+
"transformers_version": "4.48.2",
|
94 |
+
"use_cache": true,
|
95 |
+
"vocab_size": 102400
|
96 |
+
}
|
97 |
+
|
98 |
+
|
99 |
+
[WARNING|2025-05-12 09:59:23] logging.py:162 >> Input length is smaller than max length. Consider increase input length.
|
100 |
+
|
101 |
+
[INFO|2025-05-12 09:59:23] logging.py:157 >> Using llama3 scaling strategy and setting scaling factor to 1.0.
|
102 |
+
|
103 |
+
[INFO|2025-05-12 09:59:23] logging.py:157 >> Using block diagonal attention for sequence packing without cross-attention.
|
104 |
+
|
105 |
+
[INFO|2025-05-12 09:59:23] logging.py:157 >> Liger kernel has been applied to the model.
|
106 |
+
|
107 |
+
[INFO|2025-05-12 09:59:24] modeling_utils.py:3904 >> loading weights file model.safetensors from cache at /home/kiho/.cache/huggingface/hub/models--deepseek-ai--deepseek-coder-7b-instruct-v1.5/snapshots/2a050a4c59d687a85324d32e147517992117ed30/model.safetensors.index.json
|
108 |
+
|
109 |
+
[INFO|2025-05-12 10:01:40] modeling_utils.py:1582 >> Instantiating LlamaForCausalLM model under default dtype torch.bfloat16.
|
110 |
+
|
111 |
+
[INFO|2025-05-12 10:01:40] configuration_utils.py:1140 >> Generate config GenerationConfig {
|
112 |
+
"bos_token_id": 100000,
|
113 |
+
"eos_token_id": 100015
|
114 |
+
}
|
115 |
+
|
116 |
+
|
117 |
+
[INFO|2025-05-12 10:01:45] modeling_utils.py:4888 >> All model checkpoint weights were used when initializing LlamaForCausalLM.
|
118 |
+
|
119 |
+
|
120 |
+
[INFO|2025-05-12 10:01:45] modeling_utils.py:4896 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at deepseek-ai/deepseek-coder-7b-instruct-v1.5.
|
121 |
+
If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
|
122 |
+
|
123 |
+
[INFO|2025-05-12 10:01:46] configuration_utils.py:1095 >> loading configuration file generation_config.json from cache at /home/kiho/.cache/huggingface/hub/models--deepseek-ai--deepseek-coder-7b-instruct-v1.5/snapshots/2a050a4c59d687a85324d32e147517992117ed30/generation_config.json
|
124 |
+
|
125 |
+
[INFO|2025-05-12 10:01:46] configuration_utils.py:1140 >> Generate config GenerationConfig {
|
126 |
+
"bos_token_id": 100000,
|
127 |
+
"eos_token_id": 100015
|
128 |
+
}
|
129 |
+
|
130 |
+
|
131 |
+
[INFO|2025-05-12 10:01:46] logging.py:157 >> Gradient checkpointing enabled.
|
132 |
+
|
133 |
+
[INFO|2025-05-12 10:01:46] logging.py:157 >> Using torch SDPA for faster training and inference.
|
134 |
+
|
135 |
+
[INFO|2025-05-12 10:01:46] logging.py:157 >> Upcasting trainable params to float32.
|
136 |
+
|
137 |
+
[INFO|2025-05-12 10:01:46] logging.py:157 >> Fine-tuning method: Freeze
|
138 |
+
|
139 |
+
[INFO|2025-05-12 10:01:46] logging.py:157 >> Set trainable layers: .14.,.29.
|
140 |
+
|
141 |
+
[INFO|2025-05-12 10:01:46] logging.py:157 >> trainable params: 404,766,720 || all params: 6,910,365,696 || trainable%: 5.8574
|
142 |
+
|
143 |
+
[INFO|2025-05-12 10:01:46] trainer.py:741 >> Using auto half precision backend
|
144 |
+
|
145 |
+
[INFO|2025-05-12 10:01:47] logging.py:157 >> Found linear modules: k_proj,v_proj,o_proj,down_proj,q_proj,up_proj,gate_proj
|
146 |
+
|
147 |
+
[INFO|2025-05-12 10:01:47] logging.py:157 >> Using APOLLO optimizer with args: {'rank': 256, 'proj': 'random', 'proj_type': 'std', 'update_proj_gap': 200, 'scale': 1, 'scale_type': 'channel', 'scale_front': False}.
|
148 |
+
|
149 |
+
[INFO|2025-05-12 10:01:47] trainer.py:2369 >> ***** Running training *****
|
150 |
+
|
151 |
+
[INFO|2025-05-12 10:01:47] trainer.py:2370 >> Num examples = 35,434
|
152 |
+
|
153 |
+
[INFO|2025-05-12 10:01:47] trainer.py:2371 >> Num Epochs = 1
|
154 |
+
|
155 |
+
[INFO|2025-05-12 10:01:47] trainer.py:2372 >> Instantaneous batch size per device = 16
|
156 |
+
|
157 |
+
[INFO|2025-05-12 10:01:47] trainer.py:2375 >> Total train batch size (w. parallel, distributed & accumulation) = 512
|
158 |
+
|
159 |
+
[INFO|2025-05-12 10:01:47] trainer.py:2376 >> Gradient Accumulation steps = 8
|
160 |
+
|
161 |
+
[INFO|2025-05-12 10:01:47] trainer.py:2377 >> Total optimization steps = 69
|
162 |
+
|
163 |
+
[INFO|2025-05-12 10:01:47] trainer.py:2378 >> Number of trainable parameters = 404,766,720
|
164 |
+
|
165 |
+
[INFO|2025-05-12 10:04:36] logging.py:157 >> {'loss': 0.8402, 'learning_rate': 4.9974e-05, 'epoch': 0.01, 'throughput': 12482.16}
|
166 |
+
|
167 |
+
[INFO|2025-05-12 10:07:14] logging.py:157 >> {'loss': 0.7686, 'learning_rate': 4.9896e-05, 'epoch': 0.03, 'throughput': 12852.44}
|
168 |
+
|
169 |
+
[INFO|2025-05-12 10:09:52] logging.py:157 >> {'loss': 0.7563, 'learning_rate': 4.9767e-05, 'epoch': 0.04, 'throughput': 12981.50}
|
170 |
+
|
171 |
+
[INFO|2025-05-12 10:12:30] logging.py:157 >> {'loss': 0.7139, 'learning_rate': 4.9587e-05, 'epoch': 0.06, 'throughput': 13054.16}
|
172 |
+
|
173 |
+
[INFO|2025-05-12 10:15:08] logging.py:157 >> {'loss': 0.6793, 'learning_rate': 4.9355e-05, 'epoch': 0.07, 'throughput': 13098.17}
|
174 |
+
|
175 |
+
[INFO|2025-05-12 10:17:46] logging.py:157 >> {'loss': 0.6631, 'learning_rate': 4.9073e-05, 'epoch': 0.09, 'throughput': 13124.77}
|
176 |
+
|
177 |
+
[INFO|2025-05-12 10:20:25] logging.py:157 >> {'loss': 0.6400, 'learning_rate': 4.8741e-05, 'epoch': 0.10, 'throughput': 13144.27}
|
178 |
+
|
179 |
+
[INFO|2025-05-12 10:23:03] logging.py:157 >> {'loss': 0.6145, 'learning_rate': 4.8360e-05, 'epoch': 0.12, 'throughput': 13158.63}
|
180 |
+
|
181 |
+
[INFO|2025-05-12 10:25:41] logging.py:157 >> {'loss': 0.5997, 'learning_rate': 4.7930e-05, 'epoch': 0.13, 'throughput': 13173.26}
|
182 |
+
|
183 |
+
[INFO|2025-05-12 10:28:19] logging.py:157 >> {'loss': 0.5896, 'learning_rate': 4.7453e-05, 'epoch': 0.14, 'throughput': 13182.64}
|
184 |
+
|
185 |
+
[INFO|2025-05-12 10:30:56] logging.py:157 >> {'loss': 0.6025, 'learning_rate': 4.6930e-05, 'epoch': 0.16, 'throughput': 13191.88}
|
186 |
+
|
187 |
+
[INFO|2025-05-12 10:33:35] logging.py:157 >> {'loss': 0.5644, 'learning_rate': 4.6360e-05, 'epoch': 0.17, 'throughput': 13197.39}
|
188 |
+
|
189 |
+
[INFO|2025-05-12 10:36:12] logging.py:157 >> {'loss': 0.5558, 'learning_rate': 4.5747e-05, 'epoch': 0.19, 'throughput': 13204.31}
|
190 |
+
|
191 |
+
[INFO|2025-05-12 10:38:51] logging.py:157 >> {'loss': 0.5705, 'learning_rate': 4.5091e-05, 'epoch': 0.20, 'throughput': 13208.35}
|
192 |
+
|
193 |
+
[INFO|2025-05-12 10:41:29] logging.py:157 >> {'loss': 0.5694, 'learning_rate': 4.4393e-05, 'epoch': 0.22, 'throughput': 13212.19}
|
194 |
+
|
195 |
+
[INFO|2025-05-12 10:44:06] logging.py:157 >> {'loss': 0.5590, 'learning_rate': 4.3655e-05, 'epoch': 0.23, 'throughput': 13217.11}
|
196 |
+
|
197 |
+
[INFO|2025-05-12 10:46:44] logging.py:157 >> {'loss': 0.5628, 'learning_rate': 4.2878e-05, 'epoch': 0.25, 'throughput': 13220.46}
|
198 |
+
|
199 |
+
[INFO|2025-05-12 10:49:23] logging.py:157 >> {'loss': 0.5553, 'learning_rate': 4.2064e-05, 'epoch': 0.26, 'throughput': 13223.11}
|
200 |
+
|
201 |
+
[INFO|2025-05-12 10:52:01] logging.py:157 >> {'loss': 0.5401, 'learning_rate': 4.1215e-05, 'epoch': 0.27, 'throughput': 13225.48}
|
202 |
+
|
203 |
+
[INFO|2025-05-12 10:54:39] logging.py:157 >> {'loss': 0.5750, 'learning_rate': 4.0332e-05, 'epoch': 0.29, 'throughput': 13226.89}
|
204 |
+
|
205 |
+
[INFO|2025-05-12 10:57:17] logging.py:157 >> {'loss': 0.5398, 'learning_rate': 3.9417e-05, 'epoch': 0.30, 'throughput': 13228.58}
|
206 |
+
|
207 |
+
[INFO|2025-05-12 10:59:55] logging.py:157 >> {'loss': 0.5389, 'learning_rate': 3.8472e-05, 'epoch': 0.32, 'throughput': 13231.10}
|
208 |
+
|
209 |
+
[INFO|2025-05-12 11:02:32] logging.py:157 >> {'loss': 0.5451, 'learning_rate': 3.7500e-05, 'epoch': 0.33, 'throughput': 13234.31}
|
210 |
+
|
211 |
+
[INFO|2025-05-12 11:05:11] logging.py:157 >> {'loss': 0.5558, 'learning_rate': 3.6502e-05, 'epoch': 0.35, 'throughput': 13235.05}
|
212 |
+
|
213 |
+
[INFO|2025-05-12 11:07:48] logging.py:157 >> {'loss': 0.5403, 'learning_rate': 3.5479e-05, 'epoch': 0.36, 'throughput': 13238.00}
|
214 |
+
|
215 |
+
[INFO|2025-05-12 11:10:26] logging.py:157 >> {'loss': 0.5405, 'learning_rate': 3.4435e-05, 'epoch': 0.38, 'throughput': 13240.74}
|
216 |
+
|
217 |
+
[INFO|2025-05-12 11:13:04] logging.py:157 >> {'loss': 0.5577, 'learning_rate': 3.3372e-05, 'epoch': 0.39, 'throughput': 13242.13}
|
218 |
+
|
219 |
+
[INFO|2025-05-12 11:15:42] logging.py:157 >> {'loss': 0.5345, 'learning_rate': 3.2291e-05, 'epoch': 0.40, 'throughput': 13243.46}
|
220 |
+
|
221 |
+
[INFO|2025-05-12 11:18:20] logging.py:157 >> {'loss': 0.5484, 'learning_rate': 3.1195e-05, 'epoch': 0.42, 'throughput': 13243.76}
|
222 |
+
|
223 |
+
[INFO|2025-05-12 11:20:58] logging.py:157 >> {'loss': 0.5485, 'learning_rate': 3.0086e-05, 'epoch': 0.43, 'throughput': 13245.01}
|
224 |
+
|
225 |
+
[INFO|2025-05-12 11:23:36] logging.py:157 >> {'loss': 0.5299, 'learning_rate': 2.8967e-05, 'epoch': 0.45, 'throughput': 13245.72}
|
226 |
+
|
227 |
+
[INFO|2025-05-12 11:26:14] logging.py:157 >> {'loss': 0.5390, 'learning_rate': 2.7840e-05, 'epoch': 0.46, 'throughput': 13246.45}
|
228 |
+
|
229 |
+
[INFO|2025-05-12 11:28:52] logging.py:157 >> {'loss': 0.5248, 'learning_rate': 2.6706e-05, 'epoch': 0.48, 'throughput': 13247.54}
|
230 |
+
|
231 |
+
[INFO|2025-05-12 11:31:30] logging.py:157 >> {'loss': 0.5342, 'learning_rate': 2.5569e-05, 'epoch': 0.49, 'throughput': 13248.46}
|
232 |
+
|
233 |
+
[INFO|2025-05-12 11:34:08] logging.py:157 >> {'loss': 0.5431, 'learning_rate': 2.4431e-05, 'epoch': 0.51, 'throughput': 13249.53}
|
234 |
+
|
235 |
+
[INFO|2025-05-12 11:36:45] logging.py:157 >> {'loss': 0.5471, 'learning_rate': 2.3294e-05, 'epoch': 0.52, 'throughput': 13250.51}
|
236 |
+
|
237 |
+
[INFO|2025-05-12 11:39:23] logging.py:157 >> {'loss': 0.5420, 'learning_rate': 2.2160e-05, 'epoch': 0.53, 'throughput': 13251.85}
|
238 |
+
|
239 |
+
[INFO|2025-05-12 11:42:01] logging.py:157 >> {'loss': 0.5420, 'learning_rate': 2.1033e-05, 'epoch': 0.55, 'throughput': 13253.57}
|
240 |
+
|
241 |
+
[INFO|2025-05-12 11:44:38] logging.py:157 >> {'loss': 0.5420, 'learning_rate': 1.9914e-05, 'epoch': 0.56, 'throughput': 13255.10}
|
242 |
+
|
243 |
+
[INFO|2025-05-12 11:47:16] logging.py:157 >> {'loss': 0.5425, 'learning_rate': 1.8805e-05, 'epoch': 0.58, 'throughput': 13256.36}
|
244 |
+
|
245 |
+
[INFO|2025-05-12 11:49:54] logging.py:157 >> {'loss': 0.5482, 'learning_rate': 1.7709e-05, 'epoch': 0.59, 'throughput': 13257.04}
|
246 |
+
|
247 |
+
[INFO|2025-05-12 11:52:31] logging.py:157 >> {'loss': 0.5598, 'learning_rate': 1.6628e-05, 'epoch': 0.61, 'throughput': 13258.73}
|
248 |
+
|
249 |
+
[INFO|2025-05-12 11:55:08] logging.py:157 >> {'loss': 0.5402, 'learning_rate': 1.5565e-05, 'epoch': 0.62, 'throughput': 13260.29}
|
250 |
+
|
251 |
+
[INFO|2025-05-12 11:57:46] logging.py:157 >> {'loss': 0.5413, 'learning_rate': 1.4521e-05, 'epoch': 0.64, 'throughput': 13261.50}
|
252 |
+
|
253 |
+
[INFO|2025-05-12 12:00:24] logging.py:157 >> {'loss': 0.5560, 'learning_rate': 1.3498e-05, 'epoch': 0.65, 'throughput': 13262.39}
|
254 |
+
|
255 |
+
[INFO|2025-05-12 12:03:01] logging.py:157 >> {'loss': 0.5341, 'learning_rate': 1.2500e-05, 'epoch': 0.66, 'throughput': 13263.23}
|
256 |
+
|
257 |
+
[INFO|2025-05-12 12:05:39] logging.py:157 >> {'loss': 0.5436, 'learning_rate': 1.1528e-05, 'epoch': 0.68, 'throughput': 13264.02}
|
258 |
+
|
259 |
+
[INFO|2025-05-12 12:08:16] logging.py:157 >> {'loss': 0.5393, 'learning_rate': 1.0583e-05, 'epoch': 0.69, 'throughput': 13264.84}
|
260 |
+
|
261 |
+
[INFO|2025-05-12 12:10:54] logging.py:157 >> {'loss': 0.5598, 'learning_rate': 9.6683e-06, 'epoch': 0.71, 'throughput': 13265.65}
|
262 |
+
|
263 |
+
[INFO|2025-05-12 12:13:32] logging.py:157 >> {'loss': 0.5329, 'learning_rate': 8.7854e-06, 'epoch': 0.72, 'throughput': 13265.87}
|
264 |
+
|
265 |
+
[INFO|2025-05-12 12:16:10] logging.py:157 >> {'loss': 0.5384, 'learning_rate': 7.9362e-06, 'epoch': 0.74, 'throughput': 13266.73}
|
266 |
+
|
267 |
+
[INFO|2025-05-12 12:18:47] logging.py:157 >> {'loss': 0.5447, 'learning_rate': 7.1223e-06, 'epoch': 0.75, 'throughput': 13267.67}
|
268 |
+
|
269 |
+
[INFO|2025-05-12 12:21:25] logging.py:157 >> {'loss': 0.5291, 'learning_rate': 6.3454e-06, 'epoch': 0.77, 'throughput': 13268.20}
|
270 |
+
|
271 |
+
[INFO|2025-05-12 12:24:02] logging.py:157 >> {'loss': 0.5258, 'learning_rate': 5.6072e-06, 'epoch': 0.78, 'throughput': 13269.48}
|
272 |
+
|
273 |
+
[INFO|2025-05-12 12:26:40] logging.py:157 >> {'loss': 0.5508, 'learning_rate': 4.9092e-06, 'epoch': 0.79, 'throughput': 13270.12}
|
274 |
+
|
275 |
+
[INFO|2025-05-12 12:29:18] logging.py:157 >> {'loss': 0.5439, 'learning_rate': 4.2529e-06, 'epoch': 0.81, 'throughput': 13270.24}
|
276 |
+
|
277 |
+
[INFO|2025-05-12 12:31:56] logging.py:157 >> {'loss': 0.5261, 'learning_rate': 3.6395e-06, 'epoch': 0.82, 'throughput': 13270.03}
|
278 |
+
|
279 |
+
[INFO|2025-05-12 12:34:34] logging.py:157 >> {'loss': 0.5518, 'learning_rate': 3.0704e-06, 'epoch': 0.84, 'throughput': 13270.36}
|
280 |
+
|
281 |
+
[INFO|2025-05-12 12:37:11] logging.py:157 >> {'loss': 0.5444, 'learning_rate': 2.5468e-06, 'epoch': 0.85, 'throughput': 13270.70}
[INFO|2025-05-12 12:39:49] logging.py:157 >> {'loss': 0.5469, 'learning_rate': 2.0697e-06, 'epoch': 0.87, 'throughput': 13270.96}
[INFO|2025-05-12 12:42:27] logging.py:157 >> {'loss': 0.5335, 'learning_rate': 1.6402e-06, 'epoch': 0.88, 'throughput': 13271.26}
[INFO|2025-05-12 12:45:04] logging.py:157 >> {'loss': 0.5367, 'learning_rate': 1.2590e-06, 'epoch': 0.90, 'throughput': 13272.14}
[INFO|2025-05-12 12:47:42] logging.py:157 >> {'loss': 0.5510, 'learning_rate': 9.2707e-07, 'epoch': 0.91, 'throughput': 13272.46}
[INFO|2025-05-12 12:50:20] logging.py:157 >> {'loss': 0.5597, 'learning_rate': 6.4502e-07, 'epoch': 0.92, 'throughput': 13272.79}
[INFO|2025-05-12 12:52:57] logging.py:157 >> {'loss': 0.5369, 'learning_rate': 4.1346e-07, 'epoch': 0.94, 'throughput': 13273.81}
[INFO|2025-05-12 12:55:33] logging.py:157 >> {'loss': 0.5451, 'learning_rate': 2.3285e-07, 'epoch': 0.95, 'throughput': 13276.09}
[INFO|2025-05-12 12:58:09] logging.py:157 >> {'loss': 0.5237, 'learning_rate': 1.0358e-07, 'epoch': 0.97, 'throughput': 13279.42}
[INFO|2025-05-12 13:00:44] logging.py:157 >> {'loss': 0.5604, 'learning_rate': 2.5908e-08, 'epoch': 0.98, 'throughput': 13283.18}
[INFO|2025-05-12 13:03:19] logging.py:157 >> {'loss': 0.5340, 'learning_rate': 0.0000e+00, 'epoch': 1.00, 'throughput': 13286.80}
[INFO|2025-05-12 13:03:19] trainer.py:3910 >> Saving model checkpoint to saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/checkpoint-69
[INFO|2025-05-12 13:03:19] configuration_utils.py:420 >> Configuration saved in saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/checkpoint-69/config.json
[INFO|2025-05-12 13:03:19] configuration_utils.py:909 >> Configuration saved in saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/checkpoint-69/generation_config.json
[INFO|2025-05-12 13:03:40] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 3 checkpoint shards. You can find where each parameters has been saved in the index located at saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/checkpoint-69/model.safetensors.index.json.
[INFO|2025-05-12 13:03:40] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/checkpoint-69/tokenizer_config.json
[INFO|2025-05-12 13:03:40] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/checkpoint-69/special_tokens_map.json
[INFO|2025-05-12 13:03:40] trainer.py:2643 >>

Training completed. Do not forget to share your model on huggingface.co/models =)

[INFO|2025-05-12 13:03:40] trainer.py:3910 >> Saving model checkpoint to saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k
[INFO|2025-05-12 13:03:40] configuration_utils.py:420 >> Configuration saved in saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/config.json
[INFO|2025-05-12 13:03:40] configuration_utils.py:909 >> Configuration saved in saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/generation_config.json
[INFO|2025-05-12 13:04:02] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 3 checkpoint shards. You can find where each parameters has been saved in the index located at saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/model.safetensors.index.json.
[INFO|2025-05-12 13:04:02] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/tokenizer_config.json
[INFO|2025-05-12 13:04:02] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k/special_tokens_map.json
[WARNING|2025-05-12 13:04:02] logging.py:162 >> No metric eval_loss to plot.
[WARNING|2025-05-12 13:04:02] logging.py:162 >> No metric eval_accuracy to plot.
[INFO|2025-05-12 13:04:02] modelcard.py:449 >> Dropping the following result as it does not have all the necessary fields:
{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
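These per-step records follow a fixed `logging.py:157 >> {...}` shape, so they can be pulled out of the log programmatically. A minimal sketch using only the standard library, assuming the log is saved as `running_log.txt` as in this repo:

```python
import ast
import re

# Matches lines like: [INFO|2025-05-12 12:37:11] logging.py:157 >> {'loss': ...}
LOG_LINE = re.compile(r"^\[INFO\|[^\]]+\] logging\.py:\d+ >> (\{.*\})$")

records = []
with open("running_log.txt") as f:
    for line in f:
        m = LOG_LINE.match(line.strip())
        if m:
            # The payloads are Python dict literals (single quotes), not JSON.
            records.append(ast.literal_eval(m.group(1)))

print(records[-1])  # {'loss': 0.534, 'learning_rate': 0.0, 'epoch': 1.0, 'throughput': 13286.8}
```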
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<|begin▁of▁sentence|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|EOT|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|end▁of▁sentence|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
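These special tokens can be checked after loading the tokenizer from this checkpoint; a minimal sketch (the local path is illustrative):

```python
from transformers import AutoTokenizer

# Load the tokenizer shipped with this checkpoint (path is illustrative).
tok = AutoTokenizer.from_pretrained("./deepseek-nlx-330k")

# The values below should match special_tokens_map.json above.
print(tok.bos_token)  # <|begin▁of▁sentence|>
print(tok.eos_token)  # <|EOT|>
print(tok.pad_token)  # <|end▁of▁sentence|>
```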
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1,149 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": null,
  "added_tokens_decoder": {
    "100000": {"content": "<|begin▁of▁sentence|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": true},
    "100001": {"content": "<|end▁of▁sentence|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": true},
    "100002": {"content": "ø", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100003": {"content": "ö", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100004": {"content": "ú", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100005": {"content": "ÿ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100006": {"content": "õ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100007": {"content": "÷", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100008": {"content": "û", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100009": {"content": "ý", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100010": {"content": "À", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100011": {"content": "ù", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100012": {"content": "Á", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100013": {"content": "þ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100014": {"content": "ü", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "100015": {"content": "<|EOT|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": true}
  },
  "bos_token": "<|begin▁of▁sentence|>",
  "chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|EOT|>",
  "extra_special_tokens": {},
  "legacy": true,
  "model_max_length": 4096,
  "pad_token": "<|end▁of▁sentence|>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "split_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": null,
  "use_default_system_prompt": false
}
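The `chat_template` above encodes the deepseekcoder prompt format used during training: a default system prompt when none is supplied, `### Instruction:` for user turns, and `### Response:` followed by `<|EOT|>` for assistant turns. A minimal usage sketch (the local path is illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./deepseek-nlx-330k")  # illustrative local path

messages = [{"role": "user", "content": "Write a function that reverses a string."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Expected shape per the template above: the default system prompt, then
# "### Instruction:\n...\n" and a trailing "### Response:" generation cue.
```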
train_results.json
ADDED
@@ -0,0 +1,9 @@
{
  "epoch": 0.9963898916967509,
  "num_input_tokens_seen": 144703488,
  "total_flos": 5.635565866281075e+18,
  "train_loss": 0.5665888682655666,
  "train_runtime": 10913.0245,
  "train_samples_per_second": 3.247,
  "train_steps_per_second": 0.006
}
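These totals are internally consistent with the run configuration: with packing at a 4096-token cutoff, 144,703,488 tokens correspond to exactly 35,328 packed sequences, which at the effective batch size of 512 (16 per device × 8 accumulation steps × 4 GPUs) gives exactly 69 optimizer steps; dividing tokens by the runtime reproduces the ~13,260 tokens/s throughput in the final log entry. A quick check:

```python
tokens = 144_703_488
cutoff_len = 4096                # cutoff_len in training_args.yaml below
effective_batch = 16 * 8 * 4     # per-device batch * grad accum * num GPUs = 512

sequences = tokens // cutoff_len        # 35328 packed sequences
steps = sequences // effective_batch    # 69 optimizer steps (no remainder)
throughput = tokens / 10913.0245        # ~13260 tokens/s

print(sequences, steps, round(throughput))  # 35328 69 13260
```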
trainer_log.jsonl
ADDED
@@ -0,0 +1,70 @@
{"current_steps": 1, "total_steps": 69, "loss": 0.8402, "lr": 4.9974091841168195e-05, "epoch": 0.01444043321299639, "percentage": 1.45, "elapsed_time": "0:02:48", "remaining_time": "3:10:24", "throughput": 12482.16, "total_tokens": 2097152}
{"current_steps": 2, "total_steps": 69, "loss": 0.7686, "lr": 4.9896421063288286e-05, "epoch": 0.02888086642599278, "percentage": 2.9, "elapsed_time": "0:05:26", "remaining_time": "3:02:12", "throughput": 12852.44, "total_tokens": 4194304}
{"current_steps": 3, "total_steps": 69, "loss": 0.7563, "lr": 4.976714865090827e-05, "epoch": 0.04332129963898917, "percentage": 4.35, "elapsed_time": "0:08:04", "remaining_time": "2:57:42", "throughput": 12981.5, "total_tokens": 6291456}
{"current_steps": 4, "total_steps": 69, "loss": 0.7139, "lr": 4.958654254084355e-05, "epoch": 0.05776173285198556, "percentage": 5.8, "elapsed_time": "0:10:42", "remaining_time": "2:54:02", "throughput": 13054.16, "total_tokens": 8388608}
{"current_steps": 5, "total_steps": 69, "loss": 0.6793, "lr": 4.9354977066836986e-05, "epoch": 0.07220216606498195, "percentage": 7.25, "elapsed_time": "0:13:20", "remaining_time": "2:50:47", "throughput": 13098.17, "total_tokens": 10485760}
{"current_steps": 6, "total_steps": 69, "loss": 0.6631, "lr": 4.907293218369499e-05, "epoch": 0.08664259927797834, "percentage": 8.7, "elapsed_time": "0:15:58", "remaining_time": "2:47:46", "throughput": 13124.77, "total_tokens": 12582912}
{"current_steps": 7, "total_steps": 69, "loss": 0.64, "lr": 4.874099247250798e-05, "epoch": 0.10108303249097472, "percentage": 10.14, "elapsed_time": "0:18:36", "remaining_time": "2:44:52", "throughput": 13144.27, "total_tokens": 14680064}
{"current_steps": 8, "total_steps": 69, "loss": 0.6145, "lr": 4.835984592901678e-05, "epoch": 0.11552346570397112, "percentage": 11.59, "elapsed_time": "0:21:14", "remaining_time": "2:42:01", "throughput": 13158.63, "total_tokens": 16777216}
{"current_steps": 9, "total_steps": 69, "loss": 0.5997, "lr": 4.793028253763633e-05, "epoch": 0.1299638989169675, "percentage": 13.04, "elapsed_time": "0:23:52", "remaining_time": "2:39:11", "throughput": 13173.26, "total_tokens": 18874368}
{"current_steps": 10, "total_steps": 69, "loss": 0.5896, "lr": 4.74531926340924e-05, "epoch": 0.1444043321299639, "percentage": 14.49, "elapsed_time": "0:26:30", "remaining_time": "2:36:25", "throughput": 13182.64, "total_tokens": 20971520}
{"current_steps": 11, "total_steps": 69, "loss": 0.6025, "lr": 4.6929565060064864e-05, "epoch": 0.1588447653429603, "percentage": 15.94, "elapsed_time": "0:29:08", "remaining_time": "2:33:40", "throughput": 13191.88, "total_tokens": 23068672}
{"current_steps": 12, "total_steps": 69, "loss": 0.5644, "lr": 4.6360485113662216e-05, "epoch": 0.17328519855595667, "percentage": 17.39, "elapsed_time": "0:31:46", "remaining_time": "2:30:57", "throughput": 13197.39, "total_tokens": 25165824}
{"current_steps": 13, "total_steps": 69, "loss": 0.5558, "lr": 4.574713229997563e-05, "epoch": 0.18772563176895307, "percentage": 18.84, "elapsed_time": "0:34:24", "remaining_time": "2:28:14", "throughput": 13204.31, "total_tokens": 27262976}
{"current_steps": 14, "total_steps": 69, "loss": 0.5705, "lr": 4.509077788637446e-05, "epoch": 0.20216606498194944, "percentage": 20.29, "elapsed_time": "0:37:02", "remaining_time": "2:25:32", "throughput": 13208.35, "total_tokens": 29360128}
{"current_steps": 15, "total_steps": 69, "loss": 0.5694, "lr": 4.43927822676105e-05, "epoch": 0.21660649819494585, "percentage": 21.74, "elapsed_time": "0:39:40", "remaining_time": "2:22:51", "throughput": 13212.19, "total_tokens": 31457280}
{"current_steps": 16, "total_steps": 69, "loss": 0.559, "lr": 4.365459214619214e-05, "epoch": 0.23104693140794225, "percentage": 23.19, "elapsed_time": "0:42:18", "remaining_time": "2:20:09", "throughput": 13217.11, "total_tokens": 33554432}
{"current_steps": 17, "total_steps": 69, "loss": 0.5628, "lr": 4.2877737533872485e-05, "epoch": 0.24548736462093862, "percentage": 24.64, "elapsed_time": "0:44:56", "remaining_time": "2:17:28", "throughput": 13220.46, "total_tokens": 35651584}
{"current_steps": 18, "total_steps": 69, "loss": 0.5553, "lr": 4.206382858046636e-05, "epoch": 0.259927797833935, "percentage": 26.09, "elapsed_time": "0:47:34", "remaining_time": "2:14:48", "throughput": 13223.11, "total_tokens": 37748736}
{"current_steps": 19, "total_steps": 69, "loss": 0.5401, "lr": 4.12145522365689e-05, "epoch": 0.2743682310469314, "percentage": 27.54, "elapsed_time": "0:50:12", "remaining_time": "2:12:08", "throughput": 13225.48, "total_tokens": 39845888}
{"current_steps": 20, "total_steps": 69, "loss": 0.575, "lr": 4.033166875709291e-05, "epoch": 0.2888086642599278, "percentage": 28.99, "elapsed_time": "0:52:51", "remaining_time": "2:09:29", "throughput": 13226.89, "total_tokens": 41943040}
{"current_steps": 21, "total_steps": 69, "loss": 0.5398, "lr": 3.941700805287168e-05, "epoch": 0.30324909747292417, "percentage": 30.43, "elapsed_time": "0:55:29", "remaining_time": "2:06:49", "throughput": 13228.58, "total_tokens": 44040192}
{"current_steps": 22, "total_steps": 69, "loss": 0.5389, "lr": 3.8472465897889394e-05, "epoch": 0.3176895306859206, "percentage": 31.88, "elapsed_time": "0:58:07", "remaining_time": "2:04:09", "throughput": 13231.1, "total_tokens": 46137344}
{"current_steps": 23, "total_steps": 69, "loss": 0.5451, "lr": 3.7500000000000003e-05, "epoch": 0.33212996389891697, "percentage": 33.33, "elapsed_time": "1:00:44", "remaining_time": "2:01:29", "throughput": 13234.31, "total_tokens": 48234496}
{"current_steps": 24, "total_steps": 69, "loss": 0.5558, "lr": 3.6501625943278805e-05, "epoch": 0.34657039711191334, "percentage": 34.78, "elapsed_time": "1:03:22", "remaining_time": "1:58:50", "throughput": 13235.05, "total_tokens": 50331648}
{"current_steps": 25, "total_steps": 69, "loss": 0.5403, "lr": 3.547941301041661e-05, "epoch": 0.36101083032490977, "percentage": 36.23, "elapsed_time": "1:06:00", "remaining_time": "1:56:10", "throughput": 13238.0, "total_tokens": 52428800}
{"current_steps": 26, "total_steps": 69, "loss": 0.5405, "lr": 3.443547989381536e-05, "epoch": 0.37545126353790614, "percentage": 37.68, "elapsed_time": "1:08:38", "remaining_time": "1:53:30", "throughput": 13240.74, "total_tokens": 54525952}
{"current_steps": 27, "total_steps": 69, "loss": 0.5577, "lr": 3.3371990304274656e-05, "epoch": 0.3898916967509025, "percentage": 39.13, "elapsed_time": "1:11:15", "remaining_time": "1:50:51", "throughput": 13242.13, "total_tokens": 56623104}
{"current_steps": 28, "total_steps": 69, "loss": 0.5345, "lr": 3.2291148486370626e-05, "epoch": 0.4043321299638989, "percentage": 40.58, "elapsed_time": "1:13:53", "remaining_time": "1:48:12", "throughput": 13243.46, "total_tokens": 58720256}
{"current_steps": 29, "total_steps": 69, "loss": 0.5484, "lr": 3.11951946498225e-05, "epoch": 0.4187725631768953, "percentage": 42.03, "elapsed_time": "1:16:32", "remaining_time": "1:45:34", "throughput": 13243.76, "total_tokens": 60817408}
{"current_steps": 30, "total_steps": 69, "loss": 0.5485, "lr": 3.008640032631585e-05, "epoch": 0.4332129963898917, "percentage": 43.48, "elapsed_time": "1:19:10", "remaining_time": "1:42:55", "throughput": 13245.01, "total_tokens": 62914560}
{"current_steps": 31, "total_steps": 69, "loss": 0.5299, "lr": 2.8967063661406285e-05, "epoch": 0.44765342960288806, "percentage": 44.93, "elapsed_time": "1:21:48", "remaining_time": "1:40:16", "throughput": 13245.72, "total_tokens": 65011712}
{"current_steps": 32, "total_steps": 69, "loss": 0.539, "lr": 2.7839504651261872e-05, "epoch": 0.4620938628158845, "percentage": 46.38, "elapsed_time": "1:24:26", "remaining_time": "1:37:37", "throughput": 13246.45, "total_tokens": 67108864}
{"current_steps": 33, "total_steps": 69, "loss": 0.5248, "lr": 2.6706060334116777e-05, "epoch": 0.47653429602888087, "percentage": 47.83, "elapsed_time": "1:27:04", "remaining_time": "1:34:58", "throughput": 13247.54, "total_tokens": 69206016}
{"current_steps": 34, "total_steps": 69, "loss": 0.5342, "lr": 2.556907994640264e-05, "epoch": 0.49097472924187724, "percentage": 49.28, "elapsed_time": "1:29:41", "remaining_time": "1:32:20", "throughput": 13248.46, "total_tokens": 71303168}
{"current_steps": 35, "total_steps": 69, "loss": 0.5431, "lr": 2.4430920053597356e-05, "epoch": 0.5054151624548736, "percentage": 50.72, "elapsed_time": "1:32:19", "remaining_time": "1:29:41", "throughput": 13249.53, "total_tokens": 73400320}
{"current_steps": 36, "total_steps": 69, "loss": 0.5471, "lr": 2.329393966588323e-05, "epoch": 0.51985559566787, "percentage": 52.17, "elapsed_time": "1:34:57", "remaining_time": "1:27:02", "throughput": 13250.51, "total_tokens": 75497472}
{"current_steps": 37, "total_steps": 69, "loss": 0.542, "lr": 2.2160495348738123e-05, "epoch": 0.5342960288808665, "percentage": 53.62, "elapsed_time": "1:37:35", "remaining_time": "1:24:24", "throughput": 13251.85, "total_tokens": 77594624}
{"current_steps": 38, "total_steps": 69, "loss": 0.542, "lr": 2.1032936338593718e-05, "epoch": 0.5487364620938628, "percentage": 55.07, "elapsed_time": "1:40:12", "remaining_time": "1:21:45", "throughput": 13253.57, "total_tokens": 79691776}
{"current_steps": 39, "total_steps": 69, "loss": 0.542, "lr": 1.991359967368416e-05, "epoch": 0.5631768953068592, "percentage": 56.52, "elapsed_time": "1:42:50", "remaining_time": "1:19:06", "throughput": 13255.1, "total_tokens": 81788928}
{"current_steps": 40, "total_steps": 69, "loss": 0.5425, "lr": 1.8804805350177505e-05, "epoch": 0.5776173285198556, "percentage": 57.97, "elapsed_time": "1:45:27", "remaining_time": "1:16:27", "throughput": 13256.36, "total_tokens": 83886080}
{"current_steps": 41, "total_steps": 69, "loss": 0.5482, "lr": 1.7708851513629377e-05, "epoch": 0.592057761732852, "percentage": 59.42, "elapsed_time": "1:48:05", "remaining_time": "1:13:49", "throughput": 13257.04, "total_tokens": 85983232}
{"current_steps": 42, "total_steps": 69, "loss": 0.5598, "lr": 1.6628009695725346e-05, "epoch": 0.6064981949458483, "percentage": 60.87, "elapsed_time": "1:50:43", "remaining_time": "1:11:10", "throughput": 13258.73, "total_tokens": 88080384}
{"current_steps": 43, "total_steps": 69, "loss": 0.5402, "lr": 1.5564520106184644e-05, "epoch": 0.6209386281588448, "percentage": 62.32, "elapsed_time": "1:53:20", "remaining_time": "1:08:31", "throughput": 13260.29, "total_tokens": 90177536}
{"current_steps": 44, "total_steps": 69, "loss": 0.5413, "lr": 1.4520586989583406e-05, "epoch": 0.6353790613718412, "percentage": 63.77, "elapsed_time": "1:55:58", "remaining_time": "1:05:53", "throughput": 13261.5, "total_tokens": 92274688}
{"current_steps": 45, "total_steps": 69, "loss": 0.556, "lr": 1.3498374056721197e-05, "epoch": 0.6498194945848376, "percentage": 65.22, "elapsed_time": "1:58:35", "remaining_time": "1:03:15", "throughput": 13262.39, "total_tokens": 94371840}
{"current_steps": 46, "total_steps": 69, "loss": 0.5341, "lr": 1.2500000000000006e-05, "epoch": 0.6642599277978339, "percentage": 66.67, "elapsed_time": "2:01:13", "remaining_time": "1:00:36", "throughput": 13263.23, "total_tokens": 96468992}
{"current_steps": 47, "total_steps": 69, "loss": 0.5436, "lr": 1.1527534102110612e-05, "epoch": 0.6787003610108303, "percentage": 68.12, "elapsed_time": "2:03:51", "remaining_time": "0:57:58", "throughput": 13264.02, "total_tokens": 98566144}
{"current_steps": 48, "total_steps": 69, "loss": 0.5393, "lr": 1.0582991947128324e-05, "epoch": 0.6931407942238267, "percentage": 69.57, "elapsed_time": "2:06:28", "remaining_time": "0:55:20", "throughput": 13264.84, "total_tokens": 100663296}
{"current_steps": 49, "total_steps": 69, "loss": 0.5598, "lr": 9.668331242907089e-06, "epoch": 0.7075812274368231, "percentage": 71.01, "elapsed_time": "2:09:06", "remaining_time": "0:52:41", "throughput": 13265.65, "total_tokens": 102760448}
{"current_steps": 50, "total_steps": 69, "loss": 0.5329, "lr": 8.785447763431101e-06, "epoch": 0.7220216606498195, "percentage": 72.46, "elapsed_time": "2:11:44", "remaining_time": "0:50:03", "throughput": 13265.87, "total_tokens": 104857600}
{"current_steps": 51, "total_steps": 69, "loss": 0.5384, "lr": 7.936171419533653e-06, "epoch": 0.7364620938628159, "percentage": 73.91, "elapsed_time": "2:14:21", "remaining_time": "0:47:25", "throughput": 13266.73, "total_tokens": 106954752}
{"current_steps": 52, "total_steps": 69, "loss": 0.5447, "lr": 7.122262466127514e-06, "epoch": 0.7509025270758123, "percentage": 75.36, "elapsed_time": "2:16:59", "remaining_time": "0:44:47", "throughput": 13267.67, "total_tokens": 109051904}
{"current_steps": 53, "total_steps": 69, "loss": 0.5291, "lr": 6.3454078538078635e-06, "epoch": 0.7653429602888087, "percentage": 76.81, "elapsed_time": "2:19:37", "remaining_time": "0:42:08", "throughput": 13268.2, "total_tokens": 111149056}
{"current_steps": 54, "total_steps": 69, "loss": 0.5258, "lr": 5.607217732389503e-06, "epoch": 0.779783393501805, "percentage": 78.26, "elapsed_time": "2:22:14", "remaining_time": "0:39:30", "throughput": 13269.48, "total_tokens": 113246208}
{"current_steps": 55, "total_steps": 69, "loss": 0.5508, "lr": 4.9092221136255444e-06, "epoch": 0.7942238267148014, "percentage": 79.71, "elapsed_time": "2:24:51", "remaining_time": "0:36:52", "throughput": 13270.12, "total_tokens": 115343360}
{"current_steps": 56, "total_steps": 69, "loss": 0.5439, "lr": 4.252867700024374e-06, "epoch": 0.8086642599277978, "percentage": 81.16, "elapsed_time": "2:27:29", "remaining_time": "0:34:14", "throughput": 13270.24, "total_tokens": 117440512}
{"current_steps": 57, "total_steps": 69, "loss": 0.5261, "lr": 3.6395148863377858e-06, "epoch": 0.8231046931407943, "percentage": 82.61, "elapsed_time": "2:30:08", "remaining_time": "0:31:36", "throughput": 13270.03, "total_tokens": 119537664}
{"current_steps": 58, "total_steps": 69, "loss": 0.5518, "lr": 3.0704349399351435e-06, "epoch": 0.8375451263537906, "percentage": 84.06, "elapsed_time": "2:32:45", "remaining_time": "0:28:58", "throughput": 13270.36, "total_tokens": 121634816}
{"current_steps": 59, "total_steps": 69, "loss": 0.5444, "lr": 2.5468073659076e-06, "epoch": 0.851985559566787, "percentage": 85.51, "elapsed_time": "2:35:23", "remaining_time": "0:26:20", "throughput": 13270.7, "total_tokens": 123731968}
{"current_steps": 60, "total_steps": 69, "loss": 0.5469, "lr": 2.0697174623636794e-06, "epoch": 0.8664259927797834, "percentage": 86.96, "elapsed_time": "2:38:01", "remaining_time": "0:23:42", "throughput": 13270.96, "total_tokens": 125829120}
{"current_steps": 61, "total_steps": 69, "loss": 0.5335, "lr": 1.6401540709832242e-06, "epoch": 0.8808664259927798, "percentage": 88.41, "elapsed_time": "2:40:39", "remaining_time": "0:21:04", "throughput": 13271.26, "total_tokens": 127926272}
{"current_steps": 62, "total_steps": 69, "loss": 0.5367, "lr": 1.2590075274920205e-06, "epoch": 0.8953068592057761, "percentage": 89.86, "elapsed_time": "2:43:16", "remaining_time": "0:18:26", "throughput": 13272.14, "total_tokens": 130023424}
{"current_steps": 63, "total_steps": 69, "loss": 0.551, "lr": 9.270678163050217e-07, "epoch": 0.9097472924187726, "percentage": 91.3, "elapsed_time": "2:45:54", "remaining_time": "0:15:48", "throughput": 13272.46, "total_tokens": 132120576}
{"current_steps": 64, "total_steps": 69, "loss": 0.5597, "lr": 6.450229331630253e-07, "epoch": 0.924187725631769, "percentage": 92.75, "elapsed_time": "2:48:32", "remaining_time": "0:13:10", "throughput": 13272.79, "total_tokens": 134217728}
{"current_steps": 65, "total_steps": 69, "loss": 0.5369, "lr": 4.134574591564494e-07, "epoch": 0.9386281588447654, "percentage": 94.2, "elapsed_time": "2:51:09", "remaining_time": "0:10:31", "throughput": 13273.81, "total_tokens": 136314880}
{"current_steps": 66, "total_steps": 69, "loss": 0.5451, "lr": 2.3285134909173112e-07, "epoch": 0.9530685920577617, "percentage": 95.65, "elapsed_time": "2:53:45", "remaining_time": "0:07:53", "throughput": 13276.09, "total_tokens": 138412032}
{"current_steps": 67, "total_steps": 69, "loss": 0.5237, "lr": 1.0357893671171792e-07, "epoch": 0.9675090252707581, "percentage": 97.1, "elapsed_time": "2:56:20", "remaining_time": "0:05:15", "throughput": 13279.42, "total_tokens": 140509184}
{"current_steps": 68, "total_steps": 69, "loss": 0.5604, "lr": 2.590815883181108e-08, "epoch": 0.9819494584837545, "percentage": 98.55, "elapsed_time": "2:58:55", "remaining_time": "0:02:37", "throughput": 13283.18, "total_tokens": 142606336}
{"current_steps": 69, "total_steps": 69, "loss": 0.534, "lr": 0.0, "epoch": 0.9963898916967509, "percentage": 100.0, "elapsed_time": "3:01:30", "remaining_time": "0:00:00", "throughput": 13286.8, "total_tokens": 144703488}
{"current_steps": 69, "total_steps": 69, "epoch": 0.9963898916967509, "percentage": 100.0, "elapsed_time": "3:01:52", "remaining_time": "0:00:00", "throughput": 13260.92, "total_tokens": 144703488}
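The training_loss.png added at the end of this commit plots this per-step loss. A minimal sketch that reproduces such a curve from the JSONL, assuming matplotlib is available:

```python
import json
import matplotlib.pyplot as plt

with open("trainer_log.jsonl") as f:
    records = [json.loads(line) for line in f]

# The final summary record has no "loss" key, so filter on it.
steps = [r["current_steps"] for r in records if "loss" in r]
losses = [r["loss"] for r in records if "loss" in r]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss.png")
```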
trainer_state.json
ADDED
@@ -0,0 +1,595 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9963898916967509,
  "eval_steps": 500,
  "global_step": 69,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.01444043321299639, "grad_norm": 0.9324310421943665, "learning_rate": 4.9974091841168195e-05, "loss": 0.8402, "num_input_tokens_seen": 2097152, "step": 1},
    {"epoch": 0.02888086642599278, "grad_norm": 0.736538290977478, "learning_rate": 4.9896421063288286e-05, "loss": 0.7686, "num_input_tokens_seen": 4194304, "step": 2},
    {"epoch": 0.04332129963898917, "grad_norm": 0.6694996953010559, "learning_rate": 4.976714865090827e-05, "loss": 0.7563, "num_input_tokens_seen": 6291456, "step": 3},
    {"epoch": 0.05776173285198556, "grad_norm": 0.5700488090515137, "learning_rate": 4.958654254084355e-05, "loss": 0.7139, "num_input_tokens_seen": 8388608, "step": 4},
    {"epoch": 0.07220216606498195, "grad_norm": 0.47626349329948425, "learning_rate": 4.9354977066836986e-05, "loss": 0.6793, "num_input_tokens_seen": 10485760, "step": 5},
    {"epoch": 0.08664259927797834, "grad_norm": 0.4310459494590759, "learning_rate": 4.907293218369499e-05, "loss": 0.6631, "num_input_tokens_seen": 12582912, "step": 6},
    {"epoch": 0.10108303249097472, "grad_norm": 0.41724854707717896, "learning_rate": 4.874099247250798e-05, "loss": 0.64, "num_input_tokens_seen": 14680064, "step": 7},
    {"epoch": 0.11552346570397112, "grad_norm": 0.37938904762268066, "learning_rate": 4.835984592901678e-05, "loss": 0.6145, "num_input_tokens_seen": 16777216, "step": 8},
    {"epoch": 0.1299638989169675, "grad_norm": 0.30510303378105164, "learning_rate": 4.793028253763633e-05, "loss": 0.5997, "num_input_tokens_seen": 18874368, "step": 9},
    {"epoch": 0.1444043321299639, "grad_norm": 0.17869406938552856, "learning_rate": 4.74531926340924e-05, "loss": 0.5896, "num_input_tokens_seen": 20971520, "step": 10},
    {"epoch": 0.1588447653429603, "grad_norm": 0.1084759458899498, "learning_rate": 4.6929565060064864e-05, "loss": 0.6025, "num_input_tokens_seen": 23068672, "step": 11},
    {"epoch": 0.17328519855595667, "grad_norm": 0.09031977504491806, "learning_rate": 4.6360485113662216e-05, "loss": 0.5644, "num_input_tokens_seen": 25165824, "step": 12},
    {"epoch": 0.18772563176895307, "grad_norm": 0.08057376742362976, "learning_rate": 4.574713229997563e-05, "loss": 0.5558, "num_input_tokens_seen": 27262976, "step": 13},
    {"epoch": 0.20216606498194944, "grad_norm": 0.0670362040400505, "learning_rate": 4.509077788637446e-05, "loss": 0.5705, "num_input_tokens_seen": 29360128, "step": 14},
    {"epoch": 0.21660649819494585, "grad_norm": 0.06539376825094223, "learning_rate": 4.43927822676105e-05, "loss": 0.5694, "num_input_tokens_seen": 31457280, "step": 15},
    {"epoch": 0.23104693140794225, "grad_norm": 0.05930742993950844, "learning_rate": 4.365459214619214e-05, "loss": 0.559, "num_input_tokens_seen": 33554432, "step": 16},
    {"epoch": 0.24548736462093862, "grad_norm": 0.054464634507894516, "learning_rate": 4.2877737533872485e-05, "loss": 0.5628, "num_input_tokens_seen": 35651584, "step": 17},
    {"epoch": 0.259927797833935, "grad_norm": 0.053172189742326736, "learning_rate": 4.206382858046636e-05, "loss": 0.5553, "num_input_tokens_seen": 37748736, "step": 18},
    {"epoch": 0.2743682310469314, "grad_norm": 0.04865848645567894, "learning_rate": 4.12145522365689e-05, "loss": 0.5401, "num_input_tokens_seen": 39845888, "step": 19},
    {"epoch": 0.2888086642599278, "grad_norm": 0.04852156713604927, "learning_rate": 4.033166875709291e-05, "loss": 0.575, "num_input_tokens_seen": 41943040, "step": 20},
    {"epoch": 0.30324909747292417, "grad_norm": 0.046296387910842896, "learning_rate": 3.941700805287168e-05, "loss": 0.5398, "num_input_tokens_seen": 44040192, "step": 21},
    {"epoch": 0.3176895306859206, "grad_norm": 0.04640813171863556, "learning_rate": 3.8472465897889394e-05, "loss": 0.5389, "num_input_tokens_seen": 46137344, "step": 22},
    {"epoch": 0.33212996389891697, "grad_norm": 0.043006811290979385, "learning_rate": 3.7500000000000003e-05, "loss": 0.5451, "num_input_tokens_seen": 48234496, "step": 23},
    {"epoch": 0.34657039711191334, "grad_norm": 0.04406141862273216, "learning_rate": 3.6501625943278805e-05, "loss": 0.5558, "num_input_tokens_seen": 50331648, "step": 24},
    {"epoch": 0.36101083032490977, "grad_norm": 0.03994145616889, "learning_rate": 3.547941301041661e-05, "loss": 0.5403, "num_input_tokens_seen": 52428800, "step": 25},
    {"epoch": 0.37545126353790614, "grad_norm": 0.039348017424345016, "learning_rate": 3.443547989381536e-05, "loss": 0.5405, "num_input_tokens_seen": 54525952, "step": 26},
    {"epoch": 0.3898916967509025, "grad_norm": 0.03861572593450546, "learning_rate": 3.3371990304274656e-05, "loss": 0.5577, "num_input_tokens_seen": 56623104, "step": 27},
    {"epoch": 0.4043321299638989, "grad_norm": 0.03878667205572128, "learning_rate": 3.2291148486370626e-05, "loss": 0.5345, "num_input_tokens_seen": 58720256, "step": 28},
    {"epoch": 0.4187725631768953, "grad_norm": 0.03664080426096916, "learning_rate": 3.11951946498225e-05, "loss": 0.5484, "num_input_tokens_seen": 60817408, "step": 29},
    {"epoch": 0.4332129963898917, "grad_norm": 0.036824408918619156, "learning_rate": 3.008640032631585e-05, "loss": 0.5485, "num_input_tokens_seen": 62914560, "step": 30},
    {"epoch": 0.44765342960288806, "grad_norm": 0.037150438874959946, "learning_rate": 2.8967063661406285e-05, "loss": 0.5299, "num_input_tokens_seen": 65011712, "step": 31},
    {"epoch": 0.4620938628158845, "grad_norm": 0.03484778478741646, "learning_rate": 2.7839504651261872e-05, "loss": 0.539, "num_input_tokens_seen": 67108864, "step": 32},
    {"epoch": 0.47653429602888087, "grad_norm": 0.035446375608444214, "learning_rate": 2.6706060334116777e-05, "loss": 0.5248, "num_input_tokens_seen": 69206016, "step": 33},
    {"epoch": 0.49097472924187724, "grad_norm": 0.0350475013256073, "learning_rate": 2.556907994640264e-05, "loss": 0.5342, "num_input_tokens_seen": 71303168, "step": 34},
    {"epoch": 0.5054151624548736, "grad_norm": 0.036621786653995514, "learning_rate": 2.4430920053597356e-05, "loss": 0.5431, "num_input_tokens_seen": 73400320, "step": 35},
    {"epoch": 0.51985559566787, "grad_norm": 0.0347721092402935, "learning_rate": 2.329393966588323e-05, "loss": 0.5471, "num_input_tokens_seen": 75497472, "step": 36},
    {"epoch": 0.5342960288808665, "grad_norm": 0.03457929939031601, "learning_rate": 2.2160495348738123e-05, "loss": 0.542, "num_input_tokens_seen": 77594624, "step": 37},
    {"epoch": 0.5487364620938628, "grad_norm": 0.035683248192071915, "learning_rate": 2.1032936338593718e-05, "loss": 0.542, "num_input_tokens_seen": 79691776, "step": 38},
    {"epoch": 0.5631768953068592, "grad_norm": 0.03531257063150406, "learning_rate": 1.991359967368416e-05, "loss": 0.542, "num_input_tokens_seen": 81788928, "step": 39},
    {"epoch": 0.5776173285198556, "grad_norm": 0.034907545894384384, "learning_rate": 1.8804805350177505e-05, "loss": 0.5425, "num_input_tokens_seen": 83886080, "step": 40},
    {"epoch": 0.592057761732852, "grad_norm": 0.03420661389827728, "learning_rate": 1.7708851513629377e-05, "loss": 0.5482, "num_input_tokens_seen": 85983232, "step": 41},
    {"epoch": 0.6064981949458483, "grad_norm": 0.03401198983192444, "learning_rate": 1.6628009695725346e-05, "loss": 0.5598, "num_input_tokens_seen": 88080384, "step": 42},
    {"epoch": 0.6209386281588448, "grad_norm": 0.03434673324227333, "learning_rate": 1.5564520106184644e-05, "loss": 0.5402, "num_input_tokens_seen": 90177536, "step": 43},
    {"epoch": 0.6353790613718412, "grad_norm": 0.033791348338127136, "learning_rate": 1.4520586989583406e-05, "loss": 0.5413, "num_input_tokens_seen": 92274688, "step": 44},
    {"epoch": 0.6498194945848376, "grad_norm": 0.03330031782388687, "learning_rate": 1.3498374056721197e-05, "loss": 0.556, "num_input_tokens_seen": 94371840, "step": 45},
    {"epoch": 0.6642599277978339, "grad_norm": 0.03298752009868622, "learning_rate": 1.2500000000000006e-05, "loss": 0.5341, "num_input_tokens_seen": 96468992, "step": 46},
    {"epoch": 0.6787003610108303, "grad_norm": 0.03363870084285736, "learning_rate": 1.1527534102110612e-05, "loss": 0.5436, "num_input_tokens_seen": 98566144, "step": 47},
    {"epoch": 0.6931407942238267, "grad_norm": 0.032934173941612244, "learning_rate": 1.0582991947128324e-05, "loss": 0.5393, "num_input_tokens_seen": 100663296, "step": 48},
    {"epoch": 0.7075812274368231, "grad_norm": 0.03438662365078926, "learning_rate": 9.668331242907089e-06, "loss": 0.5598, "num_input_tokens_seen": 102760448, "step": 49},
    {"epoch": 0.7220216606498195, "grad_norm": 0.03351249918341637, "learning_rate": 8.785447763431101e-06, "loss": 0.5329, "num_input_tokens_seen": 104857600, "step": 50},
    {"epoch": 0.7364620938628159, "grad_norm": 0.0330129936337471, "learning_rate": 7.936171419533653e-06, "loss": 0.5384, "num_input_tokens_seen": 106954752, "step": 51},
    {"epoch": 0.7509025270758123, "grad_norm": 0.03433903306722641, "learning_rate": 7.122262466127514e-06, "loss": 0.5447, "num_input_tokens_seen": 109051904, "step": 52},
    {"epoch": 0.7653429602888087, "grad_norm": 0.03314002603292465, "learning_rate": 6.3454078538078635e-06, "loss": 0.5291, "num_input_tokens_seen": 111149056, "step": 53},
    {"epoch": 0.779783393501805, "grad_norm": 0.032993488013744354, "learning_rate": 5.607217732389503e-06, "loss": 0.5258, "num_input_tokens_seen": 113246208, "step": 54},
    {"epoch": 0.7942238267148014, "grad_norm": 0.032629404217004776, "learning_rate": 4.9092221136255444e-06, "loss": 0.5508, "num_input_tokens_seen": 115343360, "step": 55},
    {"epoch": 0.8086642599277978, "grad_norm": 0.03205695375800133, "learning_rate": 4.252867700024374e-06, "loss": 0.5439, "num_input_tokens_seen": 117440512, "step": 56},
    {"epoch": 0.8231046931407943, "grad_norm": 0.03285016119480133, "learning_rate": 3.6395148863377858e-06, "loss": 0.5261, "num_input_tokens_seen": 119537664, "step": 57},
    {"epoch": 0.8375451263537906, "grad_norm": 0.035063523799180984, "learning_rate": 3.0704349399351435e-06, "loss": 0.5518, "num_input_tokens_seen": 121634816, "step": 58},
    {"epoch": 0.851985559566787, "grad_norm": 0.03116844967007637, "learning_rate": 2.5468073659076e-06, "loss": 0.5444, "num_input_tokens_seen": 123731968, "step": 59},
    {"epoch": 0.8664259927797834, "grad_norm": 0.03255166485905647, "learning_rate": 2.0697174623636794e-06, "loss": 0.5469, "num_input_tokens_seen": 125829120, "step": 60},
    {"epoch": 0.8808664259927798, "grad_norm": 0.03249025344848633, "learning_rate": 1.6401540709832242e-06, "loss": 0.5335, "num_input_tokens_seen": 127926272, "step": 61},
    {"epoch": 0.8953068592057761, "grad_norm": 0.03506583720445633, "learning_rate": 1.2590075274920205e-06, "loss": 0.5367, "num_input_tokens_seen": 130023424, "step": 62},
    {"epoch": 0.9097472924187726, "grad_norm": 0.03293128311634064, "learning_rate": 9.270678163050217e-07, "loss": 0.551, "num_input_tokens_seen": 132120576, "step": 63},
    {"epoch": 0.924187725631769, "grad_norm": 0.03347219526767731, "learning_rate": 6.450229331630253e-07, "loss": 0.5597, "num_input_tokens_seen": 134217728, "step": 64},
    {"epoch": 0.9386281588447654, "grad_norm": 0.03135489672422409, "learning_rate": 4.134574591564494e-07, "loss": 0.5369, "num_input_tokens_seen": 136314880, "step": 65},
    {"epoch": 0.9530685920577617, "grad_norm": 0.03153960779309273, "learning_rate": 2.3285134909173112e-07, "loss": 0.5451, "num_input_tokens_seen": 138412032, "step": 66},
    {"epoch": 0.9675090252707581, "grad_norm": 0.03191647306084633, "learning_rate": 1.0357893671171792e-07, "loss": 0.5237, "num_input_tokens_seen": 140509184, "step": 67},
    {"epoch": 0.9819494584837545, "grad_norm": 0.03295959159731865, "learning_rate": 2.590815883181108e-08, "loss": 0.5604, "num_input_tokens_seen": 142606336, "step": 68},
    {"epoch": 0.9963898916967509, "grad_norm": 0.03191553056240082, "learning_rate": 0.0, "loss": 0.534, "num_input_tokens_seen": 144703488, "step": 69},
    {"epoch": 0.9963898916967509, "num_input_tokens_seen": 144703488, "step": 69, "total_flos": 5.635565866281075e+18, "train_loss": 0.5665888682655666, "train_runtime": 10913.0245, "train_samples_per_second": 3.247, "train_steps_per_second": 0.006}
  ],
  "logging_steps": 1,
  "max_steps": 69,
  "num_input_tokens_seen": 144703488,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.635565866281075e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
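As a sanity check, the summary `train_loss` equals the mean of the 69 per-step losses in `log_history`, up to the four-decimal rounding of the logged values:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# The final summary entry has "train_loss" rather than "loss", so it is filtered out.
losses = [e["loss"] for e in state["log_history"] if "loss" in e]
print(len(losses), sum(losses) / len(losses))  # 69, ~0.5666 (matches train_loss)
```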
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8f89cedb3b4452c319d87072cbb65858846722f74c7a4b37d475becf73b8ac27
size 5688
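What the commit stores here is a Git LFS pointer, not the binary itself: the 5688-byte file lives in LFS and is fetched on checkout. `training_args.bin` is the pickled `TrainingArguments` object the Trainer saves alongside the model; once downloaded it can be inspected, as in this sketch (assuming a recent PyTorch, where loading pickled objects requires `weights_only=False`):

```python
import torch

# training_args.bin is a pickled transformers.TrainingArguments object.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size)  # 5e-05 16
```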
training_args.yaml
ADDED
@@ -0,0 +1,39 @@
apollo_rank: 256
apollo_scale: 1
apollo_target: all
apollo_update_interval: 200
bf16: true
cutoff_len: 4096
dataset: codes3_query_filtered_330k_nlx
dataset_dir: data
ddp_timeout: 180000000
do_train: true
enable_liger_kernel: true
finetuning_type: freeze
flash_attn: auto
freeze_trainable_layers: 2
freeze_trainable_modules: all
gradient_accumulation_steps: 8
include_num_input_tokens_seen: true
learning_rate: 5.0e-05
logging_steps: 1
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 50000000
model_name_or_path: deepseek-ai/deepseek-coder-7b-instruct-v1.5
neat_packing: true
num_train_epochs: 1.0
output_dir: saves/DeepSeek-Coder-7B-Instruct/freeze/deepseek-nlx-330k
packing: true
per_device_train_batch_size: 16
plot_loss: true
preprocessing_num_workers: 16
report_to: none
rope_scaling: llama3
save_steps: 500
stage: sft
template: deepseekcoder
trust_remote_code: true
use_apollo: true
use_llama_pro: true
warmup_steps: 0
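This is the LLaMA-Factory run configuration, consumed by an invocation along the lines of `llamafactory-cli train training_args.yaml` (the exact command is not recorded in this commit). Note that the effective batch size of 512 comes from this file plus the launcher: 16 per device × 8 accumulation steps × 4 GPUs. A quick sanity check of that arithmetic, with the device count supplied by hand since it is set by the distributed launcher rather than the YAML:

```python
import yaml  # pip install pyyaml

with open("training_args.yaml") as f:
    cfg = yaml.safe_load(f)

num_devices = 4  # not in the YAML; comes from the multi-GPU launcher

effective_batch = (cfg["per_device_train_batch_size"]
                   * cfg["gradient_accumulation_steps"]
                   * num_devices)
print(effective_batch)  # 512
```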
training_loss.png
ADDED
(image: per-step training loss curve)