kishkath committed · verified
Commit cfe3edb · 1 Parent(s): 2008ee6

Upload 15 files

phi2-qlora-finetuned/adapter_config.json CHANGED
@@ -14,19 +14,22 @@
   "loftq_config": {},
   "lora_alpha": 16,
   "lora_bias": false,
-  "lora_dropout": 0.1,
+  "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 64,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "dense",
-    "dense_h_to_4h",
-    "query_key_value",
-    "dense_4h_to_h"
+    "k_proj",
+    "q_proj",
+    "up_proj",
+    "down_proj",
+    "gate_proj",
+    "v_proj",
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
phi2-qlora-finetuned/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bd1bf3d294393a980846405d4f01275417a9a4050b44d287df1e53dc20171d26
-size 41951600
+oid sha256:665ed085a1c6782b6f007a8a0d370c75d0888e3672e2dcd344c7f36751aafe53
+size 31483040
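The adapter shrinks from roughly 42 MB to 31 MB. A hedged way to reason about this (the helper below is illustrative, not from the commit): LoRA parameter count scales linearly with the rank r, so dropping r from 64 to 16 cuts each adapted module's contribution four-fold, while the broader target-module list adds part of that back.

```python
# Rough sketch of LoRA adapter sizing. Each adapted weight W (d_out x d_in)
# gains A (r x d_in) and B (d_out x r), i.e. r * (d_in + d_out) parameters.
def lora_params(r: int, shapes: list[tuple[int, int]]) -> int:
    """shapes: (d_in, d_out) for every adapted weight across all layers."""
    return sum(r * (d_in + d_out) for d_in, d_out in shapes)
```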
phi2-qlora-finetuned/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e7e65a6c9ff45185d6c3341bcd6b755b6434d2812a6dfb72451b364a5f9b08a8
-size 83926074
+oid sha256:417c1cd07d456d14577d780ae5bcb4ce553d325313e89cc6ac7f81d51d7891dc
+size 63028090
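In both revisions the optimizer state is almost exactly twice the adapter file (83,926,074 ≈ 2 × 41,951,600 and 63,028,090 ≈ 2 × 31,483,040). A hedged sanity check, assuming AdamW keeping two moment tensors per trainable parameter at the same precision as the stored weights:

```python
# Ratio of optimizer state to adapter weights, before and after this commit.
# Assumption (not stated in the commit): AdamW stores two moment tensors
# (exp_avg, exp_avg_sq) per trainable parameter, hence the ~2x ratio.
for opt_bytes, adapter_bytes in [(83_926_074, 41_951_600), (63_028_090, 31_483_040)]:
    print(round(opt_bytes / adapter_bytes, 3))  # ~2.0 in both revisions
```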
phi2-qlora-finetuned/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d3efacd67464c6f3a31ca4e9c2c7111263cd783b56d7a453ce6846f3ef038256
+oid sha256:7e1e762d3d91354a1077502c44abb720ff6aaa5d1c35eed7a138a66648653703
 size 14244
phi2-qlora-finetuned/scaler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:20196fdcd6e066aadc548dcc3c9ff5244ea31938c846cb03623649ad99fe654f
+oid sha256:578927bb1c801cdd14af41be9b1907db16bd1c7b35d8fc1fc2779e79adb5109e
 size 988
phi2-qlora-finetuned/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:95bc4bac3452830d3646edef4935599d16c47da1770ba8eb4f20060808418f56
+oid sha256:34bebae8dbb4044169aedad702183ea2a5f7688635af093bea630f24dc71f1ed
 size 1064
phi2-qlora-finetuned/trainer_state.json CHANGED
@@ -1,24 +1,112 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.020366598778004074,
+  "epoch": 0.24439918533604887,
   "eval_steps": 500,
-  "global_step": 10,
+  "global_step": 120,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.020366598778004074,
-      "grad_norm": 0.05782133340835571,
-      "learning_rate": 0.0002,
-      "loss": 1.7741,
-      "mean_token_accuracy": 0.607810240983963,
+      "grad_norm": 0.1955823004245758,
+      "learning_rate": 0.00019868265225415265,
+      "loss": 1.6792,
+      "mean_token_accuracy": 0.6286436378955841,
       "step": 10
+    },
+    {
+      "epoch": 0.04073319755600815,
+      "grad_norm": 0.43605607748031616,
+      "learning_rate": 0.00019075754196709572,
+      "loss": 1.5504,
+      "mean_token_accuracy": 0.6437817469239235,
+      "step": 20
+    },
+    {
+      "epoch": 0.06109979633401222,
+      "grad_norm": 0.5884078145027161,
+      "learning_rate": 0.00017621620551276366,
+      "loss": 1.317,
+      "mean_token_accuracy": 0.6953216314315795,
+      "step": 30
+    },
+    {
+      "epoch": 0.0814663951120163,
+      "grad_norm": 0.36727696657180786,
+      "learning_rate": 0.00015611870653623825,
+      "loss": 1.054,
+      "mean_token_accuracy": 0.7708626106381417,
+      "step": 40
+    },
+    {
+      "epoch": 0.10183299389002037,
+      "grad_norm": 0.2848515510559082,
+      "learning_rate": 0.000131930153013598,
+      "loss": 0.6444,
+      "mean_token_accuracy": 0.867109614610672,
+      "step": 50
+    },
+    {
+      "epoch": 0.12219959266802444,
+      "grad_norm": 0.2064630389213562,
+      "learning_rate": 0.00010541389085854176,
+      "loss": 0.9981,
+      "mean_token_accuracy": 0.756032009422779,
+      "step": 60
+    },
+    {
+      "epoch": 0.1425661914460285,
+      "grad_norm": 0.18142254650592804,
+      "learning_rate": 7.85029559788976e-05,
+      "loss": 0.9005,
+      "mean_token_accuracy": 0.775178787112236,
+      "step": 70
+    },
+    {
+      "epoch": 0.1629327902240326,
+      "grad_norm": 0.21299591660499573,
+      "learning_rate": 5.3159155930021e-05,
+      "loss": 0.8883,
+      "mean_token_accuracy": 0.7880317449569703,
+      "step": 80
+    },
+    {
+      "epoch": 0.18329938900203666,
+      "grad_norm": 0.21353664994239807,
+      "learning_rate": 3.123005411465766e-05,
+      "loss": 0.8977,
+      "mean_token_accuracy": 0.801851412653923,
+      "step": 90
+    },
+    {
+      "epoch": 0.20366598778004075,
+      "grad_norm": 0.2567966878414154,
+      "learning_rate": 1.4314282383241096e-05,
+      "loss": 0.5785,
+      "mean_token_accuracy": 0.8794851988554001,
+      "step": 100
+    },
+    {
+      "epoch": 0.2240325865580448,
+      "grad_norm": 0.23000894486904144,
+      "learning_rate": 3.6450007480777093e-06,
+      "loss": 1.0155,
+      "mean_token_accuracy": 0.7514406576752662,
+      "step": 110
+    },
+    {
+      "epoch": 0.24439918533604887,
+      "grad_norm": 0.19669267535209656,
+      "learning_rate": 0.0,
+      "loss": 0.938,
+      "mean_token_accuracy": 0.7690818622708321,
+      "step": 120
     }
   ],
   "logging_steps": 10,
-  "max_steps": 100,
+  "max_steps": 120,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 10,
@@ -29,12 +117,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1750345677004800.0,
+  "total_flos": 1.339699180744704e+16,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null
phi2-qlora-finetuned/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1068cda2d19e4aa77d1a75e6fbdc97a36147805511b2655adb34a475792f849b
+oid sha256:064fa4c2e54fa8a50b7b4d9697c1ac99d08654a2dc35cecd028c114e5f16ce98
 size 5560