{
  "best_metric": 0.8737932435834382,
  "best_model_checkpoint": "result/roberta-base-cls_before_pooler-sym_mlp-mlp_bert-bs64-gpu8-gs1-lr5e-5-m=stsb-cross_contra-norm0.05-l32",
  "epoch": 3.0,
  "global_step": 1617,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 4.969078540507112e-05,
      "loss": 6.1836,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.938157081014224e-05,
      "loss": 2.6504,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.907235621521336e-05,
      "loss": 1.6665,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.876314162028448e-05,
      "loss": 1.3732,
      "step": 40
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.84539270253556e-05,
      "loss": 1.2483,
      "step": 50
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.814471243042672e-05,
      "loss": 1.1685,
      "step": 60
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.783549783549784e-05,
      "loss": 1.0963,
      "step": 70
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.752628324056896e-05,
      "loss": 1.0671,
      "step": 80
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.721706864564008e-05,
      "loss": 1.0623,
      "step": 90
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.69078540507112e-05,
      "loss": 0.9754,
      "step": 100
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.6598639455782315e-05,
      "loss": 1.0497,
      "step": 110
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.628942486085344e-05,
      "loss": 1.0097,
      "step": 120
    },
    {
      "epoch": 0.23,
      "eval_avg_sts": 0.8361329859275041,
      "eval_sickr_spearman": 0.8112100964923686,
      "eval_stsb_spearman": 0.8610558753626395,
      "step": 125
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.5980210265924555e-05,
      "loss": 0.981,
      "step": 130
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.567099567099568e-05,
      "loss": 0.9648,
      "step": 140
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.5361781076066796e-05,
      "loss": 0.9035,
      "step": 150
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.505256648113791e-05,
      "loss": 0.9459,
      "step": 160
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.4743351886209036e-05,
      "loss": 0.9296,
      "step": 170
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.4434137291280146e-05,
      "loss": 0.9249,
      "step": 180
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.412492269635127e-05,
      "loss": 0.8699,
      "step": 190
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.381570810142239e-05,
      "loss": 0.88,
      "step": 200
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.3506493506493503e-05,
      "loss": 0.8992,
      "step": 210
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.319727891156463e-05,
      "loss": 0.893,
      "step": 220
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.2888064316635744e-05,
      "loss": 0.8977,
      "step": 230
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.257884972170687e-05,
      "loss": 0.8957,
      "step": 240
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.2269635126777984e-05,
      "loss": 0.8969,
      "step": 250
    },
    {
      "epoch": 0.46,
      "eval_avg_sts": 0.8389081420973826,
      "eval_sickr_spearman": 0.8101423170762979,
      "eval_stsb_spearman": 0.8676739671184673,
      "step": 250
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.19604205318491e-05,
      "loss": 0.8434,
      "step": 260
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.1651205936920225e-05,
      "loss": 0.8362,
      "step": 270
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.134199134199134e-05,
      "loss": 0.8513,
      "step": 280
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.1032776747062465e-05,
      "loss": 0.8384,
      "step": 290
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.072356215213358e-05,
      "loss": 0.8653,
      "step": 300
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.04143475572047e-05,
      "loss": 0.8558,
      "step": 310
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.010513296227582e-05,
      "loss": 0.8241,
      "step": 320
    },
    {
      "epoch": 0.61,
      "learning_rate": 3.979591836734694e-05,
      "loss": 0.8324,
      "step": 330
    },
    {
      "epoch": 0.63,
      "learning_rate": 3.948670377241806e-05,
      "loss": 0.8396,
      "step": 340
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.917748917748918e-05,
      "loss": 0.8361,
      "step": 350
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.88682745825603e-05,
      "loss": 0.8737,
      "step": 360
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.855905998763142e-05,
      "loss": 0.84,
      "step": 370
    },
    {
      "epoch": 0.7,
      "eval_avg_sts": 0.8445458159728749,
      "eval_sickr_spearman": 0.8180373333129849,
      "eval_stsb_spearman": 0.8710542986327651,
      "step": 375
    },
    {
      "epoch": 0.71,
      "learning_rate": 3.824984539270254e-05,
      "loss": 0.8384,
      "step": 380
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.794063079777366e-05,
      "loss": 0.8047,
      "step": 390
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.763141620284478e-05,
      "loss": 0.8039,
      "step": 400
    },
    {
      "epoch": 0.76,
      "learning_rate": 3.7322201607915894e-05,
      "loss": 0.8474,
      "step": 410
    },
    {
      "epoch": 0.78,
      "learning_rate": 3.701298701298702e-05,
      "loss": 0.7912,
      "step": 420
    },
    {
      "epoch": 0.8,
      "learning_rate": 3.6703772418058135e-05,
      "loss": 0.8061,
      "step": 430
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.639455782312925e-05,
      "loss": 0.8132,
      "step": 440
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.6085343228200375e-05,
      "loss": 0.7815,
      "step": 450
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.577612863327149e-05,
      "loss": 0.7839,
      "step": 460
    },
    {
      "epoch": 0.87,
      "learning_rate": 3.5466914038342616e-05,
      "loss": 0.7892,
      "step": 470
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.515769944341373e-05,
      "loss": 0.7738,
      "step": 480
    },
    {
      "epoch": 0.91,
      "learning_rate": 3.484848484848485e-05,
      "loss": 0.7702,
      "step": 490
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.453927025355597e-05,
      "loss": 0.7715,
      "step": 500
    },
    {
      "epoch": 0.93,
      "eval_avg_sts": 0.8402489492013288,
      "eval_sickr_spearman": 0.8102146999461652,
      "eval_stsb_spearman": 0.8702831984564924,
      "step": 500
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.423005565862709e-05,
      "loss": 0.7891,
      "step": 510
    },
    {
      "epoch": 0.96,
      "learning_rate": 3.392084106369821e-05,
      "loss": 0.7646,
      "step": 520
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.3611626468769324e-05,
      "loss": 0.7798,
      "step": 530
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.330241187384045e-05,
      "loss": 0.7456,
      "step": 540
    },
    {
      "epoch": 1.02,
      "learning_rate": 3.2993197278911564e-05,
      "loss": 0.6767,
      "step": 550
    },
    {
      "epoch": 1.04,
      "learning_rate": 3.268398268398268e-05,
      "loss": 0.6897,
      "step": 560
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.2374768089053805e-05,
      "loss": 0.6593,
      "step": 570
    },
    {
      "epoch": 1.08,
      "learning_rate": 3.206555349412492e-05,
      "loss": 0.697,
      "step": 580
    },
    {
      "epoch": 1.09,
      "learning_rate": 3.1756338899196045e-05,
      "loss": 0.6816,
      "step": 590
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.144712430426716e-05,
      "loss": 0.6675,
      "step": 600
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.113790970933828e-05,
      "loss": 0.6661,
      "step": 610
    },
    {
      "epoch": 1.15,
      "learning_rate": 3.08286951144094e-05,
      "loss": 0.6719,
      "step": 620
    },
    {
      "epoch": 1.16,
      "eval_avg_sts": 0.8420328093315328,
      "eval_sickr_spearman": 0.8171519279890747,
      "eval_stsb_spearman": 0.8669136906739908,
      "step": 625
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.051948051948052e-05,
      "loss": 0.7076,
      "step": 630
    },
    {
      "epoch": 1.19,
      "learning_rate": 3.021026592455164e-05,
      "loss": 0.6515,
      "step": 640
    },
    {
      "epoch": 1.21,
      "learning_rate": 2.990105132962276e-05,
      "loss": 0.6893,
      "step": 650
    },
    {
      "epoch": 1.22,
      "learning_rate": 2.959183673469388e-05,
      "loss": 0.6714,
      "step": 660
    },
    {
      "epoch": 1.24,
      "learning_rate": 2.9282622139764997e-05,
      "loss": 0.7052,
      "step": 670
    },
    {
      "epoch": 1.26,
      "learning_rate": 2.8973407544836117e-05,
      "loss": 0.6671,
      "step": 680
    },
    {
      "epoch": 1.28,
      "learning_rate": 2.8664192949907237e-05,
      "loss": 0.6735,
      "step": 690
    },
    {
      "epoch": 1.3,
      "learning_rate": 2.8354978354978357e-05,
      "loss": 0.6778,
      "step": 700
    },
    {
      "epoch": 1.32,
      "learning_rate": 2.8045763760049478e-05,
      "loss": 0.6832,
      "step": 710
    },
    {
      "epoch": 1.34,
      "learning_rate": 2.7736549165120594e-05,
      "loss": 0.6587,
      "step": 720
    },
    {
      "epoch": 1.35,
      "learning_rate": 2.7427334570191715e-05,
      "loss": 0.6567,
      "step": 730
    },
    {
      "epoch": 1.37,
      "learning_rate": 2.7118119975262835e-05,
      "loss": 0.648,
      "step": 740
    },
    {
      "epoch": 1.39,
      "learning_rate": 2.6808905380333955e-05,
      "loss": 0.6488,
      "step": 750
    },
    {
      "epoch": 1.39,
      "eval_avg_sts": 0.8417559206280472,
      "eval_sickr_spearman": 0.8131766819096506,
      "eval_stsb_spearman": 0.8703351593464438,
      "step": 750
    },
    {
      "epoch": 1.41,
      "learning_rate": 2.6499690785405072e-05,
      "loss": 0.6445,
      "step": 760
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.6190476190476192e-05,
      "loss": 0.6763,
      "step": 770
    },
    {
      "epoch": 1.45,
      "learning_rate": 2.5881261595547312e-05,
      "loss": 0.6585,
      "step": 780
    },
    {
      "epoch": 1.47,
      "learning_rate": 2.5572047000618433e-05,
      "loss": 0.6492,
      "step": 790
    },
    {
      "epoch": 1.48,
      "learning_rate": 2.5262832405689553e-05,
      "loss": 0.6634,
      "step": 800
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.495361781076067e-05,
      "loss": 0.6757,
      "step": 810
    },
    {
      "epoch": 1.52,
      "learning_rate": 2.4644403215831787e-05,
      "loss": 0.6805,
      "step": 820
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.4335188620902907e-05,
      "loss": 0.6639,
      "step": 830
    },
    {
      "epoch": 1.56,
      "learning_rate": 2.4025974025974027e-05,
      "loss": 0.6576,
      "step": 840
    },
    {
      "epoch": 1.58,
      "learning_rate": 2.3716759431045147e-05,
      "loss": 0.6849,
      "step": 850
    },
    {
      "epoch": 1.6,
      "learning_rate": 2.3407544836116267e-05,
      "loss": 0.7029,
      "step": 860
    },
    {
      "epoch": 1.61,
      "learning_rate": 2.3098330241187384e-05,
      "loss": 0.6384,
      "step": 870
    },
    {
      "epoch": 1.62,
      "eval_avg_sts": 0.8415267776775321,
      "eval_sickr_spearman": 0.8138372056166275,
      "eval_stsb_spearman": 0.8692163497384369,
      "step": 875
    },
    {
      "epoch": 1.63,
      "learning_rate": 2.2789115646258505e-05,
      "loss": 0.6228,
      "step": 880
    },
    {
      "epoch": 1.65,
      "learning_rate": 2.2479901051329625e-05,
      "loss": 0.6844,
      "step": 890
    },
    {
      "epoch": 1.67,
      "learning_rate": 2.2170686456400745e-05,
      "loss": 0.646,
      "step": 900
    },
    {
      "epoch": 1.69,
      "learning_rate": 2.1861471861471862e-05,
      "loss": 0.6328,
      "step": 910
    },
    {
      "epoch": 1.71,
      "learning_rate": 2.1552257266542982e-05,
      "loss": 0.6774,
      "step": 920
    },
    {
      "epoch": 1.73,
      "learning_rate": 2.1243042671614102e-05,
      "loss": 0.6613,
      "step": 930
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.0933828076685223e-05,
      "loss": 0.6602,
      "step": 940
    },
    {
      "epoch": 1.76,
      "learning_rate": 2.062461348175634e-05,
      "loss": 0.6581,
      "step": 950
    },
    {
      "epoch": 1.78,
      "learning_rate": 2.031539888682746e-05,
      "loss": 0.6729,
      "step": 960
    },
    {
      "epoch": 1.8,
      "learning_rate": 2.0006184291898576e-05,
      "loss": 0.6428,
      "step": 970
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.9696969696969697e-05,
      "loss": 0.6552,
      "step": 980
    },
    {
      "epoch": 1.84,
      "learning_rate": 1.9387755102040817e-05,
      "loss": 0.6501,
      "step": 990
    },
    {
      "epoch": 1.86,
      "learning_rate": 1.9078540507111937e-05,
      "loss": 0.6839,
      "step": 1000
    },
    {
      "epoch": 1.86,
      "eval_avg_sts": 0.8442180854633452,
      "eval_sickr_spearman": 0.814642927343252,
      "eval_stsb_spearman": 0.8737932435834382,
      "step": 1000
    },
    {
      "epoch": 1.87,
      "learning_rate": 1.8769325912183054e-05,
      "loss": 0.646,
      "step": 1010
    },
    {
      "epoch": 1.89,
      "learning_rate": 1.8460111317254174e-05,
      "loss": 0.6217,
      "step": 1020
    },
    {
      "epoch": 1.91,
      "learning_rate": 1.8150896722325294e-05,
      "loss": 0.6278,
      "step": 1030
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.7841682127396415e-05,
      "loss": 0.6456,
      "step": 1040
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.7532467532467535e-05,
      "loss": 0.6459,
      "step": 1050
    },
    {
      "epoch": 1.97,
      "learning_rate": 1.7223252937538652e-05,
      "loss": 0.6223,
      "step": 1060
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.6914038342609772e-05,
      "loss": 0.6357,
      "step": 1070
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.6604823747680892e-05,
      "loss": 0.6018,
      "step": 1080
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.6295609152752012e-05,
      "loss": 0.5888,
      "step": 1090
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.5986394557823133e-05,
      "loss": 0.5913,
      "step": 1100
    },
    {
      "epoch": 2.06,
      "learning_rate": 1.567717996289425e-05,
      "loss": 0.5581,
      "step": 1110
    },
    {
      "epoch": 2.08,
      "learning_rate": 1.5367965367965366e-05,
      "loss": 0.5899,
      "step": 1120
    },
    {
      "epoch": 2.09,
      "eval_avg_sts": 0.8410988161028077,
      "eval_sickr_spearman": 0.8122731208293968,
      "eval_stsb_spearman": 0.8699245113762184,
      "step": 1125
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.5058750773036487e-05,
      "loss": 0.5456,
      "step": 1130
    },
    {
      "epoch": 2.12,
      "learning_rate": 1.4749536178107607e-05,
      "loss": 0.6023,
      "step": 1140
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.4440321583178725e-05,
      "loss": 0.6004,
      "step": 1150
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.4131106988249846e-05,
      "loss": 0.588,
      "step": 1160
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.3821892393320964e-05,
      "loss": 0.5716,
      "step": 1170
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.3512677798392084e-05,
      "loss": 0.5813,
      "step": 1180
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.3203463203463205e-05,
      "loss": 0.5615,
      "step": 1190
    },
    {
      "epoch": 2.23,
      "learning_rate": 1.2894248608534323e-05,
      "loss": 0.5874,
      "step": 1200
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.2585034013605443e-05,
      "loss": 0.5983,
      "step": 1210
    },
    {
      "epoch": 2.26,
      "learning_rate": 1.2275819418676562e-05,
      "loss": 0.5922,
      "step": 1220
    },
    {
      "epoch": 2.28,
      "learning_rate": 1.1966604823747682e-05,
      "loss": 0.5772,
      "step": 1230
    },
    {
      "epoch": 2.3,
      "learning_rate": 1.16573902288188e-05,
      "loss": 0.5772,
      "step": 1240
    },
    {
      "epoch": 2.32,
      "learning_rate": 1.1348175633889919e-05,
      "loss": 0.5828,
      "step": 1250
    },
    {
      "epoch": 2.32,
      "eval_avg_sts": 0.8417651170306701,
      "eval_sickr_spearman": 0.8143169402577914,
      "eval_stsb_spearman": 0.869213293803549,
      "step": 1250
    },
    {
      "epoch": 2.34,
      "learning_rate": 1.103896103896104e-05,
      "loss": 0.5919,
      "step": 1260
    },
    {
      "epoch": 2.36,
      "learning_rate": 1.0729746444032158e-05,
      "loss": 0.5587,
      "step": 1270
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.0420531849103278e-05,
      "loss": 0.5779,
      "step": 1280
    },
    {
      "epoch": 2.39,
      "learning_rate": 1.0111317254174398e-05,
      "loss": 0.5422,
      "step": 1290
    },
    {
      "epoch": 2.41,
      "learning_rate": 9.802102659245517e-06,
      "loss": 0.5915,
      "step": 1300
    },
    {
      "epoch": 2.43,
      "learning_rate": 9.492888064316637e-06,
      "loss": 0.5794,
      "step": 1310
    },
    {
      "epoch": 2.45,
      "learning_rate": 9.183673469387756e-06,
      "loss": 0.5832,
      "step": 1320
    },
    {
      "epoch": 2.47,
      "learning_rate": 8.874458874458876e-06,
      "loss": 0.5644,
      "step": 1330
    },
    {
      "epoch": 2.49,
      "learning_rate": 8.565244279529994e-06,
      "loss": 0.5704,
      "step": 1340
    },
    {
      "epoch": 2.5,
      "learning_rate": 8.256029684601113e-06,
      "loss": 0.5692,
      "step": 1350
    },
    {
      "epoch": 2.52,
      "learning_rate": 7.946815089672233e-06,
      "loss": 0.5794,
      "step": 1360
    },
    {
      "epoch": 2.54,
      "learning_rate": 7.637600494743352e-06,
      "loss": 0.5793,
      "step": 1370
    },
    {
      "epoch": 2.55,
      "eval_avg_sts": 0.8438379193565275,
      "eval_sickr_spearman": 0.8155493222584906,
      "eval_stsb_spearman": 0.8721265164545645,
      "step": 1375
    },
    {
      "epoch": 2.56,
      "learning_rate": 7.328385899814472e-06,
      "loss": 0.57,
      "step": 1380
    },
    {
      "epoch": 2.58,
      "learning_rate": 7.019171304885591e-06,
      "loss": 0.5759,
      "step": 1390
    },
    {
      "epoch": 2.6,
      "learning_rate": 6.709956709956711e-06,
      "loss": 0.5518,
      "step": 1400
    },
    {
      "epoch": 2.62,
      "learning_rate": 6.40074211502783e-06,
      "loss": 0.5608,
      "step": 1410
    },
    {
      "epoch": 2.63,
      "learning_rate": 6.091527520098949e-06,
      "loss": 0.5544,
      "step": 1420
    },
    {
      "epoch": 2.65,
      "learning_rate": 5.782312925170069e-06,
      "loss": 0.5701,
      "step": 1430
    },
    {
      "epoch": 2.67,
      "learning_rate": 5.473098330241188e-06,
      "loss": 0.5886,
      "step": 1440
    },
    {
      "epoch": 2.69,
      "learning_rate": 5.163883735312307e-06,
      "loss": 0.5781,
      "step": 1450
    },
    {
      "epoch": 2.71,
      "learning_rate": 4.854669140383426e-06,
      "loss": 0.5767,
      "step": 1460
    },
    {
      "epoch": 2.73,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 0.5859,
      "step": 1470
    },
    {
      "epoch": 2.75,
      "learning_rate": 4.236239950525665e-06,
      "loss": 0.5906,
      "step": 1480
    },
    {
      "epoch": 2.76,
      "learning_rate": 3.927025355596784e-06,
      "loss": 0.5621,
      "step": 1490
    },
    {
      "epoch": 2.78,
      "learning_rate": 3.6178107606679037e-06,
      "loss": 0.5505,
      "step": 1500
    },
    {
      "epoch": 2.78,
      "eval_avg_sts": 0.8425113202508864,
      "eval_sickr_spearman": 0.813630912035951,
      "eval_stsb_spearman": 0.8713917284658218,
      "step": 1500
    },
    {
      "epoch": 2.8,
      "learning_rate": 3.308596165739023e-06,
      "loss": 0.5843,
      "step": 1510
    },
    {
      "epoch": 2.82,
      "learning_rate": 2.9993815708101424e-06,
      "loss": 0.5518,
      "step": 1520
    },
    {
      "epoch": 2.84,
      "learning_rate": 2.690166975881262e-06,
      "loss": 0.5837,
      "step": 1530
    },
    {
      "epoch": 2.86,
      "learning_rate": 2.3809523809523808e-06,
      "loss": 0.5736,
      "step": 1540
    },
    {
      "epoch": 2.88,
      "learning_rate": 2.0717377860235006e-06,
      "loss": 0.5655,
      "step": 1550
    },
    {
      "epoch": 2.89,
      "learning_rate": 1.7625231910946195e-06,
      "loss": 0.5563,
      "step": 1560
    },
    {
      "epoch": 2.91,
      "learning_rate": 1.4533085961657391e-06,
      "loss": 0.5646,
      "step": 1570
    },
    {
      "epoch": 2.93,
      "learning_rate": 1.1440940012368585e-06,
      "loss": 0.5493,
      "step": 1580
    },
    {
      "epoch": 2.95,
      "learning_rate": 8.348794063079778e-07,
      "loss": 0.5594,
      "step": 1590
    },
    {
      "epoch": 2.97,
      "learning_rate": 5.25664811379097e-07,
      "loss": 0.5628,
      "step": 1600
    },
    {
      "epoch": 2.99,
      "learning_rate": 2.1645021645021646e-07,
      "loss": 0.59,
      "step": 1610
    },
    {
      "epoch": 3.0,
      "step": 1617,
      "train_runtime": 1218.5135,
      "train_samples_per_second": 1.327
    }
  ],
  "max_steps": 1617,
  "num_train_epochs": 3,
  "total_flos": 59643369959989248,
  "trial_name": null,
  "trial_params": null
}
|