{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 184,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010869565217391304,
      "grad_norm": 4.7562408971323125,
      "learning_rate": 1.0526315789473685e-06,
      "loss": 1.4045,
      "num_tokens": 90966.0,
      "step": 1
    },
    {
      "epoch": 0.021739130434782608,
      "grad_norm": 4.856641936347526,
      "learning_rate": 2.105263157894737e-06,
      "loss": 1.4286,
      "num_tokens": 176808.0,
      "step": 2
    },
    {
      "epoch": 0.03260869565217391,
      "grad_norm": 4.632629643475528,
      "learning_rate": 3.157894736842105e-06,
      "loss": 1.3767,
      "num_tokens": 268011.0,
      "step": 3
    },
    {
      "epoch": 0.043478260869565216,
      "grad_norm": 4.433390852741152,
      "learning_rate": 4.210526315789474e-06,
      "loss": 1.3293,
      "num_tokens": 369286.0,
      "step": 4
    },
    {
      "epoch": 0.05434782608695652,
      "grad_norm": 4.267197673905712,
      "learning_rate": 5.263157894736842e-06,
      "loss": 1.3839,
      "num_tokens": 465540.0,
      "step": 5
    },
    {
      "epoch": 0.06521739130434782,
      "grad_norm": 3.717050440728708,
      "learning_rate": 6.31578947368421e-06,
      "loss": 1.2858,
      "num_tokens": 567659.0,
      "step": 6
    },
    {
      "epoch": 0.07608695652173914,
      "grad_norm": 3.180745867673301,
      "learning_rate": 7.368421052631579e-06,
      "loss": 1.188,
      "num_tokens": 665757.0,
      "step": 7
    },
    {
      "epoch": 0.08695652173913043,
      "grad_norm": 2.756203802259368,
      "learning_rate": 8.421052631578948e-06,
      "loss": 1.2036,
      "num_tokens": 764351.0,
      "step": 8
    },
    {
      "epoch": 0.09782608695652174,
      "grad_norm": 2.722957346536631,
      "learning_rate": 9.473684210526315e-06,
      "loss": 1.1352,
      "num_tokens": 859181.0,
      "step": 9
    },
    {
      "epoch": 0.10869565217391304,
      "grad_norm": 3.149236707148637,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 1.0399,
      "num_tokens": 958891.0,
      "step": 10
    },
    {
      "epoch": 0.11956521739130435,
      "grad_norm": 2.5830391272833104,
      "learning_rate": 1.1578947368421053e-05,
      "loss": 1.0534,
      "num_tokens": 1057964.0,
      "step": 11
    },
    {
      "epoch": 0.13043478260869565,
      "grad_norm": 2.1386887474211687,
      "learning_rate": 1.263157894736842e-05,
      "loss": 0.9946,
      "num_tokens": 1150907.0,
      "step": 12
    },
    {
      "epoch": 0.14130434782608695,
      "grad_norm": 2.211121746514346,
      "learning_rate": 1.3684210526315791e-05,
      "loss": 1.0225,
      "num_tokens": 1238881.0,
      "step": 13
    },
    {
      "epoch": 0.15217391304347827,
      "grad_norm": 2.0317494078733,
      "learning_rate": 1.4736842105263159e-05,
      "loss": 0.9779,
      "num_tokens": 1336961.0,
      "step": 14
    },
    {
      "epoch": 0.16304347826086957,
      "grad_norm": 1.6535140053442847,
      "learning_rate": 1.578947368421053e-05,
      "loss": 0.9645,
      "num_tokens": 1427193.0,
      "step": 15
    },
    {
      "epoch": 0.17391304347826086,
      "grad_norm": 3.7204342856811095,
      "learning_rate": 1.6842105263157896e-05,
      "loss": 0.9152,
      "num_tokens": 1519751.0,
      "step": 16
    },
    {
      "epoch": 0.18478260869565216,
      "grad_norm": 1.5902509836923946,
      "learning_rate": 1.7894736842105264e-05,
      "loss": 0.9191,
      "num_tokens": 1616777.0,
      "step": 17
    },
    {
      "epoch": 0.1956521739130435,
      "grad_norm": 1.4483857314150967,
      "learning_rate": 1.894736842105263e-05,
      "loss": 0.8791,
      "num_tokens": 1714839.0,
      "step": 18
    },
    {
      "epoch": 0.20652173913043478,
      "grad_norm": 1.3039225121757552,
      "learning_rate": 2e-05,
      "loss": 0.9332,
      "num_tokens": 1815763.0,
      "step": 19
    },
    {
      "epoch": 0.21739130434782608,
      "grad_norm": 1.3126973712776373,
      "learning_rate": 1.9878787878787878e-05,
      "loss": 0.8782,
      "num_tokens": 1908693.0,
      "step": 20
    },
    {
      "epoch": 0.22826086956521738,
      "grad_norm": 1.4053049528153356,
      "learning_rate": 1.975757575757576e-05,
      "loss": 0.9232,
      "num_tokens": 2004172.0,
      "step": 21
    },
    {
      "epoch": 0.2391304347826087,
      "grad_norm": 1.1849138803900574,
      "learning_rate": 1.963636363636364e-05,
      "loss": 0.873,
      "num_tokens": 2119371.0,
      "step": 22
    },
    {
      "epoch": 0.25,
      "grad_norm": 2.01085287355782,
      "learning_rate": 1.9515151515151515e-05,
      "loss": 0.9279,
      "num_tokens": 2212596.0,
      "step": 23
    },
    {
      "epoch": 0.2608695652173913,
      "grad_norm": 1.2458008758973527,
      "learning_rate": 1.9393939393939395e-05,
      "loss": 0.8654,
      "num_tokens": 2294656.0,
      "step": 24
    },
    {
      "epoch": 0.2717391304347826,
      "grad_norm": 1.338715786372464,
      "learning_rate": 1.9272727272727275e-05,
      "loss": 0.8992,
      "num_tokens": 2391442.0,
      "step": 25
    },
    {
      "epoch": 0.2826086956521739,
      "grad_norm": 1.0044343113961611,
      "learning_rate": 1.9151515151515152e-05,
      "loss": 0.8313,
      "num_tokens": 2494467.0,
      "step": 26
    },
    {
      "epoch": 0.29347826086956524,
      "grad_norm": 1.1561251486922257,
      "learning_rate": 1.9030303030303032e-05,
      "loss": 0.8516,
      "num_tokens": 2584063.0,
      "step": 27
    },
    {
      "epoch": 0.30434782608695654,
      "grad_norm": 1.132844292406038,
      "learning_rate": 1.8909090909090912e-05,
      "loss": 0.7765,
      "num_tokens": 2670487.0,
      "step": 28
    },
    {
      "epoch": 0.31521739130434784,
      "grad_norm": 1.0830282801682392,
      "learning_rate": 1.8787878787878792e-05,
      "loss": 0.855,
      "num_tokens": 2762813.0,
      "step": 29
    },
    {
      "epoch": 0.32608695652173914,
      "grad_norm": 1.0350686000005194,
      "learning_rate": 1.866666666666667e-05,
      "loss": 0.8527,
      "num_tokens": 2850331.0,
      "step": 30
    },
    {
      "epoch": 0.33695652173913043,
      "grad_norm": 1.0369499924518195,
      "learning_rate": 1.8545454545454545e-05,
      "loss": 0.7974,
      "num_tokens": 2942596.0,
      "step": 31
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 1.1454834580125053,
      "learning_rate": 1.8424242424242425e-05,
      "loss": 0.8509,
      "num_tokens": 3039852.0,
      "step": 32
    },
    {
      "epoch": 0.358695652173913,
      "grad_norm": 1.0966923132147153,
      "learning_rate": 1.8303030303030305e-05,
      "loss": 0.8426,
      "num_tokens": 3136550.0,
      "step": 33
    },
    {
      "epoch": 0.3695652173913043,
      "grad_norm": 0.9615713216664558,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 0.8209,
      "num_tokens": 3226573.0,
      "step": 34
    },
    {
      "epoch": 0.3804347826086957,
      "grad_norm": 1.0463868635910665,
      "learning_rate": 1.8060606060606062e-05,
      "loss": 0.8322,
      "num_tokens": 3319993.0,
      "step": 35
    },
    {
      "epoch": 0.391304347826087,
      "grad_norm": 0.9016694267436932,
      "learning_rate": 1.7939393939393942e-05,
      "loss": 0.8494,
      "num_tokens": 3409783.0,
      "step": 36
    },
    {
      "epoch": 0.40217391304347827,
      "grad_norm": 0.8120321544484976,
      "learning_rate": 1.781818181818182e-05,
      "loss": 0.8246,
      "num_tokens": 3508366.0,
      "step": 37
    },
    {
      "epoch": 0.41304347826086957,
      "grad_norm": 0.7992720431772786,
      "learning_rate": 1.76969696969697e-05,
      "loss": 0.7703,
      "num_tokens": 3602876.0,
      "step": 38
    },
    {
      "epoch": 0.42391304347826086,
      "grad_norm": 0.7908788366960535,
      "learning_rate": 1.7575757575757576e-05,
      "loss": 0.8249,
      "num_tokens": 3697241.0,
      "step": 39
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 0.8154728597756308,
      "learning_rate": 1.7454545454545456e-05,
      "loss": 0.825,
      "num_tokens": 3783861.0,
      "step": 40
    },
    {
      "epoch": 0.44565217391304346,
      "grad_norm": 0.8244532763608124,
      "learning_rate": 1.7333333333333336e-05,
      "loss": 0.845,
      "num_tokens": 3884088.0,
      "step": 41
    },
    {
      "epoch": 0.45652173913043476,
      "grad_norm": 0.7694222272972445,
      "learning_rate": 1.7212121212121212e-05,
      "loss": 0.7804,
      "num_tokens": 3972883.0,
      "step": 42
    },
    {
      "epoch": 0.4673913043478261,
      "grad_norm": 0.7461120769896388,
      "learning_rate": 1.7090909090909092e-05,
      "loss": 0.7936,
      "num_tokens": 4065958.0,
      "step": 43
    },
    {
      "epoch": 0.4782608695652174,
      "grad_norm": 0.7808686279876842,
      "learning_rate": 1.6969696969696972e-05,
      "loss": 0.8452,
      "num_tokens": 4164566.0,
      "step": 44
    },
    {
      "epoch": 0.4891304347826087,
      "grad_norm": 0.7343804102094451,
      "learning_rate": 1.684848484848485e-05,
      "loss": 0.8031,
      "num_tokens": 4254036.0,
      "step": 45
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.7439703254330587,
      "learning_rate": 1.672727272727273e-05,
      "loss": 0.8043,
      "num_tokens": 4340424.0,
      "step": 46
    },
    {
      "epoch": 0.5108695652173914,
      "grad_norm": 0.7396028883918819,
      "learning_rate": 1.660606060606061e-05,
      "loss": 0.7837,
      "num_tokens": 4436870.0,
      "step": 47
    },
    {
      "epoch": 0.5217391304347826,
      "grad_norm": 0.7608846409786219,
      "learning_rate": 1.6484848484848486e-05,
      "loss": 0.8515,
      "num_tokens": 4530847.0,
      "step": 48
    },
    {
      "epoch": 0.532608695652174,
      "grad_norm": 0.7181352448640346,
      "learning_rate": 1.6363636363636366e-05,
      "loss": 0.8198,
      "num_tokens": 4624892.0,
      "step": 49
    },
    {
      "epoch": 0.5434782608695652,
      "grad_norm": 0.7087375538457228,
      "learning_rate": 1.6242424242424243e-05,
      "loss": 0.8,
      "num_tokens": 4726543.0,
      "step": 50
    },
    {
      "epoch": 0.5543478260869565,
      "grad_norm": 0.7463024608938168,
      "learning_rate": 1.6121212121212123e-05,
      "loss": 0.7789,
      "num_tokens": 4819956.0,
      "step": 51
    },
    {
      "epoch": 0.5652173913043478,
      "grad_norm": 0.7442588718390604,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.8054,
      "num_tokens": 4917035.0,
      "step": 52
    },
    {
      "epoch": 0.5760869565217391,
      "grad_norm": 0.7183312987900197,
      "learning_rate": 1.587878787878788e-05,
      "loss": 0.7923,
      "num_tokens": 5017078.0,
      "step": 53
    },
    {
      "epoch": 0.5869565217391305,
      "grad_norm": 0.7212235895921912,
      "learning_rate": 1.575757575757576e-05,
      "loss": 0.7816,
      "num_tokens": 5110982.0,
      "step": 54
    },
    {
      "epoch": 0.5978260869565217,
      "grad_norm": 0.6984711344390122,
      "learning_rate": 1.563636363636364e-05,
      "loss": 0.802,
      "num_tokens": 5200799.0,
      "step": 55
    },
    {
      "epoch": 0.6086956521739131,
      "grad_norm": 0.7262030804489741,
      "learning_rate": 1.5515151515151516e-05,
      "loss": 0.7887,
      "num_tokens": 5296443.0,
      "step": 56
    },
    {
      "epoch": 0.6195652173913043,
      "grad_norm": 0.7665109508827059,
      "learning_rate": 1.5393939393939393e-05,
      "loss": 0.8081,
      "num_tokens": 5392587.0,
      "step": 57
    },
    {
      "epoch": 0.6304347826086957,
      "grad_norm": 0.7222105761333159,
      "learning_rate": 1.5272727272727276e-05,
      "loss": 0.7779,
      "num_tokens": 5487722.0,
      "step": 58
    },
    {
      "epoch": 0.6413043478260869,
      "grad_norm": 0.7297075918942776,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.7518,
      "num_tokens": 5585642.0,
      "step": 59
    },
    {
      "epoch": 0.6521739130434783,
      "grad_norm": 0.7571906625686418,
      "learning_rate": 1.5030303030303031e-05,
      "loss": 0.7858,
      "num_tokens": 5675685.0,
      "step": 60
    },
    {
      "epoch": 0.6630434782608695,
      "grad_norm": 0.7361173105602014,
      "learning_rate": 1.4909090909090911e-05,
      "loss": 0.8163,
      "num_tokens": 5766441.0,
      "step": 61
    },
    {
      "epoch": 0.6739130434782609,
      "grad_norm": 0.7105038659620458,
      "learning_rate": 1.478787878787879e-05,
      "loss": 0.7505,
      "num_tokens": 5856978.0,
      "step": 62
    },
    {
      "epoch": 0.6847826086956522,
      "grad_norm": 0.6825484337570947,
      "learning_rate": 1.4666666666666666e-05,
      "loss": 0.7751,
      "num_tokens": 5941585.0,
      "step": 63
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 0.7314955103489276,
      "learning_rate": 1.4545454545454546e-05,
      "loss": 0.8163,
      "num_tokens": 6043058.0,
      "step": 64
    },
    {
      "epoch": 0.7065217391304348,
      "grad_norm": 0.7103929889225186,
      "learning_rate": 1.4424242424242425e-05,
      "loss": 0.7832,
      "num_tokens": 6147716.0,
      "step": 65
    },
    {
      "epoch": 0.717391304347826,
      "grad_norm": 0.6793775565981599,
      "learning_rate": 1.4303030303030305e-05,
      "loss": 0.7264,
      "num_tokens": 6238620.0,
      "step": 66
    },
    {
      "epoch": 0.7282608695652174,
      "grad_norm": 0.7124999007971334,
      "learning_rate": 1.4181818181818183e-05,
      "loss": 0.7876,
      "num_tokens": 6339589.0,
      "step": 67
    },
    {
      "epoch": 0.7391304347826086,
      "grad_norm": 0.6758760592365682,
      "learning_rate": 1.4060606060606061e-05,
      "loss": 0.8137,
      "num_tokens": 6452148.0,
      "step": 68
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.7083257729536743,
      "learning_rate": 1.3939393939393942e-05,
      "loss": 0.7611,
      "num_tokens": 6546822.0,
      "step": 69
    },
    {
      "epoch": 0.7608695652173914,
      "grad_norm": 0.6894843161471015,
      "learning_rate": 1.381818181818182e-05,
      "loss": 0.8409,
      "num_tokens": 6652603.0,
      "step": 70
    },
    {
      "epoch": 0.7717391304347826,
      "grad_norm": 0.7320922145311975,
      "learning_rate": 1.3696969696969698e-05,
      "loss": 0.7523,
      "num_tokens": 6745015.0,
      "step": 71
    },
    {
      "epoch": 0.782608695652174,
      "grad_norm": 0.7040695482776516,
      "learning_rate": 1.3575757575757578e-05,
      "loss": 0.7612,
      "num_tokens": 6838204.0,
      "step": 72
    },
    {
      "epoch": 0.7934782608695652,
      "grad_norm": 0.6847801952071528,
      "learning_rate": 1.3454545454545455e-05,
      "loss": 0.8138,
      "num_tokens": 6933388.0,
      "step": 73
    },
    {
      "epoch": 0.8043478260869565,
      "grad_norm": 0.7550074032673957,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.7649,
      "num_tokens": 7029939.0,
      "step": 74
    },
    {
      "epoch": 0.8152173913043478,
      "grad_norm": 0.6925774304998896,
      "learning_rate": 1.3212121212121213e-05,
      "loss": 0.7576,
      "num_tokens": 7128091.0,
      "step": 75
    },
    {
      "epoch": 0.8260869565217391,
      "grad_norm": 0.7562225545168595,
      "learning_rate": 1.3090909090909092e-05,
      "loss": 0.7916,
      "num_tokens": 7214845.0,
      "step": 76
    },
    {
      "epoch": 0.8369565217391305,
      "grad_norm": 0.6510325534353791,
      "learning_rate": 1.296969696969697e-05,
      "loss": 0.7825,
      "num_tokens": 7323115.0,
      "step": 77
    },
    {
      "epoch": 0.8478260869565217,
      "grad_norm": 0.7026385885074842,
      "learning_rate": 1.284848484848485e-05,
      "loss": 0.7866,
      "num_tokens": 7420683.0,
      "step": 78
    },
    {
      "epoch": 0.8586956521739131,
      "grad_norm": 0.7213706799641426,
      "learning_rate": 1.2727272727272728e-05,
      "loss": 0.7789,
      "num_tokens": 7504805.0,
      "step": 79
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 0.7056975956417906,
      "learning_rate": 1.2606060606060607e-05,
      "loss": 0.779,
      "num_tokens": 7594818.0,
      "step": 80
    },
    {
      "epoch": 0.8804347826086957,
      "grad_norm": 0.6742743459752469,
      "learning_rate": 1.2484848484848487e-05,
      "loss": 0.7664,
      "num_tokens": 7687306.0,
      "step": 81
    },
    {
      "epoch": 0.8913043478260869,
      "grad_norm": 0.6788480168543541,
      "learning_rate": 1.2363636363636364e-05,
      "loss": 0.7481,
      "num_tokens": 7787926.0,
      "step": 82
    },
    {
      "epoch": 0.9021739130434783,
      "grad_norm": 0.7159837226779882,
      "learning_rate": 1.2242424242424242e-05,
      "loss": 0.7658,
      "num_tokens": 7886472.0,
      "step": 83
    },
    {
      "epoch": 0.9130434782608695,
      "grad_norm": 0.6843998675172349,
      "learning_rate": 1.2121212121212122e-05,
      "loss": 0.8125,
      "num_tokens": 7979894.0,
      "step": 84
    },
    {
      "epoch": 0.9239130434782609,
      "grad_norm": 0.7299584101150774,
      "learning_rate": 1.2e-05,
      "loss": 0.7764,
      "num_tokens": 8072266.0,
      "step": 85
    },
    {
      "epoch": 0.9347826086956522,
      "grad_norm": 0.6972344562839721,
      "learning_rate": 1.187878787878788e-05,
      "loss": 0.7486,
      "num_tokens": 8169019.0,
      "step": 86
    },
    {
      "epoch": 0.9456521739130435,
      "grad_norm": 0.695628203135494,
      "learning_rate": 1.1757575757575759e-05,
      "loss": 0.7216,
      "num_tokens": 8263517.0,
      "step": 87
    },
    {
      "epoch": 0.9565217391304348,
      "grad_norm": 0.7199580320946095,
      "learning_rate": 1.1636363636363637e-05,
      "loss": 0.753,
      "num_tokens": 8354397.0,
      "step": 88
    },
    {
      "epoch": 0.967391304347826,
      "grad_norm": 0.688593706715687,
      "learning_rate": 1.1515151515151517e-05,
      "loss": 0.7736,
      "num_tokens": 8457076.0,
      "step": 89
    },
    {
      "epoch": 0.9782608695652174,
      "grad_norm": 0.7039533145498842,
      "learning_rate": 1.1393939393939395e-05,
      "loss": 0.747,
      "num_tokens": 8543392.0,
      "step": 90
    },
    {
      "epoch": 0.9891304347826086,
      "grad_norm": 0.7417219845924928,
      "learning_rate": 1.1272727272727272e-05,
      "loss": 0.8096,
      "num_tokens": 8634673.0,
      "step": 91
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.716787642824306,
      "learning_rate": 1.1151515151515154e-05,
      "loss": 0.7479,
      "num_tokens": 8737000.0,
      "step": 92
    },
    {
      "epoch": 1.0108695652173914,
      "grad_norm": 0.9557939792897692,
      "learning_rate": 1.103030303030303e-05,
      "loss": 0.5859,
      "num_tokens": 8841242.0,
      "step": 93
    },
    {
      "epoch": 1.0217391304347827,
      "grad_norm": 0.9640008403759153,
      "learning_rate": 1.0909090909090909e-05,
      "loss": 0.5499,
      "num_tokens": 8939461.0,
      "step": 94
    },
    {
      "epoch": 1.0326086956521738,
      "grad_norm": 0.8358505468758168,
      "learning_rate": 1.0787878787878789e-05,
      "loss": 0.5827,
      "num_tokens": 9041498.0,
      "step": 95
    },
    {
      "epoch": 1.0434782608695652,
      "grad_norm": 0.8333101189839462,
      "learning_rate": 1.0666666666666667e-05,
      "loss": 0.5649,
      "num_tokens": 9135657.0,
      "step": 96
    },
    {
      "epoch": 1.0543478260869565,
      "grad_norm": 0.8267054342321523,
      "learning_rate": 1.0545454545454546e-05,
      "loss": 0.4877,
      "num_tokens": 9223861.0,
      "step": 97
    },
    {
      "epoch": 1.065217391304348,
      "grad_norm": 0.9083108087266245,
      "learning_rate": 1.0424242424242426e-05,
      "loss": 0.5178,
      "num_tokens": 9306683.0,
      "step": 98
    },
    {
      "epoch": 1.0760869565217392,
      "grad_norm": 1.0509887548129675,
      "learning_rate": 1.0303030303030304e-05,
      "loss": 0.5147,
      "num_tokens": 9403981.0,
      "step": 99
    },
    {
      "epoch": 1.0869565217391304,
      "grad_norm": 0.9617465165511758,
      "learning_rate": 1.0181818181818182e-05,
      "loss": 0.5772,
      "num_tokens": 9510065.0,
      "step": 100
    },
    {
      "epoch": 1.0978260869565217,
      "grad_norm": 0.8289148974326247,
      "learning_rate": 1.0060606060606062e-05,
      "loss": 0.4808,
      "num_tokens": 9594692.0,
      "step": 101
    },
    {
      "epoch": 1.108695652173913,
      "grad_norm": 0.7858587143009836,
      "learning_rate": 9.939393939393939e-06,
      "loss": 0.4966,
      "num_tokens": 9686151.0,
      "step": 102
    },
    {
      "epoch": 1.1195652173913044,
      "grad_norm": 0.7571756541563724,
      "learning_rate": 9.81818181818182e-06,
      "loss": 0.5566,
      "num_tokens": 9785432.0,
      "step": 103
    },
    {
      "epoch": 1.1304347826086956,
      "grad_norm": 0.7871226692453429,
      "learning_rate": 9.696969696969698e-06,
      "loss": 0.5247,
      "num_tokens": 9872855.0,
      "step": 104
    },
    {
      "epoch": 1.141304347826087,
      "grad_norm": 0.7536149334148949,
      "learning_rate": 9.575757575757576e-06,
      "loss": 0.5696,
      "num_tokens": 9966227.0,
      "step": 105
    },
    {
      "epoch": 1.1521739130434783,
      "grad_norm": 0.7674793225688019,
      "learning_rate": 9.454545454545456e-06,
      "loss": 0.5085,
      "num_tokens": 10066104.0,
      "step": 106
    },
    {
      "epoch": 1.1630434782608696,
      "grad_norm": 0.6536542113768675,
      "learning_rate": 9.333333333333334e-06,
      "loss": 0.4929,
      "num_tokens": 10163479.0,
      "step": 107
    },
    {
      "epoch": 1.1739130434782608,
      "grad_norm": 0.705223538854848,
      "learning_rate": 9.212121212121213e-06,
      "loss": 0.5041,
      "num_tokens": 10256103.0,
      "step": 108
    },
    {
      "epoch": 1.184782608695652,
      "grad_norm": 0.7303409820035304,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.5068,
      "num_tokens": 10346350.0,
      "step": 109
    },
    {
      "epoch": 1.1956521739130435,
      "grad_norm": 0.669830511929235,
      "learning_rate": 8.969696969696971e-06,
      "loss": 0.5058,
      "num_tokens": 10446545.0,
      "step": 110
    },
    {
      "epoch": 1.2065217391304348,
      "grad_norm": 0.6616180902943235,
      "learning_rate": 8.84848484848485e-06,
      "loss": 0.5212,
      "num_tokens": 10543987.0,
      "step": 111
    },
    {
      "epoch": 1.2173913043478262,
      "grad_norm": 0.7140780588825608,
      "learning_rate": 8.727272727272728e-06,
      "loss": 0.5542,
      "num_tokens": 10637982.0,
      "step": 112
    },
    {
      "epoch": 1.2282608695652173,
      "grad_norm": 0.6797454711812516,
      "learning_rate": 8.606060606060606e-06,
      "loss": 0.5146,
      "num_tokens": 10736295.0,
      "step": 113
    },
    {
      "epoch": 1.2391304347826086,
      "grad_norm": 0.6970790226374188,
      "learning_rate": 8.484848484848486e-06,
      "loss": 0.4858,
      "num_tokens": 10838089.0,
      "step": 114
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.7455739079700027,
      "learning_rate": 8.363636363636365e-06,
      "loss": 0.4863,
      "num_tokens": 10924837.0,
      "step": 115
    },
    {
      "epoch": 1.2608695652173914,
      "grad_norm": 0.6801436530929313,
      "learning_rate": 8.242424242424243e-06,
      "loss": 0.5271,
      "num_tokens": 11021046.0,
      "step": 116
    },
    {
      "epoch": 1.2717391304347827,
      "grad_norm": 0.6727217360986417,
      "learning_rate": 8.121212121212121e-06,
      "loss": 0.5162,
      "num_tokens": 11117240.0,
      "step": 117
    },
    {
      "epoch": 1.2826086956521738,
      "grad_norm": 0.7023642689024054,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.4953,
      "num_tokens": 11210211.0,
      "step": 118
    },
    {
      "epoch": 1.2934782608695652,
      "grad_norm": 0.6447357115502464,
      "learning_rate": 7.87878787878788e-06,
      "loss": 0.5144,
      "num_tokens": 11303329.0,
      "step": 119
    },
    {
      "epoch": 1.3043478260869565,
      "grad_norm": 0.6492607932024856,
      "learning_rate": 7.757575757575758e-06,
      "loss": 0.5034,
      "num_tokens": 11394106.0,
      "step": 120
    },
    {
      "epoch": 1.315217391304348,
      "grad_norm": 0.6515181308514346,
      "learning_rate": 7.636363636363638e-06,
      "loss": 0.535,
      "num_tokens": 11490446.0,
      "step": 121
    },
    {
      "epoch": 1.3260869565217392,
      "grad_norm": 0.672198656598516,
      "learning_rate": 7.515151515151516e-06,
      "loss": 0.5065,
      "num_tokens": 11581978.0,
      "step": 122
    },
    {
      "epoch": 1.3369565217391304,
      "grad_norm": 0.6741993377594862,
      "learning_rate": 7.393939393939395e-06,
      "loss": 0.4874,
      "num_tokens": 11665181.0,
      "step": 123
    },
    {
      "epoch": 1.3478260869565217,
      "grad_norm": 0.6125558254135477,
      "learning_rate": 7.272727272727273e-06,
      "loss": 0.5178,
      "num_tokens": 11764989.0,
      "step": 124
    },
    {
      "epoch": 1.358695652173913,
      "grad_norm": 0.6452975872864739,
      "learning_rate": 7.151515151515152e-06,
      "loss": 0.5048,
      "num_tokens": 11860837.0,
      "step": 125
    },
    {
      "epoch": 1.3695652173913042,
      "grad_norm": 0.6942100514879066,
      "learning_rate": 7.030303030303031e-06,
      "loss": 0.5337,
      "num_tokens": 11949817.0,
      "step": 126
    },
    {
      "epoch": 1.3804347826086958,
      "grad_norm": 0.6803177844769924,
      "learning_rate": 6.90909090909091e-06,
      "loss": 0.5114,
      "num_tokens": 12046147.0,
      "step": 127
    },
    {
      "epoch": 1.391304347826087,
      "grad_norm": 0.6485232956728539,
      "learning_rate": 6.787878787878789e-06,
      "loss": 0.5136,
      "num_tokens": 12155046.0,
      "step": 128
    },
    {
      "epoch": 1.4021739130434783,
      "grad_norm": 0.6399901029660079,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.4899,
      "num_tokens": 12245026.0,
      "step": 129
    },
    {
      "epoch": 1.4130434782608696,
      "grad_norm": 0.6778428524940225,
      "learning_rate": 6.545454545454546e-06,
      "loss": 0.5007,
      "num_tokens": 12339918.0,
      "step": 130
    },
    {
      "epoch": 1.4239130434782608,
      "grad_norm": 0.6506595090929513,
      "learning_rate": 6.424242424242425e-06,
      "loss": 0.5123,
      "num_tokens": 12431057.0,
      "step": 131
    },
    {
      "epoch": 1.434782608695652,
      "grad_norm": 0.6042493738676704,
      "learning_rate": 6.303030303030303e-06,
      "loss": 0.5028,
      "num_tokens": 12528957.0,
      "step": 132
    },
    {
      "epoch": 1.4456521739130435,
      "grad_norm": 0.6655662091714003,
      "learning_rate": 6.181818181818182e-06,
      "loss": 0.5159,
      "num_tokens": 12630846.0,
      "step": 133
    },
    {
      "epoch": 1.4565217391304348,
      "grad_norm": 0.6581649207568613,
      "learning_rate": 6.060606060606061e-06,
      "loss": 0.5468,
      "num_tokens": 12734120.0,
      "step": 134
    },
    {
      "epoch": 1.4673913043478262,
      "grad_norm": 0.6792172891456207,
      "learning_rate": 5.93939393939394e-06,
      "loss": 0.515,
      "num_tokens": 12825758.0,
      "step": 135
    },
    {
      "epoch": 1.4782608695652173,
      "grad_norm": 0.6486207741620927,
      "learning_rate": 5.8181818181818185e-06,
      "loss": 0.4768,
      "num_tokens": 12915706.0,
      "step": 136
    },
    {
      "epoch": 1.4891304347826086,
      "grad_norm": 0.6229517910332318,
      "learning_rate": 5.696969696969698e-06,
      "loss": 0.5253,
      "num_tokens": 13006745.0,
      "step": 137
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.6931899962995233,
      "learning_rate": 5.575757575757577e-06,
      "loss": 0.518,
      "num_tokens": 13094837.0,
      "step": 138
    },
    {
      "epoch": 1.5108695652173914,
      "grad_norm": 0.6790435623873506,
      "learning_rate": 5.4545454545454545e-06,
      "loss": 0.5225,
      "num_tokens": 13186663.0,
      "step": 139
    },
    {
      "epoch": 1.5217391304347827,
      "grad_norm": 0.6370361345960944,
      "learning_rate": 5.333333333333334e-06,
      "loss": 0.5281,
      "num_tokens": 13275324.0,
      "step": 140
    },
    {
      "epoch": 1.5326086956521738,
      "grad_norm": 0.6988457221148147,
      "learning_rate": 5.212121212121213e-06,
      "loss": 0.4471,
      "num_tokens": 13354804.0,
      "step": 141
    },
    {
      "epoch": 1.5434782608695652,
      "grad_norm": 0.6267926090215254,
      "learning_rate": 5.090909090909091e-06,
      "loss": 0.5081,
      "num_tokens": 13459278.0,
      "step": 142
    },
    {
      "epoch": 1.5543478260869565,
      "grad_norm": 0.6034410178549162,
      "learning_rate": 4.9696969696969696e-06,
      "loss": 0.4722,
      "num_tokens": 13549338.0,
      "step": 143
    },
    {
      "epoch": 1.5652173913043477,
      "grad_norm": 0.7074387736221774,
      "learning_rate": 4.848484848484849e-06,
      "loss": 0.5115,
      "num_tokens": 13642023.0,
      "step": 144
    },
    {
      "epoch": 1.5760869565217392,
      "grad_norm": 0.625505288129055,
      "learning_rate": 4.727272727272728e-06,
      "loss": 0.4854,
      "num_tokens": 13734577.0,
      "step": 145
    },
    {
      "epoch": 1.5869565217391304,
      "grad_norm": 0.6337610272860343,
      "learning_rate": 4.606060606060606e-06,
      "loss": 0.5218,
      "num_tokens": 13827853.0,
      "step": 146
    },
    {
      "epoch": 1.5978260869565217,
      "grad_norm": 0.5960020890354443,
      "learning_rate": 4.4848484848484855e-06,
      "loss": 0.4842,
      "num_tokens": 13926421.0,
      "step": 147
    },
    {
      "epoch": 1.608695652173913,
      "grad_norm": 0.6099226198445001,
      "learning_rate": 4.363636363636364e-06,
      "loss": 0.5045,
      "num_tokens": 14027160.0,
      "step": 148
    },
    {
      "epoch": 1.6195652173913042,
      "grad_norm": 0.6403047778481467,
      "learning_rate": 4.242424242424243e-06,
      "loss": 0.5112,
      "num_tokens": 14123445.0,
      "step": 149
    },
    {
      "epoch": 1.6304347826086958,
      "grad_norm": 0.6428678963084046,
      "learning_rate": 4.1212121212121215e-06,
      "loss": 0.4792,
      "num_tokens": 14213841.0,
      "step": 150
    },
    {
      "epoch": 1.641304347826087,
      "grad_norm": 0.682404747047711,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.4825,
      "num_tokens": 14299391.0,
      "step": 151
    },
    {
      "epoch": 1.6521739130434783,
      "grad_norm": 0.6392379137193949,
      "learning_rate": 3.878787878787879e-06,
      "loss": 0.5237,
      "num_tokens": 14406803.0,
      "step": 152
    },
    {
      "epoch": 1.6630434782608696,
      "grad_norm": 0.6336927405705557,
      "learning_rate": 3.757575757575758e-06,
      "loss": 0.4933,
      "num_tokens": 14495074.0,
      "step": 153
    },
    {
      "epoch": 1.6739130434782608,
      "grad_norm": 0.613723947203496,
      "learning_rate": 3.6363636363636366e-06,
      "loss": 0.4987,
      "num_tokens": 14583035.0,
      "step": 154
    },
    {
      "epoch": 1.6847826086956523,
      "grad_norm": 0.6454282865195516,
      "learning_rate": 3.5151515151515154e-06,
      "loss": 0.4844,
      "num_tokens": 14675739.0,
      "step": 155
    },
    {
      "epoch": 1.6956521739130435,
      "grad_norm": 0.5744835923909174,
      "learning_rate": 3.3939393939393946e-06,
      "loss": 0.5221,
      "num_tokens": 14771295.0,
      "step": 156
    },
    {
      "epoch": 1.7065217391304348,
      "grad_norm": 0.6513316019714479,
      "learning_rate": 3.272727272727273e-06,
      "loss": 0.5196,
      "num_tokens": 14868828.0,
      "step": 157
    },
    {
      "epoch": 1.7173913043478262,
      "grad_norm": 0.6051344138316549,
      "learning_rate": 3.1515151515151517e-06,
      "loss": 0.5399,
      "num_tokens": 14962321.0,
      "step": 158
    },
    {
      "epoch": 1.7282608695652173,
      "grad_norm": 0.5997374654265664,
      "learning_rate": 3.0303030303030305e-06,
      "loss": 0.566,
      "num_tokens": 15071474.0,
      "step": 159
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 0.6172378822692283,
      "learning_rate": 2.9090909090909093e-06,
      "loss": 0.5205,
      "num_tokens": 15172326.0,
      "step": 160
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.6790771644844449,
      "learning_rate": 2.7878787878787885e-06,
      "loss": 0.5405,
      "num_tokens": 15263118.0,
      "step": 161
    },
    {
      "epoch": 1.7608695652173914,
      "grad_norm": 0.6250126730731829,
      "learning_rate": 2.666666666666667e-06,
      "loss": 0.5358,
      "num_tokens": 15366482.0,
      "step": 162
    },
    {
      "epoch": 1.7717391304347827,
      "grad_norm": 0.616414057408985,
      "learning_rate": 2.5454545454545456e-06,
      "loss": 0.5124,
      "num_tokens": 15466519.0,
      "step": 163
    },
    {
      "epoch": 1.7826086956521738,
      "grad_norm": 0.6196261922311431,
      "learning_rate": 2.4242424242424244e-06,
      "loss": 0.5011,
      "num_tokens": 15568813.0,
      "step": 164
    },
    {
      "epoch": 1.7934782608695652,
      "grad_norm": 0.6012492351790968,
      "learning_rate": 2.303030303030303e-06,
      "loss": 0.4866,
      "num_tokens": 15663551.0,
      "step": 165
    },
    {
      "epoch": 1.8043478260869565,
      "grad_norm": 0.6093660563249755,
      "learning_rate": 2.181818181818182e-06,
      "loss": 0.4806,
      "num_tokens": 15751970.0,
      "step": 166
    },
    {
      "epoch": 1.8152173913043477,
      "grad_norm": 0.5733134815640453,
      "learning_rate": 2.0606060606060607e-06,
      "loss": 0.5342,
      "num_tokens": 15851852.0,
      "step": 167
    },
    {
      "epoch": 1.8260869565217392,
      "grad_norm": 0.5629607438246448,
      "learning_rate": 1.9393939393939395e-06,
      "loss": 0.4899,
      "num_tokens": 15953339.0,
      "step": 168
    },
    {
      "epoch": 1.8369565217391304,
      "grad_norm": 0.5705735862059056,
      "learning_rate": 1.8181818181818183e-06,
      "loss": 0.5279,
      "num_tokens": 16049218.0,
      "step": 169
    },
    {
      "epoch": 1.8478260869565217,
      "grad_norm": 0.6346619776519064,
      "learning_rate": 1.6969696969696973e-06,
      "loss": 0.5129,
      "num_tokens": 16148821.0,
      "step": 170
    },
    {
      "epoch": 1.858695652173913,
      "grad_norm": 0.6442961172463525,
      "learning_rate": 1.5757575757575759e-06,
      "loss": 0.4936,
      "num_tokens": 16236603.0,
      "step": 171
    },
    {
      "epoch": 1.8695652173913042,
      "grad_norm": 0.6198075225575228,
      "learning_rate": 1.4545454545454546e-06,
      "loss": 0.5051,
      "num_tokens": 16331317.0,
      "step": 172
    },
    {
      "epoch": 1.8804347826086958,
      "grad_norm": 0.5952174959924715,
      "learning_rate": 1.3333333333333334e-06,
      "loss": 0.5081,
      "num_tokens": 16435421.0,
      "step": 173
    },
    {
      "epoch": 1.891304347826087,
      "grad_norm": 0.608740415655138,
      "learning_rate": 1.2121212121212122e-06,
      "loss": 0.4985,
      "num_tokens": 16525942.0,
      "step": 174
    },
    {
      "epoch": 1.9021739130434783,
      "grad_norm": 0.6186344271280536,
      "learning_rate": 1.090909090909091e-06,
      "loss": 0.4966,
      "num_tokens": 16612253.0,
      "step": 175
    },
    {
      "epoch": 1.9130434782608696,
      "grad_norm": 0.6128028556805487,
      "learning_rate": 9.696969696969698e-07,
      "loss": 0.4677,
      "num_tokens": 16695763.0,
      "step": 176
    },
    {
      "epoch": 1.9239130434782608,
      "grad_norm": 0.6186139521919142,
      "learning_rate": 8.484848484848486e-07,
      "loss": 0.5063,
      "num_tokens": 16792394.0,
      "step": 177
    },
    {
      "epoch": 1.9347826086956523,
      "grad_norm": 0.621118150443857,
      "learning_rate": 7.272727272727273e-07,
      "loss": 0.4859,
      "num_tokens": 16887893.0,
      "step": 178
    },
    {
      "epoch": 1.9456521739130435,
      "grad_norm": 0.63455296339652,
      "learning_rate": 6.060606060606061e-07,
      "loss": 0.5323,
      "num_tokens": 16978736.0,
      "step": 179
    },
    {
      "epoch": 1.9565217391304348,
      "grad_norm": 0.6338215890082622,
      "learning_rate": 4.848484848484849e-07,
      "loss": 0.5063,
      "num_tokens": 17069163.0,
      "step": 180
    },
    {
      "epoch": 1.9673913043478262,
      "grad_norm": 0.6122769745533587,
      "learning_rate": 3.6363636363636366e-07,
      "loss": 0.5523,
      "num_tokens": 17170696.0,
      "step": 181
    },
    {
      "epoch": 1.9782608695652173,
      "grad_norm": 0.5640059310251839,
      "learning_rate": 2.4242424242424244e-07,
      "loss": 0.5126,
      "num_tokens": 17276399.0,
      "step": 182
    },
    {
      "epoch": 1.9891304347826086,
      "grad_norm": 0.67719515967601,
      "learning_rate": 1.2121212121212122e-07,
      "loss": 0.549,
      "num_tokens": 17377036.0,
      "step": 183
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.5303896052463619,
      "learning_rate": 0.0,
      "loss": 0.476,
      "num_tokens": 17486096.0,
      "step": 184
    }
  ],
  "logging_steps": 1,
  "max_steps": 184,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 198078707531776.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}