{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 82,
  "global_step": 245,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004081632653061225,
      "grad_norm": 0.03618604317307472,
      "learning_rate": 2e-05,
      "loss": 1.1867,
      "step": 1
    },
    {
      "epoch": 0.004081632653061225,
      "eval_loss": 1.2217298746109009,
      "eval_runtime": 117.7701,
      "eval_samples_per_second": 20.735,
      "eval_steps_per_second": 10.368,
      "step": 1
    },
    {
      "epoch": 0.00816326530612245,
      "grad_norm": 0.03361086547374725,
      "learning_rate": 4e-05,
      "loss": 1.1923,
      "step": 2
    },
    {
      "epoch": 0.012244897959183673,
      "grad_norm": 0.034457892179489136,
      "learning_rate": 6e-05,
      "loss": 1.0744,
      "step": 3
    },
    {
      "epoch": 0.0163265306122449,
      "grad_norm": 0.03408154472708702,
      "learning_rate": 8e-05,
      "loss": 1.2796,
      "step": 4
    },
    {
      "epoch": 0.02040816326530612,
      "grad_norm": 0.03475559875369072,
      "learning_rate": 0.0001,
      "loss": 1.2709,
      "step": 5
    },
    {
      "epoch": 0.024489795918367346,
      "grad_norm": 0.03768599033355713,
      "learning_rate": 0.00012,
      "loss": 1.0895,
      "step": 6
    },
    {
      "epoch": 0.02857142857142857,
      "grad_norm": 0.03434902802109718,
      "learning_rate": 0.00014,
      "loss": 1.2194,
      "step": 7
    },
    {
      "epoch": 0.0326530612244898,
      "grad_norm": 0.029295403510332108,
      "learning_rate": 0.00016,
      "loss": 1.1522,
      "step": 8
    },
    {
      "epoch": 0.036734693877551024,
      "grad_norm": 0.027247965335845947,
      "learning_rate": 0.00018,
      "loss": 1.1636,
      "step": 9
    },
    {
      "epoch": 0.04081632653061224,
      "grad_norm": 0.027813177555799484,
      "learning_rate": 0.0002,
      "loss": 1.1848,
      "step": 10
    },
    {
      "epoch": 0.044897959183673466,
      "grad_norm": 0.027455640956759453,
      "learning_rate": 0.00019999906115681734,
      "loss": 1.2151,
      "step": 11
    },
    {
      "epoch": 0.04897959183673469,
      "grad_norm": 0.026825131848454475,
      "learning_rate": 0.0001999962446448979,
      "loss": 1.1751,
      "step": 12
    },
    {
      "epoch": 0.053061224489795916,
      "grad_norm": 0.026261834427714348,
      "learning_rate": 0.0001999915505171269,
      "loss": 1.0573,
      "step": 13
    },
    {
      "epoch": 0.05714285714285714,
      "grad_norm": 0.022048471495509148,
      "learning_rate": 0.0001999849788616454,
      "loss": 1.1997,
      "step": 14
    },
    {
      "epoch": 0.061224489795918366,
      "grad_norm": 0.01966056413948536,
      "learning_rate": 0.00019997652980184843,
      "loss": 1.1469,
      "step": 15
    },
    {
      "epoch": 0.0653061224489796,
      "grad_norm": 0.021213103085756302,
      "learning_rate": 0.00019996620349638285,
      "loss": 1.15,
      "step": 16
    },
    {
      "epoch": 0.06938775510204082,
      "grad_norm": 0.02238585613667965,
      "learning_rate": 0.00019995400013914427,
      "loss": 1.206,
      "step": 17
    },
    {
      "epoch": 0.07346938775510205,
      "grad_norm": 0.02359834685921669,
      "learning_rate": 0.0001999399199592735,
      "loss": 1.3413,
      "step": 18
    },
    {
      "epoch": 0.07755102040816327,
      "grad_norm": 0.022565221413969994,
      "learning_rate": 0.00019992396322115213,
      "loss": 1.1798,
      "step": 19
    },
    {
      "epoch": 0.08163265306122448,
      "grad_norm": 0.02135898545384407,
      "learning_rate": 0.0001999061302243977,
      "loss": 1.1602,
      "step": 20
    },
    {
      "epoch": 0.08571428571428572,
      "grad_norm": 0.02203250862658024,
      "learning_rate": 0.00019988642130385788,
      "loss": 1.0915,
      "step": 21
    },
    {
      "epoch": 0.08979591836734693,
      "grad_norm": 0.01716572232544422,
      "learning_rate": 0.00019986483682960445,
      "loss": 1.1858,
      "step": 22
    },
    {
      "epoch": 0.09387755102040816,
      "grad_norm": 0.01753074862062931,
      "learning_rate": 0.00019984137720692612,
      "loss": 1.1849,
      "step": 23
    },
    {
      "epoch": 0.09795918367346938,
      "grad_norm": 0.017932750284671783,
      "learning_rate": 0.00019981604287632102,
      "loss": 1.1579,
      "step": 24
    },
    {
      "epoch": 0.10204081632653061,
      "grad_norm": 0.017208363860845566,
      "learning_rate": 0.00019978883431348845,
      "loss": 1.1223,
      "step": 25
    },
    {
      "epoch": 0.10612244897959183,
      "grad_norm": 0.01637221872806549,
      "learning_rate": 0.00019975975202931982,
      "loss": 1.1809,
      "step": 26
    },
    {
      "epoch": 0.11020408163265306,
      "grad_norm": 0.016254756599664688,
      "learning_rate": 0.0001997287965698893,
      "loss": 1.1978,
      "step": 27
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 0.016743820160627365,
      "learning_rate": 0.00019969596851644327,
      "loss": 1.1273,
      "step": 28
    },
    {
      "epoch": 0.11836734693877551,
      "grad_norm": 0.020556606352329254,
      "learning_rate": 0.0001996612684853896,
      "loss": 1.1638,
      "step": 29
    },
    {
      "epoch": 0.12244897959183673,
      "grad_norm": 0.019542187452316284,
      "learning_rate": 0.00019962469712828614,
      "loss": 1.2558,
      "step": 30
    },
    {
      "epoch": 0.12653061224489795,
      "grad_norm": 0.01658390648663044,
      "learning_rate": 0.00019958625513182815,
      "loss": 1.2176,
      "step": 31
    },
    {
      "epoch": 0.1306122448979592,
      "grad_norm": 0.017420461401343346,
      "learning_rate": 0.00019954594321783584,
      "loss": 1.2671,
      "step": 32
    },
    {
      "epoch": 0.1346938775510204,
      "grad_norm": 0.01890096440911293,
      "learning_rate": 0.00019950376214324052,
      "loss": 1.1448,
      "step": 33
    },
    {
      "epoch": 0.13877551020408163,
      "grad_norm": 0.016807271167635918,
      "learning_rate": 0.00019945971270007043,
      "loss": 1.2085,
      "step": 34
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.015759294852614403,
      "learning_rate": 0.00019941379571543596,
      "loss": 1.2207,
      "step": 35
    },
    {
      "epoch": 0.1469387755102041,
      "grad_norm": 0.016418293118476868,
      "learning_rate": 0.00019936601205151414,
      "loss": 1.1031,
      "step": 36
    },
    {
      "epoch": 0.1510204081632653,
      "grad_norm": 0.017639920115470886,
      "learning_rate": 0.00019931636260553224,
      "loss": 1.1439,
      "step": 37
    },
    {
      "epoch": 0.15510204081632653,
      "grad_norm": 0.01651432178914547,
      "learning_rate": 0.00019926484830975113,
      "loss": 1.1111,
      "step": 38
    },
    {
      "epoch": 0.15918367346938775,
      "grad_norm": 0.016843752935528755,
      "learning_rate": 0.0001992114701314478,
      "loss": 1.2519,
      "step": 39
    },
    {
      "epoch": 0.16326530612244897,
      "grad_norm": 0.016024339944124222,
      "learning_rate": 0.00019915622907289694,
      "loss": 1.2122,
      "step": 40
    },
    {
      "epoch": 0.1673469387755102,
      "grad_norm": 0.017350338399410248,
      "learning_rate": 0.00019909912617135244,
      "loss": 1.2104,
      "step": 41
    },
    {
      "epoch": 0.17142857142857143,
      "grad_norm": 0.01698286086320877,
      "learning_rate": 0.00019904016249902763,
      "loss": 1.2258,
      "step": 42
    },
    {
      "epoch": 0.17551020408163265,
      "grad_norm": 0.016692234203219414,
      "learning_rate": 0.00019897933916307543,
      "loss": 1.1614,
      "step": 43
    },
    {
      "epoch": 0.17959183673469387,
      "grad_norm": 0.01875966228544712,
      "learning_rate": 0.00019891665730556725,
      "loss": 1.2314,
      "step": 44
    },
    {
      "epoch": 0.1836734693877551,
      "grad_norm": 0.016782566905021667,
      "learning_rate": 0.00019885211810347184,
      "loss": 1.2096,
      "step": 45
    },
    {
      "epoch": 0.18775510204081633,
      "grad_norm": 0.017428990453481674,
      "learning_rate": 0.00019878572276863294,
      "loss": 1.0537,
      "step": 46
    },
    {
      "epoch": 0.19183673469387755,
      "grad_norm": 0.016738982871174812,
      "learning_rate": 0.00019871747254774673,
      "loss": 1.2018,
      "step": 47
    },
    {
      "epoch": 0.19591836734693877,
      "grad_norm": 0.018019411712884903,
      "learning_rate": 0.0001986473687223383,
      "loss": 1.2504,
      "step": 48
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.017473606392741203,
      "learning_rate": 0.0001985754126087376,
      "loss": 1.0481,
      "step": 49
    },
    {
      "epoch": 0.20408163265306123,
      "grad_norm": 0.017300540581345558,
      "learning_rate": 0.00019850160555805486,
      "loss": 1.2109,
      "step": 50
    },
    {
      "epoch": 0.20816326530612245,
      "grad_norm": 0.0182975921779871,
      "learning_rate": 0.00019842594895615488,
      "loss": 1.1598,
      "step": 51
    },
    {
      "epoch": 0.21224489795918366,
      "grad_norm": 0.020277326926589012,
      "learning_rate": 0.00019834844422363142,
      "loss": 1.1177,
      "step": 52
    },
    {
      "epoch": 0.2163265306122449,
      "grad_norm": 0.018494602292776108,
      "learning_rate": 0.00019826909281578026,
      "loss": 1.1037,
      "step": 53
    },
    {
      "epoch": 0.22040816326530613,
      "grad_norm": 0.017682479694485664,
      "learning_rate": 0.00019818789622257196,
      "loss": 1.1358,
      "step": 54
    },
    {
      "epoch": 0.22448979591836735,
      "grad_norm": 0.0186194758862257,
      "learning_rate": 0.00019810485596862392,
      "loss": 1.1403,
      "step": 55
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 0.018025796860456467,
      "learning_rate": 0.00019801997361317163,
      "loss": 1.1959,
      "step": 56
    },
    {
      "epoch": 0.23265306122448978,
      "grad_norm": 0.01819712296128273,
      "learning_rate": 0.0001979332507500395,
      "loss": 1.1181,
      "step": 57
    },
    {
      "epoch": 0.23673469387755103,
      "grad_norm": 0.018991166725754738,
      "learning_rate": 0.00019784468900761095,
      "loss": 1.1393,
      "step": 58
    },
    {
      "epoch": 0.24081632653061225,
      "grad_norm": 0.01914682239294052,
      "learning_rate": 0.0001977542900487977,
      "loss": 1.182,
      "step": 59
    },
    {
      "epoch": 0.24489795918367346,
      "grad_norm": 0.01992950588464737,
      "learning_rate": 0.00019766205557100868,
      "loss": 1.1629,
      "step": 60
    },
    {
      "epoch": 0.24897959183673468,
      "grad_norm": 0.024829212576150894,
      "learning_rate": 0.00019756798730611813,
      "loss": 1.0753,
      "step": 61
    },
    {
      "epoch": 0.2530612244897959,
      "grad_norm": 0.02457556687295437,
      "learning_rate": 0.00019747208702043296,
      "loss": 1.1718,
      "step": 62
    },
    {
      "epoch": 0.2571428571428571,
      "grad_norm": 0.019101744517683983,
      "learning_rate": 0.0001973743565146599,
      "loss": 1.1343,
      "step": 63
    },
    {
      "epoch": 0.2612244897959184,
      "grad_norm": 0.020323360338807106,
      "learning_rate": 0.00019727479762387116,
      "loss": 1.1689,
      "step": 64
    },
    {
      "epoch": 0.2653061224489796,
      "grad_norm": 0.018925843760371208,
      "learning_rate": 0.00019717341221747056,
      "loss": 1.2098,
      "step": 65
    },
    {
      "epoch": 0.2693877551020408,
      "grad_norm": 0.019229738041758537,
      "learning_rate": 0.00019707020219915806,
      "loss": 1.1542,
      "step": 66
    },
    {
      "epoch": 0.27346938775510204,
      "grad_norm": 0.019665885716676712,
      "learning_rate": 0.00019696516950689404,
      "loss": 1.1154,
      "step": 67
    },
    {
      "epoch": 0.27755102040816326,
      "grad_norm": 0.019163204357028008,
      "learning_rate": 0.0001968583161128631,
      "loss": 1.1726,
      "step": 68
    },
    {
      "epoch": 0.2816326530612245,
      "grad_norm": 0.019880875945091248,
      "learning_rate": 0.00019674964402343684,
      "loss": 1.1441,
      "step": 69
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.019096847623586655,
      "learning_rate": 0.00019663915527913625,
      "loss": 1.2021,
      "step": 70
    },
    {
      "epoch": 0.2897959183673469,
      "grad_norm": 0.019253911450505257,
      "learning_rate": 0.00019652685195459344,
      "loss": 1.1237,
      "step": 71
    },
    {
      "epoch": 0.2938775510204082,
      "grad_norm": 0.020909370854496956,
      "learning_rate": 0.00019641273615851257,
      "loss": 1.0575,
      "step": 72
    },
    {
      "epoch": 0.2979591836734694,
      "grad_norm": 0.019176874309778214,
      "learning_rate": 0.00019629681003363044,
      "loss": 1.0377,
      "step": 73
    },
    {
      "epoch": 0.3020408163265306,
      "grad_norm": 0.019969960674643517,
      "learning_rate": 0.00019617907575667602,
      "loss": 1.1471,
      "step": 74
    },
    {
      "epoch": 0.30612244897959184,
      "grad_norm": 0.019450997933745384,
      "learning_rate": 0.00019605953553832988,
      "loss": 1.2022,
      "step": 75
    },
    {
      "epoch": 0.31020408163265306,
      "grad_norm": 0.02082456275820732,
      "learning_rate": 0.00019593819162318232,
      "loss": 1.0932,
      "step": 76
    },
    {
      "epoch": 0.3142857142857143,
      "grad_norm": 0.020122263580560684,
      "learning_rate": 0.00019581504628969154,
      "loss": 1.0826,
      "step": 77
    },
    {
      "epoch": 0.3183673469387755,
      "grad_norm": 0.020104922354221344,
      "learning_rate": 0.00019569010185014062,
      "loss": 1.1404,
      "step": 78
    },
    {
      "epoch": 0.3224489795918367,
      "grad_norm": 0.021288864314556122,
      "learning_rate": 0.00019556336065059432,
      "loss": 1.0951,
      "step": 79
    },
    {
      "epoch": 0.32653061224489793,
      "grad_norm": 0.02072795107960701,
      "learning_rate": 0.00019543482507085482,
      "loss": 1.0649,
      "step": 80
    },
    {
      "epoch": 0.3306122448979592,
      "grad_norm": 0.020123451948165894,
      "learning_rate": 0.00019530449752441718,
      "loss": 1.1333,
      "step": 81
    },
    {
      "epoch": 0.3346938775510204,
      "grad_norm": 0.020374584943056107,
      "learning_rate": 0.00019517238045842404,
      "loss": 1.147,
      "step": 82
    },
    {
      "epoch": 0.3346938775510204,
      "eval_loss": 1.1397618055343628,
      "eval_runtime": 120.3925,
      "eval_samples_per_second": 20.284,
      "eval_steps_per_second": 10.142,
      "step": 82
    },
    {
      "epoch": 0.33877551020408164,
      "grad_norm": 0.0197721179574728,
      "learning_rate": 0.00019503847635361944,
      "loss": 1.0954,
      "step": 83
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 0.021181972697377205,
      "learning_rate": 0.00019490278772430256,
      "loss": 1.1145,
      "step": 84
    },
    {
      "epoch": 0.3469387755102041,
      "grad_norm": 0.021057790145277977,
      "learning_rate": 0.00019476531711828027,
      "loss": 1.2255,
      "step": 85
    },
    {
      "epoch": 0.3510204081632653,
      "grad_norm": 0.0207724217325449,
      "learning_rate": 0.00019462606711681936,
      "loss": 1.0366,
      "step": 86
    },
    {
      "epoch": 0.3551020408163265,
      "grad_norm": 0.021172018721699715,
      "learning_rate": 0.00019448504033459818,
      "loss": 1.0835,
      "step": 87
    },
    {
      "epoch": 0.35918367346938773,
      "grad_norm": 0.02109033428132534,
      "learning_rate": 0.00019434223941965738,
      "loss": 1.0902,
      "step": 88
    },
    {
      "epoch": 0.363265306122449,
      "grad_norm": 0.02078443393111229,
      "learning_rate": 0.00019419766705335026,
      "loss": 1.087,
      "step": 89
    },
    {
      "epoch": 0.3673469387755102,
      "grad_norm": 0.020614784210920334,
      "learning_rate": 0.0001940513259502924,
      "loss": 1.0834,
      "step": 90
    },
    {
      "epoch": 0.37142857142857144,
      "grad_norm": 0.020187893882393837,
      "learning_rate": 0.0001939032188583108,
      "loss": 1.1847,
      "step": 91
    },
    {
      "epoch": 0.37551020408163266,
      "grad_norm": 0.020622489973902702,
      "learning_rate": 0.0001937533485583921,
      "loss": 1.1767,
      "step": 92
    },
    {
      "epoch": 0.3795918367346939,
      "grad_norm": 0.02118833176791668,
      "learning_rate": 0.00019360171786463043,
      "loss": 1.1491,
      "step": 93
    },
    {
      "epoch": 0.3836734693877551,
      "grad_norm": 0.021402837708592415,
      "learning_rate": 0.00019344832962417475,
      "loss": 1.1547,
      "step": 94
    },
    {
      "epoch": 0.3877551020408163,
      "grad_norm": 0.02148307114839554,
      "learning_rate": 0.0001932931867171751,
      "loss": 1.2062,
      "step": 95
    },
    {
      "epoch": 0.39183673469387753,
      "grad_norm": 0.022203955799341202,
      "learning_rate": 0.00019313629205672868,
      "loss": 1.1271,
      "step": 96
    },
    {
      "epoch": 0.39591836734693875,
      "grad_norm": 0.02081882208585739,
      "learning_rate": 0.00019297764858882514,
      "loss": 1.0965,
      "step": 97
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.021749386563897133,
      "learning_rate": 0.00019281725929229127,
      "loss": 1.11,
      "step": 98
    },
    {
      "epoch": 0.40408163265306124,
      "grad_norm": 0.02319493517279625,
      "learning_rate": 0.00019265512717873498,
      "loss": 1.0196,
      "step": 99
    },
    {
      "epoch": 0.40816326530612246,
      "grad_norm": 0.021901512518525124,
      "learning_rate": 0.0001924912552924889,
      "loss": 1.1552,
      "step": 100
    },
    {
      "epoch": 0.4122448979591837,
      "grad_norm": 0.02368365228176117,
      "learning_rate": 0.00019232564671055306,
      "loss": 1.2047,
      "step": 101
    },
    {
      "epoch": 0.4163265306122449,
      "grad_norm": 0.022438503801822662,
      "learning_rate": 0.00019215830454253724,
      "loss": 1.1429,
      "step": 102
    },
    {
      "epoch": 0.4204081632653061,
      "grad_norm": 0.022685807198286057,
      "learning_rate": 0.00019198923193060254,
      "loss": 1.0717,
      "step": 103
    },
    {
      "epoch": 0.42448979591836733,
      "grad_norm": 0.023902015760540962,
      "learning_rate": 0.00019181843204940232,
      "loss": 1.1139,
      "step": 104
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.022928839549422264,
      "learning_rate": 0.00019164590810602262,
      "loss": 1.1515,
      "step": 105
    },
    {
      "epoch": 0.4326530612244898,
      "grad_norm": 0.02210794948041439,
      "learning_rate": 0.00019147166333992205,
      "loss": 1.1762,
      "step": 106
    },
    {
      "epoch": 0.43673469387755104,
      "grad_norm": 0.023219434544444084,
      "learning_rate": 0.00019129570102287082,
      "loss": 1.1925,
      "step": 107
    },
    {
      "epoch": 0.44081632653061226,
      "grad_norm": 0.023004446178674698,
      "learning_rate": 0.00019111802445888936,
      "loss": 1.1449,
      "step": 108
    },
    {
      "epoch": 0.4448979591836735,
      "grad_norm": 0.023740626871585846,
      "learning_rate": 0.00019093863698418627,
      "loss": 1.2179,
      "step": 109
    },
    {
      "epoch": 0.4489795918367347,
      "grad_norm": 0.06276971846818924,
      "learning_rate": 0.00019075754196709572,
      "loss": 1.2497,
      "step": 110
    },
    {
      "epoch": 0.4530612244897959,
      "grad_norm": 0.023355931043624878,
      "learning_rate": 0.00019057474280801415,
      "loss": 1.1625,
      "step": 111
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 0.03020629473030567,
      "learning_rate": 0.00019039024293933645,
      "loss": 1.0821,
      "step": 112
    },
    {
      "epoch": 0.46122448979591835,
      "grad_norm": 0.022392934188246727,
      "learning_rate": 0.00019020404582539152,
      "loss": 1.1074,
      "step": 113
    },
    {
      "epoch": 0.46530612244897956,
      "grad_norm": 0.023577727377414703,
      "learning_rate": 0.00019001615496237712,
      "loss": 1.1128,
      "step": 114
    },
    {
      "epoch": 0.46938775510204084,
      "grad_norm": 0.023507297039031982,
      "learning_rate": 0.00018982657387829445,
      "loss": 1.1345,
      "step": 115
    },
    {
      "epoch": 0.47346938775510206,
      "grad_norm": 0.023036476224660873,
      "learning_rate": 0.0001896353061328816,
      "loss": 1.128,
      "step": 116
    },
    {
      "epoch": 0.4775510204081633,
      "grad_norm": 0.023640332743525505,
      "learning_rate": 0.00018944235531754698,
      "loss": 1.062,
      "step": 117
    },
    {
      "epoch": 0.4816326530612245,
      "grad_norm": 0.022972116246819496,
      "learning_rate": 0.00018924772505530174,
      "loss": 1.1024,
      "step": 118
    },
    {
      "epoch": 0.4857142857142857,
      "grad_norm": 0.023795459419488907,
      "learning_rate": 0.00018905141900069178,
      "loss": 1.1595,
      "step": 119
    },
    {
      "epoch": 0.4897959183673469,
      "grad_norm": 0.02379484474658966,
      "learning_rate": 0.00018885344083972914,
      "loss": 1.1492,
      "step": 120
    },
    {
      "epoch": 0.49387755102040815,
      "grad_norm": 0.023673737421631813,
      "learning_rate": 0.00018865379428982271,
      "loss": 1.1571,
      "step": 121
    },
    {
      "epoch": 0.49795918367346936,
      "grad_norm": 0.02365570329129696,
      "learning_rate": 0.00018845248309970854,
      "loss": 1.1216,
      "step": 122
    },
    {
      "epoch": 0.5020408163265306,
      "grad_norm": 0.025217821821570396,
      "learning_rate": 0.00018824951104937947,
      "loss": 1.1422,
      "step": 123
    },
    {
      "epoch": 0.5061224489795918,
      "grad_norm": 0.02457055076956749,
      "learning_rate": 0.00018804488195001392,
      "loss": 1.119,
      "step": 124
    },
    {
      "epoch": 0.5102040816326531,
      "grad_norm": 0.023103831335902214,
      "learning_rate": 0.00018783859964390464,
      "loss": 1.153,
      "step": 125
    },
    {
      "epoch": 0.5142857142857142,
      "grad_norm": 0.02386569045484066,
      "learning_rate": 0.00018763066800438636,
      "loss": 1.137,
      "step": 126
    },
    {
      "epoch": 0.5183673469387755,
      "grad_norm": 0.024395650252699852,
      "learning_rate": 0.00018742109093576313,
      "loss": 1.1592,
      "step": 127
    },
    {
      "epoch": 0.5224489795918368,
      "grad_norm": 0.024944225326180458,
      "learning_rate": 0.000187209872373235,
      "loss": 1.1395,
      "step": 128
    },
    {
      "epoch": 0.5265306122448979,
      "grad_norm": 0.023946771398186684,
      "learning_rate": 0.00018699701628282407,
      "loss": 1.1779,
      "step": 129
    },
    {
      "epoch": 0.5306122448979592,
      "grad_norm": 0.02391199767589569,
      "learning_rate": 0.00018678252666130013,
      "loss": 1.1299,
      "step": 130
    },
    {
      "epoch": 0.5346938775510204,
      "grad_norm": 0.0230008065700531,
      "learning_rate": 0.00018656640753610563,
      "loss": 1.1087,
      "step": 131
    },
    {
      "epoch": 0.5387755102040817,
      "grad_norm": 0.023263461887836456,
      "learning_rate": 0.0001863486629652799,
      "loss": 1.0835,
      "step": 132
    },
    {
      "epoch": 0.5428571428571428,
      "grad_norm": 0.024622686207294464,
      "learning_rate": 0.000186129297037383,
      "loss": 1.0911,
      "step": 133
    },
    {
      "epoch": 0.5469387755102041,
      "grad_norm": 0.023301225155591965,
      "learning_rate": 0.0001859083138714191,
      "loss": 1.1345,
      "step": 134
    },
    {
      "epoch": 0.5510204081632653,
      "grad_norm": 0.023047855123877525,
      "learning_rate": 0.00018568571761675893,
      "loss": 1.1353,
      "step": 135
    },
    {
      "epoch": 0.5551020408163265,
      "grad_norm": 0.025225356221199036,
      "learning_rate": 0.00018546151245306205,
      "loss": 1.149,
      "step": 136
    },
    {
      "epoch": 0.5591836734693878,
      "grad_norm": 0.024374982342123985,
      "learning_rate": 0.00018523570259019827,
      "loss": 1.0618,
      "step": 137
    },
    {
      "epoch": 0.563265306122449,
      "grad_norm": 0.024517694488167763,
      "learning_rate": 0.00018500829226816853,
      "loss": 1.148,
      "step": 138
    },
    {
      "epoch": 0.5673469387755102,
      "grad_norm": 0.022940896451473236,
      "learning_rate": 0.0001847792857570255,
      "loss": 1.0824,
      "step": 139
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.025163279846310616,
      "learning_rate": 0.0001845486873567932,
      "loss": 1.112,
      "step": 140
    },
    {
      "epoch": 0.5755102040816327,
      "grad_norm": 0.024664800614118576,
      "learning_rate": 0.00018431650139738633,
      "loss": 1.1398,
      "step": 141
    },
    {
      "epoch": 0.5795918367346938,
      "grad_norm": 0.02429143153131008,
      "learning_rate": 0.000184082732238529,
      "loss": 1.2003,
      "step": 142
    },
    {
      "epoch": 0.5836734693877551,
      "grad_norm": 0.026087280362844467,
      "learning_rate": 0.00018384738426967283,
      "loss": 1.1151,
      "step": 143
    },
    {
      "epoch": 0.5877551020408164,
      "grad_norm": 0.025691909715533257,
      "learning_rate": 0.00018361046190991455,
      "loss": 1.1526,
      "step": 144
    },
    {
      "epoch": 0.5918367346938775,
      "grad_norm": 0.024672340601682663,
      "learning_rate": 0.00018337196960791302,
      "loss": 1.1781,
      "step": 145
    },
    {
      "epoch": 0.5959183673469388,
      "grad_norm": 0.025797029957175255,
      "learning_rate": 0.00018313191184180568,
      "loss": 1.2053,
      "step": 146
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.025542639195919037,
      "learning_rate": 0.0001828902931191244,
      "loss": 1.0819,
      "step": 147
    },
    {
      "epoch": 0.6040816326530613,
      "grad_norm": 0.025126414373517036,
      "learning_rate": 0.0001826471179767111,
      "loss": 1.1935,
      "step": 148
    },
    {
      "epoch": 0.6081632653061224,
      "grad_norm": 0.02403743751347065,
      "learning_rate": 0.0001824023909806322,
      "loss": 1.1151,
      "step": 149
    },
    {
      "epoch": 0.6122448979591837,
      "grad_norm": 0.025347478687763214,
      "learning_rate": 0.00018215611672609317,
      "loss": 1.0863,
      "step": 150
    },
    {
      "epoch": 0.6163265306122448,
      "grad_norm": 0.024234246462583542,
      "learning_rate": 0.00018190829983735207,
      "loss": 0.9933,
      "step": 151
    },
    {
      "epoch": 0.6204081632653061,
      "grad_norm": 0.02541242353618145,
      "learning_rate": 0.00018165894496763286,
      "loss": 1.016,
      "step": 152
    },
    {
      "epoch": 0.6244897959183674,
      "grad_norm": 0.025475289672613144,
      "learning_rate": 0.00018140805679903795,
      "loss": 1.1199,
      "step": 153
    },
    {
      "epoch": 0.6285714285714286,
      "grad_norm": 0.025047162547707558,
      "learning_rate": 0.00018115564004246023,
      "loss": 1.1866,
      "step": 154
    },
    {
      "epoch": 0.6326530612244898,
      "grad_norm": 0.024512339383363724,
      "learning_rate": 0.00018090169943749476,
      "loss": 1.0407,
      "step": 155
    },
    {
      "epoch": 0.636734693877551,
      "grad_norm": 0.027875879779458046,
      "learning_rate": 0.0001806462397523496,
      "loss": 1.1148,
      "step": 156
    },
    {
      "epoch": 0.6408163265306123,
      "grad_norm": 0.027647798880934715,
      "learning_rate": 0.00018038926578375653,
      "loss": 1.1748,
      "step": 157
    },
    {
      "epoch": 0.6448979591836734,
      "grad_norm": 0.0258516576141119,
      "learning_rate": 0.0001801307823568806,
      "loss": 1.156,
      "step": 158
    },
    {
      "epoch": 0.6489795918367347,
      "grad_norm": 0.02649206854403019,
      "learning_rate": 0.00017987079432522996,
      "loss": 1.0625,
      "step": 159
    },
    {
      "epoch": 0.6530612244897959,
      "grad_norm": 0.027242302894592285,
      "learning_rate": 0.00017960930657056438,
      "loss": 1.0624,
      "step": 160
    },
    {
      "epoch": 0.6571428571428571,
      "grad_norm": 0.026996418833732605,
      "learning_rate": 0.0001793463240028038,
      "loss": 1.0979,
      "step": 161
    },
    {
      "epoch": 0.6612244897959184,
      "grad_norm": 0.025911618024110794,
      "learning_rate": 0.00017908185155993605,
      "loss": 1.0767,
      "step": 162
    },
    {
      "epoch": 0.6653061224489796,
      "grad_norm": 0.02710540033876896,
      "learning_rate": 0.0001788158942079241,
      "loss": 0.9389,
      "step": 163
    },
    {
      "epoch": 0.6693877551020408,
      "grad_norm": 0.026086492463946342,
      "learning_rate": 0.00017854845694061292,
      "loss": 1.1475,
      "step": 164
    },
    {
      "epoch": 0.6693877551020408,
      "eval_loss": 1.1236058473587036,
      "eval_runtime": 120.3474,
      "eval_samples_per_second": 20.291,
      "eval_steps_per_second": 10.146,
      "step": 164
    },
    {
      "epoch": 0.673469387755102,
      "grad_norm": 0.02641923353075981,
      "learning_rate": 0.00017827954477963557,
      "loss": 1.1128,
      "step": 165
    },
    {
      "epoch": 0.6775510204081633,
      "grad_norm": 0.026196755468845367,
      "learning_rate": 0.00017800916277431908,
      "loss": 1.059,
      "step": 166
    },
    {
      "epoch": 0.6816326530612244,
      "grad_norm": 0.025523358955979347,
      "learning_rate": 0.00017773731600158947,
      "loss": 1.1618,
      "step": 167
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 0.026307355612516403,
      "learning_rate": 0.00017746400956587653,
      "loss": 1.0585,
      "step": 168
    },
    {
      "epoch": 0.689795918367347,
      "grad_norm": 0.026362139731645584,
      "learning_rate": 0.00017718924859901793,
      "loss": 1.0543,
      "step": 169
    },
    {
      "epoch": 0.6938775510204082,
      "grad_norm": 0.02530169114470482,
      "learning_rate": 0.0001769130382601629,
      "loss": 1.1009,
      "step": 170
    },
    {
      "epoch": 0.6979591836734694,
      "grad_norm": 0.026273014023900032,
      "learning_rate": 0.0001766353837356753,
      "loss": 1.1963,
      "step": 171
    },
    {
      "epoch": 0.7020408163265306,
      "grad_norm": 0.026570703834295273,
      "learning_rate": 0.00017635629023903627,
      "loss": 1.1098,
      "step": 172
    },
    {
      "epoch": 0.7061224489795919,
      "grad_norm": 0.02533114328980446,
      "learning_rate": 0.0001760757630107464,
      "loss": 1.2321,
      "step": 173
    },
    {
      "epoch": 0.710204081632653,
      "grad_norm": 0.02558620274066925,
      "learning_rate": 0.00017579380731822712,
      "loss": 1.1174,
      "step": 174
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.028141306713223457,
      "learning_rate": 0.00017551042845572208,
      "loss": 1.0609,
      "step": 175
    },
    {
      "epoch": 0.7183673469387755,
      "grad_norm": 0.028484290465712547,
      "learning_rate": 0.00017522563174419753,
      "loss": 1.0779,
      "step": 176
    },
    {
      "epoch": 0.7224489795918367,
      "grad_norm": 0.0262393057346344,
      "learning_rate": 0.00017493942253124248,
      "loss": 1.0889,
      "step": 177
    },
    {
      "epoch": 0.726530612244898,
      "grad_norm": 0.029188042506575584,
      "learning_rate": 0.00017465180619096832,
      "loss": 1.1816,
      "step": 178
    },
    {
      "epoch": 0.7306122448979592,
      "grad_norm": 0.02816024050116539,
      "learning_rate": 0.00017436278812390786,
      "loss": 1.155,
      "step": 179
    },
    {
      "epoch": 0.7346938775510204,
      "grad_norm": 0.026962874457240105,
      "learning_rate": 0.00017407237375691392,
      "loss": 1.0784,
      "step": 180
    },
    {
      "epoch": 0.7387755102040816,
      "grad_norm": 0.027746373787522316,
      "learning_rate": 0.00017378056854305747,
      "loss": 1.1896,
      "step": 181
    },
    {
      "epoch": 0.7428571428571429,
      "grad_norm": 0.02716743014752865,
      "learning_rate": 0.00017348737796152522,
      "loss": 1.0722,
      "step": 182
    },
    {
      "epoch": 0.746938775510204,
      "grad_norm": 0.02938067726790905,
      "learning_rate": 0.0001731928075175168,
      "loss": 1.0179,
      "step": 183
    },
    {
      "epoch": 0.7510204081632653,
      "grad_norm": 0.03580696880817413,
      "learning_rate": 0.00017289686274214118,
      "loss": 1.0951,
      "step": 184
    },
    {
      "epoch": 0.7551020408163265,
      "grad_norm": 0.02849019691348076,
      "learning_rate": 0.0001725995491923131,
      "loss": 1.1006,
      "step": 185
    },
    {
      "epoch": 0.7591836734693878,
      "grad_norm": 0.026822512969374657,
      "learning_rate": 0.00017230087245064858,
      "loss": 1.1535,
      "step": 186
    },
    {
      "epoch": 0.763265306122449,
      "grad_norm": 0.027751388028264046,
      "learning_rate": 0.00017200083812536,
      "loss": 1.0975,
      "step": 187
    },
    {
      "epoch": 0.7673469387755102,
      "grad_norm": 0.027201151475310326,
      "learning_rate": 0.00017169945185015106,
      "loss": 1.1022,
      "step": 188
    },
    {
      "epoch": 0.7714285714285715,
      "grad_norm": 0.026958104223012924,
      "learning_rate": 0.00017139671928411072,
      "loss": 1.14,
      "step": 189
    },
    {
      "epoch": 0.7755102040816326,
      "grad_norm": 0.026919234544038773,
      "learning_rate": 0.00017109264611160708,
      "loss": 1.1056,
      "step": 190
    },
    {
      "epoch": 0.7795918367346939,
      "grad_norm": 0.027088504284620285,
      "learning_rate": 0.00017078723804218066,
      "loss": 1.2057,
      "step": 191
    },
    {
      "epoch": 0.7836734693877551,
      "grad_norm": 0.027464529499411583,
      "learning_rate": 0.00017048050081043713,
      "loss": 1.1709,
      "step": 192
    },
    {
      "epoch": 0.7877551020408163,
      "grad_norm": 0.028090914711356163,
      "learning_rate": 0.0001701724401759397,
      "loss": 1.1489,
      "step": 193
    },
    {
      "epoch": 0.7918367346938775,
      "grad_norm": 0.026381971314549446,
      "learning_rate": 0.00016986306192310084,
      "loss": 1.0724,
      "step": 194
    },
    {
      "epoch": 0.7959183673469388,
      "grad_norm": 0.027733104303479195,
      "learning_rate": 0.00016955237186107387,
      "loss": 1.1281,
      "step": 195
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.026476366445422173,
      "learning_rate": 0.0001692403758236437,
      "loss": 1.19,
      "step": 196
    },
    {
      "epoch": 0.8040816326530612,
      "grad_norm": 0.02931978367269039,
      "learning_rate": 0.0001689270796691174,
      "loss": 1.0904,
      "step": 197
    },
    {
      "epoch": 0.8081632653061225,
      "grad_norm": 0.02770647406578064,
      "learning_rate": 0.00016861248928021411,
      "loss": 1.1499,
      "step": 198
    },
    {
      "epoch": 0.8122448979591836,
      "grad_norm": 0.02689889818429947,
      "learning_rate": 0.00016829661056395474,
      "loss": 1.1525,
      "step": 199
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 0.02684464119374752,
      "learning_rate": 0.0001679794494515508,
      "loss": 1.0428,
      "step": 200
    },
    {
      "epoch": 0.8204081632653061,
      "grad_norm": 0.026796666905283928,
      "learning_rate": 0.0001676610118982933,
      "loss": 1.1829,
      "step": 201
    },
    {
      "epoch": 0.8244897959183674,
      "grad_norm": 0.028474919497966766,
      "learning_rate": 0.00016734130388344073,
      "loss": 1.0853,
      "step": 202
    },
    {
      "epoch": 0.8285714285714286,
      "grad_norm": 0.027959391474723816,
      "learning_rate": 0.00016702033141010694,
      "loss": 1.0554,
      "step": 203
    },
    {
      "epoch": 0.8326530612244898,
      "grad_norm": 0.027617380023002625,
      "learning_rate": 0.00016669810050514827,
      "loss": 1.2237,
      "step": 204
    },
    {
      "epoch": 0.8367346938775511,
      "grad_norm": 0.028105339035391808,
      "learning_rate": 0.00016637461721905045,
      "loss": 1.0778,
      "step": 205
    },
    {
      "epoch": 0.8408163265306122,
      "grad_norm": 0.028216930106282234,
      "learning_rate": 0.00016604988762581512,
      "loss": 1.1507,
      "step": 206
    },
    {
      "epoch": 0.8448979591836735,
      "grad_norm": 0.026569265872240067,
      "learning_rate": 0.00016572391782284547,
      "loss": 1.0134,
      "step": 207
    },
    {
      "epoch": 0.8489795918367347,
      "grad_norm": 0.027188677340745926,
      "learning_rate": 0.00016539671393083215,
      "loss": 1.0784,
      "step": 208
    },
    {
      "epoch": 0.8530612244897959,
      "grad_norm": 0.02793797291815281,
      "learning_rate": 0.00016506828209363796,
      "loss": 1.1477,
      "step": 209
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.027332162484526634,
      "learning_rate": 0.00016473862847818277,
      "loss": 1.1653,
      "step": 210
    },
    {
      "epoch": 0.8612244897959184,
      "grad_norm": 0.02860257215797901,
      "learning_rate": 0.00016440775927432753,
      "loss": 1.1599,
      "step": 211
    },
    {
      "epoch": 0.8653061224489796,
      "grad_norm": 0.028649616986513138,
      "learning_rate": 0.0001640756806947582,
      "loss": 1.1796,
      "step": 212
    },
    {
      "epoch": 0.8693877551020408,
      "grad_norm": 0.027630403637886047,
      "learning_rate": 0.000163742398974869,
      "loss": 1.1315,
      "step": 213
    },
    {
      "epoch": 0.8734693877551021,
      "grad_norm": 0.027829712256789207,
      "learning_rate": 0.00016340792037264527,
      "loss": 1.0573,
      "step": 214
    },
    {
      "epoch": 0.8775510204081632,
      "grad_norm": 0.027864307165145874,
      "learning_rate": 0.00016307225116854622,
      "loss": 1.083,
      "step": 215
    },
    {
      "epoch": 0.8816326530612245,
      "grad_norm": 0.028005510568618774,
      "learning_rate": 0.0001627353976653866,
      "loss": 1.238,
      "step": 216
    },
    {
      "epoch": 0.8857142857142857,
      "grad_norm": 0.02755396068096161,
      "learning_rate": 0.00016239736618821885,
      "loss": 1.1124,
      "step": 217
    },
    {
      "epoch": 0.889795918367347,
      "grad_norm": 0.026409147307276726,
      "learning_rate": 0.00016205816308421386,
      "loss": 1.1627,
      "step": 218
    },
    {
      "epoch": 0.8938775510204081,
      "grad_norm": 0.02800268866121769,
      "learning_rate": 0.00016171779472254206,
      "loss": 1.0921,
      "step": 219
    },
    {
      "epoch": 0.8979591836734694,
      "grad_norm": 0.02667434886097908,
      "learning_rate": 0.00016137626749425377,
      "loss": 1.207,
      "step": 220
    },
    {
      "epoch": 0.9020408163265307,
      "grad_norm": 0.027823835611343384,
      "learning_rate": 0.0001610335878121592,
      "loss": 1.0776,
      "step": 221
    },
    {
      "epoch": 0.9061224489795918,
      "grad_norm": 0.02820996195077896,
      "learning_rate": 0.000160689762110708,
      "loss": 1.0903,
      "step": 222
    },
    {
      "epoch": 0.9102040816326531,
      "grad_norm": 0.02725161425769329,
      "learning_rate": 0.00016034479684586854,
      "loss": 1.1587,
      "step": 223
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 0.027408665046095848,
      "learning_rate": 0.0001599986984950065,
      "loss": 1.1058,
      "step": 224
    },
    {
      "epoch": 0.9183673469387755,
      "grad_norm": 0.027829701080918312,
      "learning_rate": 0.00015965147355676343,
      "loss": 1.1037,
      "step": 225
    },
    {
      "epoch": 0.9224489795918367,
      "grad_norm": 0.027295254170894623,
      "learning_rate": 0.0001593031285509347,
      "loss": 1.0681,
      "step": 226
    },
    {
      "epoch": 0.926530612244898,
      "grad_norm": 0.028630707412958145,
      "learning_rate": 0.00015895367001834694,
      "loss": 1.1282,
      "step": 227
    },
    {
      "epoch": 0.9306122448979591,
      "grad_norm": 0.02818943001329899,
      "learning_rate": 0.0001586031045207354,
      "loss": 1.1101,
      "step": 228
    },
    {
      "epoch": 0.9346938775510204,
      "grad_norm": 0.028520913794636726,
      "learning_rate": 0.0001582514386406206,
      "loss": 1.1716,
      "step": 229
    },
    {
      "epoch": 0.9387755102040817,
      "grad_norm": 0.027190426364541054,
      "learning_rate": 0.0001578986789811849,
      "loss": 1.0615,
      "step": 230
    },
    {
      "epoch": 0.9428571428571428,
      "grad_norm": 0.028909413143992424,
      "learning_rate": 0.00015754483216614821,
      "loss": 1.0757,
      "step": 231
    },
    {
      "epoch": 0.9469387755102041,
      "grad_norm": 0.028231430798768997,
      "learning_rate": 0.000157189904839644,
      "loss": 1.14,
      "step": 232
    },
    {
      "epoch": 0.9510204081632653,
      "grad_norm": 0.02774875983595848,
      "learning_rate": 0.00015683390366609426,
      "loss": 1.0429,
      "step": 233
    },
    {
      "epoch": 0.9551020408163265,
      "grad_norm": 0.030419837683439255,
      "learning_rate": 0.00015647683533008455,
      "loss": 1.113,
      "step": 234
    },
    {
      "epoch": 0.9591836734693877,
      "grad_norm": 0.028292929753661156,
      "learning_rate": 0.00015611870653623825,
      "loss": 1.1127,
      "step": 235
    },
    {
      "epoch": 0.963265306122449,
      "grad_norm": 0.028285473585128784,
      "learning_rate": 0.00015575952400909092,
      "loss": 1.0493,
      "step": 236
    },
    {
      "epoch": 0.9673469387755103,
      "grad_norm": 0.028861958533525467,
      "learning_rate": 0.00015539929449296392,
      "loss": 1.0879,
      "step": 237
    },
    {
      "epoch": 0.9714285714285714,
      "grad_norm": 0.02901526540517807,
      "learning_rate": 0.00015503802475183773,
      "loss": 1.21,
      "step": 238
    },
    {
      "epoch": 0.9755102040816327,
      "grad_norm": 0.02809295803308487,
      "learning_rate": 0.00015467572156922503,
      "loss": 1.1949,
      "step": 239
    },
    {
      "epoch": 0.9795918367346939,
      "grad_norm": 0.03164355456829071,
      "learning_rate": 0.00015431239174804328,
      "loss": 1.068,
      "step": 240
    },
    {
      "epoch": 0.9836734693877551,
      "grad_norm": 0.029855625703930855,
      "learning_rate": 0.00015394804211048702,
      "loss": 1.1485,
      "step": 241
    },
    {
      "epoch": 0.9877551020408163,
      "grad_norm": 0.027486370876431465,
      "learning_rate": 0.00015358267949789966,
      "loss": 1.0449,
      "step": 242
    },
    {
      "epoch": 0.9918367346938776,
      "grad_norm": 0.02765297144651413,
      "learning_rate": 0.0001532163107706452,
      "loss": 1.0341,
      "step": 243
    },
    {
      "epoch": 0.9959183673469387,
      "grad_norm": 0.02840602956712246,
      "learning_rate": 0.0001528489428079793,
      "loss": 1.2001,
      "step": 244
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.028239021077752113,
      "learning_rate": 0.00015248058250792008,
      "loss": 1.1528,
      "step": 245
    }
  ],
  "logging_steps": 1,
  "max_steps": 735,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 245,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.954959055015117e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}