{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.004995254508217193,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 4.9952545082171936e-05,
      "grad_norm": 0.86328125,
      "learning_rate": 0.0002,
      "loss": 3.1772,
      "step": 1
    },
    {
      "epoch": 9.990509016434387e-05,
      "grad_norm": 0.8515625,
      "learning_rate": 0.0002,
      "loss": 2.9924,
      "step": 2
    },
    {
      "epoch": 0.0001498576352465158,
      "grad_norm": 1.078125,
      "learning_rate": 0.0002,
      "loss": 2.7756,
      "step": 3
    },
    {
      "epoch": 0.00019981018032868775,
      "grad_norm": 0.92578125,
      "learning_rate": 0.0002,
      "loss": 2.6268,
      "step": 4
    },
    {
      "epoch": 0.0002497627254108597,
      "grad_norm": 0.97265625,
      "learning_rate": 0.0002,
      "loss": 2.3838,
      "step": 5
    },
    {
      "epoch": 0.0002997152704930316,
      "grad_norm": 0.80078125,
      "learning_rate": 0.0002,
      "loss": 2.1048,
      "step": 6
    },
    {
      "epoch": 0.0003496678155752036,
      "grad_norm": 0.625,
      "learning_rate": 0.0002,
      "loss": 2.0735,
      "step": 7
    },
    {
      "epoch": 0.0003996203606573755,
      "grad_norm": 0.46875,
      "learning_rate": 0.0002,
      "loss": 1.969,
      "step": 8
    },
    {
      "epoch": 0.0004495729057395474,
      "grad_norm": 0.427734375,
      "learning_rate": 0.0002,
      "loss": 2.0166,
      "step": 9
    },
    {
      "epoch": 0.0004995254508217194,
      "grad_norm": 0.33203125,
      "learning_rate": 0.0002,
      "loss": 1.9446,
      "step": 10
    },
    {
      "epoch": 0.0005494779959038913,
      "grad_norm": 0.31640625,
      "learning_rate": 0.0002,
      "loss": 1.975,
      "step": 11
    },
    {
      "epoch": 0.0005994305409860632,
      "grad_norm": 0.267578125,
      "learning_rate": 0.0002,
      "loss": 1.9867,
      "step": 12
    },
    {
      "epoch": 0.0006493830860682351,
      "grad_norm": 0.25390625,
      "learning_rate": 0.0002,
      "loss": 1.9101,
      "step": 13
    },
    {
      "epoch": 0.0006993356311504072,
      "grad_norm": 0.2109375,
      "learning_rate": 0.0002,
      "loss": 1.9296,
      "step": 14
    },
    {
      "epoch": 0.0007492881762325791,
      "grad_norm": 0.2158203125,
      "learning_rate": 0.0002,
      "loss": 1.9783,
      "step": 15
    },
    {
      "epoch": 0.000799240721314751,
      "grad_norm": 0.1982421875,
      "learning_rate": 0.0002,
      "loss": 1.9612,
      "step": 16
    },
    {
      "epoch": 0.0008491932663969229,
      "grad_norm": 0.2138671875,
      "learning_rate": 0.0002,
      "loss": 1.9648,
      "step": 17
    },
    {
      "epoch": 0.0008991458114790948,
      "grad_norm": 0.197265625,
      "learning_rate": 0.0002,
      "loss": 1.9303,
      "step": 18
    },
    {
      "epoch": 0.0009490983565612668,
      "grad_norm": 0.201171875,
      "learning_rate": 0.0002,
      "loss": 1.9416,
      "step": 19
    },
    {
      "epoch": 0.0009990509016434388,
      "grad_norm": 0.1904296875,
      "learning_rate": 0.0002,
      "loss": 1.9454,
      "step": 20
    },
    {
      "epoch": 0.0010490034467256107,
      "grad_norm": 0.2001953125,
      "learning_rate": 0.0002,
      "loss": 1.9603,
      "step": 21
    },
    {
      "epoch": 0.0010989559918077826,
      "grad_norm": 0.171875,
      "learning_rate": 0.0002,
      "loss": 1.9216,
      "step": 22
    },
    {
      "epoch": 0.0011489085368899545,
      "grad_norm": 0.2255859375,
      "learning_rate": 0.0002,
      "loss": 1.9503,
      "step": 23
    },
    {
      "epoch": 0.0011988610819721264,
      "grad_norm": 0.1572265625,
      "learning_rate": 0.0002,
      "loss": 1.9421,
      "step": 24
    },
    {
      "epoch": 0.0012488136270542983,
      "grad_norm": 0.1591796875,
      "learning_rate": 0.0002,
      "loss": 1.9211,
      "step": 25
    },
    {
      "epoch": 0.0012987661721364703,
      "grad_norm": 0.1611328125,
      "learning_rate": 0.0002,
      "loss": 1.9854,
      "step": 26
    },
    {
      "epoch": 0.0013487187172186424,
      "grad_norm": 0.1455078125,
      "learning_rate": 0.0002,
      "loss": 1.9081,
      "step": 27
    },
    {
      "epoch": 0.0013986712623008143,
      "grad_norm": 0.15625,
      "learning_rate": 0.0002,
      "loss": 1.9687,
      "step": 28
    },
    {
      "epoch": 0.0014486238073829862,
      "grad_norm": 0.1494140625,
      "learning_rate": 0.0002,
      "loss": 1.9339,
      "step": 29
    },
    {
      "epoch": 0.0014985763524651581,
      "grad_norm": 0.1484375,
      "learning_rate": 0.0002,
      "loss": 1.9282,
      "step": 30
    },
    {
      "epoch": 0.00154852889754733,
      "grad_norm": 0.138671875,
      "learning_rate": 0.0002,
      "loss": 1.9222,
      "step": 31
    },
    {
      "epoch": 0.001598481442629502,
      "grad_norm": 0.158203125,
      "learning_rate": 0.0002,
      "loss": 1.937,
      "step": 32
    },
    {
      "epoch": 0.0016484339877116739,
      "grad_norm": 0.1455078125,
      "learning_rate": 0.0002,
      "loss": 1.9536,
      "step": 33
    },
    {
      "epoch": 0.0016983865327938458,
      "grad_norm": 0.13671875,
      "learning_rate": 0.0002,
      "loss": 1.9108,
      "step": 34
    },
    {
      "epoch": 0.0017483390778760177,
      "grad_norm": 0.1376953125,
      "learning_rate": 0.0002,
      "loss": 1.9168,
      "step": 35
    },
    {
      "epoch": 0.0017982916229581896,
      "grad_norm": 0.1435546875,
      "learning_rate": 0.0002,
      "loss": 1.9554,
      "step": 36
    },
    {
      "epoch": 0.0018482441680403618,
      "grad_norm": 0.1396484375,
      "learning_rate": 0.0002,
      "loss": 1.9365,
      "step": 37
    },
    {
      "epoch": 0.0018981967131225337,
      "grad_norm": 0.130859375,
      "learning_rate": 0.0002,
      "loss": 1.9047,
      "step": 38
    },
    {
      "epoch": 0.0019481492582047056,
      "grad_norm": 0.138671875,
      "learning_rate": 0.0002,
      "loss": 1.953,
      "step": 39
    },
    {
      "epoch": 0.0019981018032868775,
      "grad_norm": 0.146484375,
      "learning_rate": 0.0002,
      "loss": 1.9552,
      "step": 40
    },
    {
      "epoch": 0.0020480543483690494,
      "grad_norm": 0.140625,
      "learning_rate": 0.0002,
      "loss": 1.9421,
      "step": 41
    },
    {
      "epoch": 0.0020980068934512213,
      "grad_norm": 0.134765625,
      "learning_rate": 0.0002,
      "loss": 1.9078,
      "step": 42
    },
    {
      "epoch": 0.0021479594385333933,
      "grad_norm": 0.1669921875,
      "learning_rate": 0.0002,
      "loss": 1.9458,
      "step": 43
    },
    {
      "epoch": 0.002197911983615565,
      "grad_norm": 0.1357421875,
      "learning_rate": 0.0002,
      "loss": 1.9172,
      "step": 44
    },
    {
      "epoch": 0.002247864528697737,
      "grad_norm": 0.1396484375,
      "learning_rate": 0.0002,
      "loss": 1.8999,
      "step": 45
    },
    {
      "epoch": 0.002297817073779909,
      "grad_norm": 0.1435546875,
      "learning_rate": 0.0002,
      "loss": 1.9224,
      "step": 46
    },
    {
      "epoch": 0.002347769618862081,
      "grad_norm": 0.18359375,
      "learning_rate": 0.0002,
      "loss": 1.9183,
      "step": 47
    },
    {
      "epoch": 0.002397722163944253,
      "grad_norm": 0.1630859375,
      "learning_rate": 0.0002,
      "loss": 1.9258,
      "step": 48
    },
    {
      "epoch": 0.0024476747090264248,
      "grad_norm": 0.1689453125,
      "learning_rate": 0.0002,
      "loss": 1.9046,
      "step": 49
    },
    {
      "epoch": 0.0024976272541085967,
      "grad_norm": 0.16015625,
      "learning_rate": 0.0002,
      "loss": 1.9506,
      "step": 50
    },
    {
      "epoch": 0.0025475797991907686,
      "grad_norm": 0.171875,
      "learning_rate": 0.0002,
      "loss": 1.9396,
      "step": 51
    },
    {
      "epoch": 0.0025975323442729405,
      "grad_norm": 0.1552734375,
      "learning_rate": 0.0002,
      "loss": 1.9131,
      "step": 52
    },
    {
      "epoch": 0.002647484889355113,
      "grad_norm": 0.1591796875,
      "learning_rate": 0.0002,
      "loss": 1.8852,
      "step": 53
    },
    {
      "epoch": 0.0026974374344372848,
      "grad_norm": 0.154296875,
      "learning_rate": 0.0002,
      "loss": 1.9306,
      "step": 54
    },
    {
      "epoch": 0.0027473899795194567,
      "grad_norm": 0.14453125,
      "learning_rate": 0.0002,
      "loss": 1.8989,
      "step": 55
    },
    {
      "epoch": 0.0027973425246016286,
      "grad_norm": 0.154296875,
      "learning_rate": 0.0002,
      "loss": 1.9273,
      "step": 56
    },
    {
      "epoch": 0.0028472950696838005,
      "grad_norm": 0.16015625,
      "learning_rate": 0.0002,
      "loss": 1.9314,
      "step": 57
    },
    {
      "epoch": 0.0028972476147659724,
      "grad_norm": 0.1474609375,
      "learning_rate": 0.0002,
      "loss": 1.9098,
      "step": 58
    },
    {
      "epoch": 0.0029472001598481443,
      "grad_norm": 0.1884765625,
      "learning_rate": 0.0002,
      "loss": 1.8778,
      "step": 59
    },
    {
      "epoch": 0.0029971527049303163,
      "grad_norm": 0.259765625,
      "learning_rate": 0.0002,
      "loss": 1.8856,
      "step": 60
    },
    {
      "epoch": 0.003047105250012488,
      "grad_norm": 0.21484375,
      "learning_rate": 0.0002,
      "loss": 1.8735,
      "step": 61
    },
    {
      "epoch": 0.00309705779509466,
      "grad_norm": 0.154296875,
      "learning_rate": 0.0002,
      "loss": 1.9047,
      "step": 62
    },
    {
      "epoch": 0.003147010340176832,
      "grad_norm": 0.1806640625,
      "learning_rate": 0.0002,
      "loss": 1.9401,
      "step": 63
    },
    {
      "epoch": 0.003196962885259004,
      "grad_norm": 0.1826171875,
      "learning_rate": 0.0002,
      "loss": 1.9593,
      "step": 64
    },
    {
      "epoch": 0.003246915430341176,
      "grad_norm": 0.1494140625,
      "learning_rate": 0.0002,
      "loss": 1.8762,
      "step": 65
    },
    {
      "epoch": 0.0032968679754233478,
      "grad_norm": 0.1943359375,
      "learning_rate": 0.0002,
      "loss": 1.9102,
      "step": 66
    },
    {
      "epoch": 0.0033468205205055197,
      "grad_norm": 0.203125,
      "learning_rate": 0.0002,
      "loss": 1.8891,
      "step": 67
    },
    {
      "epoch": 0.0033967730655876916,
      "grad_norm": 0.1748046875,
      "learning_rate": 0.0002,
      "loss": 1.89,
      "step": 68
    },
    {
      "epoch": 0.0034467256106698635,
      "grad_norm": 0.1591796875,
      "learning_rate": 0.0002,
      "loss": 1.9058,
      "step": 69
    },
    {
      "epoch": 0.0034966781557520354,
      "grad_norm": 0.1875,
      "learning_rate": 0.0002,
      "loss": 1.921,
      "step": 70
    },
    {
      "epoch": 0.0035466307008342073,
      "grad_norm": 0.169921875,
      "learning_rate": 0.0002,
      "loss": 1.9024,
      "step": 71
    },
    {
      "epoch": 0.0035965832459163793,
      "grad_norm": 0.201171875,
      "learning_rate": 0.0002,
      "loss": 1.888,
      "step": 72
    },
    {
      "epoch": 0.003646535790998551,
      "grad_norm": 0.1923828125,
      "learning_rate": 0.0002,
      "loss": 1.9482,
      "step": 73
    },
    {
      "epoch": 0.0036964883360807235,
      "grad_norm": 0.19140625,
      "learning_rate": 0.0002,
      "loss": 1.9083,
      "step": 74
    },
    {
      "epoch": 0.0037464408811628954,
      "grad_norm": 0.177734375,
      "learning_rate": 0.0002,
      "loss": 1.8941,
      "step": 75
    },
    {
      "epoch": 0.0037963934262450674,
      "grad_norm": 0.1875,
      "learning_rate": 0.0002,
      "loss": 1.8182,
      "step": 76
    },
    {
      "epoch": 0.0038463459713272393,
      "grad_norm": 0.2001953125,
      "learning_rate": 0.0002,
      "loss": 1.8851,
      "step": 77
    },
    {
      "epoch": 0.003896298516409411,
      "grad_norm": 0.1806640625,
      "learning_rate": 0.0002,
      "loss": 1.8908,
      "step": 78
    },
    {
      "epoch": 0.003946251061491583,
      "grad_norm": 0.17578125,
      "learning_rate": 0.0002,
      "loss": 1.865,
      "step": 79
    },
    {
      "epoch": 0.003996203606573755,
      "grad_norm": 0.1884765625,
      "learning_rate": 0.0002,
      "loss": 1.8739,
      "step": 80
    },
    {
      "epoch": 0.004046156151655927,
      "grad_norm": 0.2001953125,
      "learning_rate": 0.0002,
      "loss": 1.8663,
      "step": 81
    },
    {
      "epoch": 0.004096108696738099,
      "grad_norm": 0.1708984375,
      "learning_rate": 0.0002,
      "loss": 1.8611,
      "step": 82
    },
    {
      "epoch": 0.004146061241820271,
      "grad_norm": 0.19921875,
      "learning_rate": 0.0002,
      "loss": 1.9407,
      "step": 83
    },
    {
      "epoch": 0.004196013786902443,
      "grad_norm": 0.177734375,
      "learning_rate": 0.0002,
      "loss": 1.8856,
      "step": 84
    },
    {
      "epoch": 0.004245966331984615,
      "grad_norm": 0.193359375,
      "learning_rate": 0.0002,
      "loss": 1.8824,
      "step": 85
    },
    {
      "epoch": 0.0042959188770667865,
      "grad_norm": 0.1787109375,
      "learning_rate": 0.0002,
      "loss": 1.8966,
      "step": 86
    },
    {
      "epoch": 0.004345871422148958,
      "grad_norm": 0.19140625,
      "learning_rate": 0.0002,
      "loss": 1.889,
      "step": 87
    },
    {
      "epoch": 0.00439582396723113,
      "grad_norm": 0.1884765625,
      "learning_rate": 0.0002,
      "loss": 1.8558,
      "step": 88
    },
    {
      "epoch": 0.004445776512313302,
      "grad_norm": 0.189453125,
      "learning_rate": 0.0002,
      "loss": 1.8981,
      "step": 89
    },
    {
      "epoch": 0.004495729057395474,
      "grad_norm": 0.189453125,
      "learning_rate": 0.0002,
      "loss": 1.8875,
      "step": 90
    },
    {
      "epoch": 0.004545681602477646,
      "grad_norm": 0.248046875,
      "learning_rate": 0.0002,
      "loss": 1.9172,
      "step": 91
    },
    {
      "epoch": 0.004595634147559818,
      "grad_norm": 0.189453125,
      "learning_rate": 0.0002,
      "loss": 1.8927,
      "step": 92
    },
    {
      "epoch": 0.00464558669264199,
      "grad_norm": 0.35546875,
      "learning_rate": 0.0002,
      "loss": 1.8491,
      "step": 93
    },
    {
      "epoch": 0.004695539237724162,
      "grad_norm": 0.255859375,
      "learning_rate": 0.0002,
      "loss": 1.9081,
      "step": 94
    },
    {
      "epoch": 0.004745491782806334,
      "grad_norm": 0.255859375,
      "learning_rate": 0.0002,
      "loss": 1.8536,
      "step": 95
    },
    {
      "epoch": 0.004795444327888506,
      "grad_norm": 0.2080078125,
      "learning_rate": 0.0002,
      "loss": 1.8619,
      "step": 96
    },
    {
      "epoch": 0.004845396872970678,
      "grad_norm": 0.216796875,
      "learning_rate": 0.0002,
      "loss": 1.8592,
      "step": 97
    },
    {
      "epoch": 0.0048953494180528495,
      "grad_norm": 0.2119140625,
      "learning_rate": 0.0002,
      "loss": 1.8504,
      "step": 98
    },
    {
      "epoch": 0.004945301963135021,
      "grad_norm": 0.2177734375,
      "learning_rate": 0.0002,
      "loss": 1.851,
      "step": 99
    },
    {
      "epoch": 0.004995254508217193,
      "grad_norm": 0.1982421875,
      "learning_rate": 0.0002,
      "loss": 1.8595,
      "step": 100
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 12011400,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 600,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2021802943447040.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}