{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9930394431554523,
  "eval_steps": 50,
  "global_step": 645,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02320185614849188,
      "grad_norm": 29.851993456423653,
      "learning_rate": 5e-07,
      "loss": 1.6448,
      "step": 5
    },
    {
      "epoch": 0.04640371229698376,
      "grad_norm": 14.665681394229273,
      "learning_rate": 1e-06,
      "loss": 1.5147,
      "step": 10
    },
    {
      "epoch": 0.06960556844547564,
      "grad_norm": 7.818944779433003,
      "learning_rate": 9.998470286265414e-07,
      "loss": 1.2888,
      "step": 15
    },
    {
      "epoch": 0.09280742459396751,
      "grad_norm": 6.011350341653904,
      "learning_rate": 9.993882081071305e-07,
      "loss": 1.2291,
      "step": 20
    },
    {
      "epoch": 0.11600928074245939,
      "grad_norm": 5.834488035843988,
      "learning_rate": 9.986238191873872e-07,
      "loss": 1.1827,
      "step": 25
    },
    {
      "epoch": 0.13921113689095127,
      "grad_norm": 4.944851583820086,
      "learning_rate": 9.975543295858033e-07,
      "loss": 1.1637,
      "step": 30
    },
    {
      "epoch": 0.16241299303944315,
      "grad_norm": 5.051276519398371,
      "learning_rate": 9.961803937075514e-07,
      "loss": 1.1448,
      "step": 35
    },
    {
      "epoch": 0.18561484918793503,
      "grad_norm": 5.008630441799495,
      "learning_rate": 9.945028522440653e-07,
      "loss": 1.1539,
      "step": 40
    },
    {
      "epoch": 0.2088167053364269,
      "grad_norm": 4.527889171881266,
      "learning_rate": 9.925227316586314e-07,
      "loss": 1.1302,
      "step": 45
    },
    {
      "epoch": 0.23201856148491878,
      "grad_norm": 4.665448898349117,
      "learning_rate": 9.902412435583125e-07,
      "loss": 1.1315,
      "step": 50
    },
    {
      "epoch": 0.23201856148491878,
      "eval_loss": 1.1243596076965332,
      "eval_runtime": 107.3706,
      "eval_samples_per_second": 57.083,
      "eval_steps_per_second": 0.894,
      "step": 50
    },
    {
      "epoch": 0.2552204176334107,
      "grad_norm": 4.780072580854242,
      "learning_rate": 9.876597839525813e-07,
      "loss": 1.1184,
      "step": 55
    },
    {
      "epoch": 0.27842227378190254,
      "grad_norm": 4.698899616021614,
      "learning_rate": 9.847799323991233e-07,
      "loss": 1.1112,
      "step": 60
    },
    {
      "epoch": 0.30162412993039445,
      "grad_norm": 4.798544900134018,
      "learning_rate": 9.816034510373285e-07,
      "loss": 1.1022,
      "step": 65
    },
    {
      "epoch": 0.3248259860788863,
      "grad_norm": 4.7554420584590105,
      "learning_rate": 9.781322835100637e-07,
      "loss": 1.1109,
      "step": 70
    },
    {
      "epoch": 0.3480278422273782,
      "grad_norm": 4.715791460597178,
      "learning_rate": 9.743685537743856e-07,
      "loss": 1.1044,
      "step": 75
    },
    {
      "epoch": 0.37122969837587005,
      "grad_norm": 4.781446446456838,
      "learning_rate": 9.70314564801922e-07,
      "loss": 1.0982,
      "step": 80
    },
    {
      "epoch": 0.39443155452436196,
      "grad_norm": 4.616375479434296,
      "learning_rate": 9.659727971697173e-07,
      "loss": 1.0798,
      "step": 85
    },
    {
      "epoch": 0.4176334106728538,
      "grad_norm": 4.81678899141071,
      "learning_rate": 9.613459075424033e-07,
      "loss": 1.0925,
      "step": 90
    },
    {
      "epoch": 0.4408352668213457,
      "grad_norm": 4.804913673447656,
      "learning_rate": 9.564367270466245e-07,
      "loss": 1.0726,
      "step": 95
    },
    {
      "epoch": 0.46403712296983757,
      "grad_norm": 5.428814841421836,
      "learning_rate": 9.51248259538713e-07,
      "loss": 1.0732,
      "step": 100
    },
    {
      "epoch": 0.46403712296983757,
      "eval_loss": 1.079745888710022,
      "eval_runtime": 106.1023,
      "eval_samples_per_second": 57.765,
      "eval_steps_per_second": 0.905,
      "step": 100
    },
    {
      "epoch": 0.4872389791183295,
      "grad_norm": 4.842120094208203,
      "learning_rate": 9.457836797666721e-07,
      "loss": 1.0596,
      "step": 105
    },
    {
      "epoch": 0.5104408352668214,
      "grad_norm": 5.09946277444771,
      "learning_rate": 9.400463314275941e-07,
      "loss": 1.0699,
      "step": 110
    },
    {
      "epoch": 0.5336426914153132,
      "grad_norm": 4.86963572334168,
      "learning_rate": 9.340397251217008e-07,
      "loss": 1.0653,
      "step": 115
    },
    {
      "epoch": 0.5568445475638051,
      "grad_norm": 4.762779682097982,
      "learning_rate": 9.27767536204258e-07,
      "loss": 1.0729,
      "step": 120
    },
    {
      "epoch": 0.580046403712297,
      "grad_norm": 4.737687820279848,
      "learning_rate": 9.212336025366787e-07,
      "loss": 1.0709,
      "step": 125
    },
    {
      "epoch": 0.6032482598607889,
      "grad_norm": 4.782611230029462,
      "learning_rate": 9.144419221381918e-07,
      "loss": 1.0579,
      "step": 130
    },
    {
      "epoch": 0.6264501160092807,
      "grad_norm": 4.991569479884324,
      "learning_rate": 9.073966507395121e-07,
      "loss": 1.0554,
      "step": 135
    },
    {
      "epoch": 0.6496519721577726,
      "grad_norm": 5.008963657772678,
      "learning_rate": 9.001020992400085e-07,
      "loss": 1.043,
      "step": 140
    },
    {
      "epoch": 0.6728538283062645,
      "grad_norm": 5.0207483196868985,
      "learning_rate": 8.925627310699274e-07,
      "loss": 1.0531,
      "step": 145
    },
    {
      "epoch": 0.6960556844547564,
      "grad_norm": 5.288716673563479,
      "learning_rate": 8.84783159459285e-07,
      "loss": 1.0352,
      "step": 150
    },
    {
      "epoch": 0.6960556844547564,
      "eval_loss": 1.054144263267517,
      "eval_runtime": 106.0227,
      "eval_samples_per_second": 57.808,
      "eval_steps_per_second": 0.905,
      "step": 150
    },
    {
      "epoch": 0.7192575406032483,
      "grad_norm": 5.04033042010659,
      "learning_rate": 8.767681446150976e-07,
      "loss": 1.0565,
      "step": 155
    },
    {
      "epoch": 0.7424593967517401,
      "grad_norm": 4.8573560396750395,
      "learning_rate": 8.68522590808682e-07,
      "loss": 1.0409,
      "step": 160
    },
    {
      "epoch": 0.765661252900232,
      "grad_norm": 4.8296712724118125,
      "learning_rate": 8.600515433748001e-07,
      "loss": 1.0531,
      "step": 165
    },
    {
      "epoch": 0.7888631090487239,
      "grad_norm": 5.039044240861594,
      "learning_rate": 8.51360185624495e-07,
      "loss": 1.0321,
      "step": 170
    },
    {
      "epoch": 0.8120649651972158,
      "grad_norm": 4.944895975035593,
      "learning_rate": 8.424538356734956e-07,
      "loss": 1.0366,
      "step": 175
    },
    {
      "epoch": 0.8352668213457076,
      "grad_norm": 5.568497060519676,
      "learning_rate": 8.333379431881397e-07,
      "loss": 1.0454,
      "step": 180
    },
    {
      "epoch": 0.8584686774941995,
      "grad_norm": 4.984582777418547,
      "learning_rate": 8.240180860508026e-07,
      "loss": 1.0368,
      "step": 185
    },
    {
      "epoch": 0.8816705336426914,
      "grad_norm": 5.7520355685162,
      "learning_rate": 8.144999669468713e-07,
      "loss": 1.0288,
      "step": 190
    },
    {
      "epoch": 0.9048723897911833,
      "grad_norm": 5.580835397673134,
      "learning_rate": 8.047894098753539e-07,
      "loss": 1.0323,
      "step": 195
    },
    {
      "epoch": 0.9280742459396751,
      "grad_norm": 4.941555770840273,
      "learning_rate": 7.948923565852597e-07,
      "loss": 1.0347,
      "step": 200
    },
    {
      "epoch": 0.9280742459396751,
      "eval_loss": 1.0315721035003662,
      "eval_runtime": 106.1537,
      "eval_samples_per_second": 57.737,
      "eval_steps_per_second": 0.904,
      "step": 200
    },
    {
      "epoch": 0.951276102088167,
      "grad_norm": 5.3026133930305335,
      "learning_rate": 7.848148629399285e-07,
      "loss": 1.0264,
      "step": 205
    },
    {
      "epoch": 0.974477958236659,
      "grad_norm": 5.3528963387584945,
      "learning_rate": 7.745630952115363e-07,
      "loss": 1.0204,
      "step": 210
    },
    {
      "epoch": 0.9976798143851509,
      "grad_norm": 4.897176107855643,
      "learning_rate": 7.641433263080418e-07,
      "loss": 1.0287,
      "step": 215
    },
    {
      "epoch": 1.0208816705336428,
      "grad_norm": 5.265360108783065,
      "learning_rate": 7.535619319348865e-07,
      "loss": 0.92,
      "step": 220
    },
    {
      "epoch": 1.0440835266821347,
      "grad_norm": 5.842663985054576,
      "learning_rate": 7.428253866937918e-07,
      "loss": 0.9119,
      "step": 225
    },
    {
      "epoch": 1.0672853828306264,
      "grad_norm": 5.255941350708592,
      "learning_rate": 7.319402601210447e-07,
      "loss": 0.9091,
      "step": 230
    },
    {
      "epoch": 1.0904872389791183,
      "grad_norm": 5.099242988471265,
      "learning_rate": 7.209132126676933e-07,
      "loss": 0.8901,
      "step": 235
    },
    {
      "epoch": 1.1136890951276102,
      "grad_norm": 5.963233610769237,
      "learning_rate": 7.097509916241145e-07,
      "loss": 0.8963,
      "step": 240
    },
    {
      "epoch": 1.136890951276102,
      "grad_norm": 5.342236832387553,
      "learning_rate": 6.984604269914436e-07,
      "loss": 0.878,
      "step": 245
    },
    {
      "epoch": 1.160092807424594,
      "grad_norm": 5.312138241916301,
      "learning_rate": 6.870484273023967e-07,
      "loss": 0.8776,
      "step": 250
    },
    {
      "epoch": 1.160092807424594,
      "eval_loss": 1.0247548818588257,
      "eval_runtime": 105.9632,
      "eval_samples_per_second": 57.841,
      "eval_steps_per_second": 0.906,
      "step": 250
    },
    {
      "epoch": 1.1832946635730859,
      "grad_norm": 5.364600498172952,
      "learning_rate": 6.755219753940388e-07,
      "loss": 0.8925,
      "step": 255
    },
    {
      "epoch": 1.2064965197215778,
      "grad_norm": 5.752065985447796,
      "learning_rate": 6.638881241350883e-07,
      "loss": 0.897,
      "step": 260
    },
    {
      "epoch": 1.2296983758700697,
      "grad_norm": 5.570817988636112,
      "learning_rate": 6.52153992110368e-07,
      "loss": 0.8839,
      "step": 265
    },
    {
      "epoch": 1.2529002320185616,
      "grad_norm": 5.496660790853752,
      "learning_rate": 6.403267592650466e-07,
      "loss": 0.8916,
      "step": 270
    },
    {
      "epoch": 1.2761020881670533,
      "grad_norm": 5.463347675557559,
      "learning_rate": 6.28413662511334e-07,
      "loss": 0.8755,
      "step": 275
    },
    {
      "epoch": 1.2993039443155452,
      "grad_norm": 5.514605068924435,
      "learning_rate": 6.164219913003207e-07,
      "loss": 0.8775,
      "step": 280
    },
    {
      "epoch": 1.322505800464037,
      "grad_norm": 5.727155908190926,
      "learning_rate": 6.043590831616676e-07,
      "loss": 0.9028,
      "step": 285
    },
    {
      "epoch": 1.345707656612529,
      "grad_norm": 5.429710747722258,
      "learning_rate": 5.92232319213878e-07,
      "loss": 0.887,
      "step": 290
    },
    {
      "epoch": 1.368909512761021,
      "grad_norm": 5.482889252366046,
      "learning_rate": 5.800491196478988e-07,
      "loss": 0.8759,
      "step": 295
    },
    {
      "epoch": 1.3921113689095128,
      "grad_norm": 5.670067440966938,
      "learning_rate": 5.678169391868127e-07,
      "loss": 0.8888,
      "step": 300
    },
    {
      "epoch": 1.3921113689095128,
      "eval_loss": 1.0144710540771484,
      "eval_runtime": 105.8665,
      "eval_samples_per_second": 57.894,
      "eval_steps_per_second": 0.907,
      "step": 300
    },
    {
      "epoch": 1.4153132250580047,
      "grad_norm": 5.530990774365388,
      "learning_rate": 5.555432625244023e-07,
      "loss": 0.8939,
      "step": 305
    },
    {
      "epoch": 1.4385150812064964,
      "grad_norm": 5.572591226625499,
      "learning_rate": 5.432355997453728e-07,
      "loss": 0.8929,
      "step": 310
    },
    {
      "epoch": 1.4617169373549883,
      "grad_norm": 5.456192949381362,
      "learning_rate": 5.309014817300421e-07,
      "loss": 0.9025,
      "step": 315
    },
    {
      "epoch": 1.4849187935034802,
      "grad_norm": 5.799863863147117,
      "learning_rate": 5.185484555463026e-07,
      "loss": 0.8753,
      "step": 320
    },
    {
      "epoch": 1.5081206496519721,
      "grad_norm": 5.73261677068134,
      "learning_rate": 5.061840798316814e-07,
      "loss": 0.8938,
      "step": 325
    },
    {
      "epoch": 1.531322505800464,
      "grad_norm": 5.762497249365714,
      "learning_rate": 4.938159201683186e-07,
      "loss": 0.882,
      "step": 330
    },
    {
      "epoch": 1.554524361948956,
      "grad_norm": 5.533641700223646,
      "learning_rate": 4.814515444536974e-07,
      "loss": 0.8846,
      "step": 335
    },
    {
      "epoch": 1.5777262180974478,
      "grad_norm": 5.782636301369744,
      "learning_rate": 4.69098518269958e-07,
      "loss": 0.8875,
      "step": 340
    },
    {
      "epoch": 1.6009280742459397,
      "grad_norm": 5.461443407271733,
      "learning_rate": 4.5676440025462726e-07,
      "loss": 0.8713,
      "step": 345
    },
    {
      "epoch": 1.6241299303944317,
      "grad_norm": 5.571957109149912,
      "learning_rate": 4.444567374755977e-07,
      "loss": 0.8902,
      "step": 350
    },
    {
      "epoch": 1.6241299303944317,
      "eval_loss": 1.0069366693496704,
      "eval_runtime": 106.2838,
      "eval_samples_per_second": 57.666,
      "eval_steps_per_second": 0.903,
      "step": 350
    },
    {
      "epoch": 1.6473317865429236,
      "grad_norm": 5.528554551947589,
      "learning_rate": 4.3218306081318713e-07,
      "loss": 0.8789,
      "step": 355
    },
    {
      "epoch": 1.6705336426914155,
      "grad_norm": 5.640345642741927,
      "learning_rate": 4.199508803521012e-07,
      "loss": 0.8774,
      "step": 360
    },
    {
      "epoch": 1.6937354988399071,
      "grad_norm": 5.603129030745687,
      "learning_rate": 4.0776768078612207e-07,
      "loss": 0.8717,
      "step": 365
    },
    {
      "epoch": 1.716937354988399,
      "grad_norm": 5.6707399409090895,
      "learning_rate": 3.9564091683833244e-07,
      "loss": 0.8764,
      "step": 370
    },
    {
      "epoch": 1.740139211136891,
      "grad_norm": 5.448091702098461,
      "learning_rate": 3.835780086996793e-07,
      "loss": 0.865,
      "step": 375
    },
    {
      "epoch": 1.7633410672853829,
      "grad_norm": 5.582199831694094,
      "learning_rate": 3.7158633748866607e-07,
      "loss": 0.8779,
      "step": 380
    },
    {
      "epoch": 1.7865429234338746,
      "grad_norm": 5.6752676781492255,
      "learning_rate": 3.596732407349536e-07,
      "loss": 0.8672,
      "step": 385
    },
    {
      "epoch": 1.8097447795823665,
      "grad_norm": 5.5587691039632015,
      "learning_rate": 3.4784600788963193e-07,
      "loss": 0.8725,
      "step": 390
    },
    {
      "epoch": 1.8329466357308584,
      "grad_norm": 5.5052011161351615,
      "learning_rate": 3.3611187586491157e-07,
      "loss": 0.8615,
      "step": 395
    },
    {
      "epoch": 1.8561484918793503,
      "grad_norm": 5.507497258082307,
      "learning_rate": 3.244780246059612e-07,
      "loss": 0.8773,
      "step": 400
    },
    {
      "epoch": 1.8561484918793503,
      "eval_loss": 0.9998582005500793,
      "eval_runtime": 105.9976,
      "eval_samples_per_second": 57.822,
      "eval_steps_per_second": 0.906,
      "step": 400
    },
    {
      "epoch": 1.8793503480278422,
      "grad_norm": 5.492700851735595,
      "learning_rate": 3.129515726976034e-07,
      "loss": 0.8671,
      "step": 405
    },
    {
      "epoch": 1.902552204176334,
      "grad_norm": 5.7486034264931165,
      "learning_rate": 3.015395730085565e-07,
      "loss": 0.8614,
      "step": 410
    },
    {
      "epoch": 1.925754060324826,
      "grad_norm": 5.124949765854869,
      "learning_rate": 2.902490083758856e-07,
      "loss": 0.871,
      "step": 415
    },
    {
      "epoch": 1.948955916473318,
      "grad_norm": 5.408996022602446,
      "learning_rate": 2.790867873323067e-07,
      "loss": 0.8795,
      "step": 420
    },
    {
      "epoch": 1.9721577726218098,
      "grad_norm": 5.750334709291967,
      "learning_rate": 2.680597398789554e-07,
      "loss": 0.8588,
      "step": 425
    },
    {
      "epoch": 1.9953596287703017,
      "grad_norm": 5.711244881678974,
      "learning_rate": 2.5717461330620815e-07,
      "loss": 0.8719,
      "step": 430
    },
    {
      "epoch": 2.0185614849187936,
      "grad_norm": 6.122014805843198,
      "learning_rate": 2.464380680651134e-07,
      "loss": 0.7933,
      "step": 435
    },
    {
      "epoch": 2.0417633410672855,
      "grad_norm": 6.215562802835813,
      "learning_rate": 2.358566736919581e-07,
      "loss": 0.7943,
      "step": 440
    },
    {
      "epoch": 2.0649651972157774,
      "grad_norm": 5.90592154174332,
      "learning_rate": 2.2543690478846388e-07,
      "loss": 0.778,
      "step": 445
    },
    {
      "epoch": 2.0881670533642693,
      "grad_norm": 5.689955483676048,
      "learning_rate": 2.1518513706007152e-07,
      "loss": 0.7847,
      "step": 450
    },
    {
      "epoch": 2.0881670533642693,
      "eval_loss": 1.0113998651504517,
      "eval_runtime": 106.2215,
      "eval_samples_per_second": 57.7,
      "eval_steps_per_second": 0.904,
      "step": 450
    },
    {
      "epoch": 2.111368909512761,
      "grad_norm": 5.550398002087045,
      "learning_rate": 2.051076434147403e-07,
      "loss": 0.7843,
      "step": 455
    },
    {
      "epoch": 2.1345707656612527,
      "grad_norm": 5.681734155354411,
      "learning_rate": 1.9521059012464607e-07,
      "loss": 0.7678,
      "step": 460
    },
    {
      "epoch": 2.1577726218097446,
      "grad_norm": 5.865956254391275,
      "learning_rate": 1.855000330531289e-07,
      "loss": 0.7897,
      "step": 465
    },
    {
      "epoch": 2.1809744779582365,
      "grad_norm": 6.055658613132172,
      "learning_rate": 1.7598191394919737e-07,
      "loss": 0.7811,
      "step": 470
    },
    {
      "epoch": 2.2041763341067284,
      "grad_norm": 6.233987278809335,
      "learning_rate": 1.666620568118603e-07,
      "loss": 0.7975,
      "step": 475
    },
    {
      "epoch": 2.2273781902552203,
      "grad_norm": 5.824376205357217,
      "learning_rate": 1.5754616432650443e-07,
      "loss": 0.7801,
      "step": 480
    },
    {
      "epoch": 2.2505800464037122,
      "grad_norm": 6.375178064295241,
      "learning_rate": 1.4863981437550498e-07,
      "loss": 0.781,
      "step": 485
    },
    {
      "epoch": 2.273781902552204,
      "grad_norm": 6.211607396754804,
      "learning_rate": 1.3994845662519983e-07,
      "loss": 0.7799,
      "step": 490
    },
    {
      "epoch": 2.296983758700696,
      "grad_norm": 5.980327624770438,
      "learning_rate": 1.3147740919131812e-07,
      "loss": 0.783,
      "step": 495
    },
    {
      "epoch": 2.320185614849188,
      "grad_norm": 5.925933936835478,
      "learning_rate": 1.2323185538490228e-07,
      "loss": 0.7668,
      "step": 500
    },
    {
      "epoch": 2.320185614849188,
      "eval_loss": 1.0112136602401733,
      "eval_runtime": 105.6668,
      "eval_samples_per_second": 58.003,
      "eval_steps_per_second": 0.909,
      "step": 500
    },
    {
      "epoch": 2.34338747099768,
      "grad_norm": 6.06031016994975,
      "learning_rate": 1.1521684054071523e-07,
      "loss": 0.7762,
      "step": 505
    },
    {
      "epoch": 2.3665893271461718,
      "grad_norm": 6.173374303238529,
      "learning_rate": 1.0743726893007254e-07,
      "loss": 0.7806,
      "step": 510
    },
    {
      "epoch": 2.3897911832946637,
      "grad_norm": 6.009209700505245,
      "learning_rate": 9.989790075999144e-08,
      "loss": 0.7731,
      "step": 515
    },
    {
      "epoch": 2.4129930394431556,
      "grad_norm": 5.744648775110227,
      "learning_rate": 9.260334926048785e-08,
      "loss": 0.7892,
      "step": 520
    },
    {
      "epoch": 2.4361948955916475,
      "grad_norm": 5.949296215452574,
      "learning_rate": 8.555807786180813e-08,
      "loss": 0.7785,
      "step": 525
    },
    {
      "epoch": 2.4593967517401394,
      "grad_norm": 5.936014562230361,
      "learning_rate": 7.876639746332131e-08,
      "loss": 0.7829,
      "step": 530
    },
    {
      "epoch": 2.4825986078886313,
      "grad_norm": 5.859713250927308,
      "learning_rate": 7.223246379574205e-08,
      "loss": 0.7919,
      "step": 535
    },
    {
      "epoch": 2.505800464037123,
      "grad_norm": 6.243664538264793,
      "learning_rate": 6.596027487829913e-08,
      "loss": 0.7688,
      "step": 540
    },
    {
      "epoch": 2.529002320185615,
      "grad_norm": 6.031972239373065,
      "learning_rate": 5.995366857240591e-08,
      "loss": 0.7789,
      "step": 545
    },
    {
      "epoch": 2.5522041763341066,
      "grad_norm": 6.130593159142583,
      "learning_rate": 5.421632023332778e-08,
      "loss": 0.775,
      "step": 550
    },
    {
      "epoch": 2.5522041763341066,
      "eval_loss": 1.0103154182434082,
      "eval_runtime": 105.6016,
      "eval_samples_per_second": 58.039,
      "eval_steps_per_second": 0.909,
      "step": 550
    },
    {
      "epoch": 2.5754060324825985,
      "grad_norm": 5.963207804570064,
      "learning_rate": 4.8751740461286826e-08,
      "loss": 0.789,
      "step": 555
    },
    {
      "epoch": 2.5986078886310904,
      "grad_norm": 5.8736607983865525,
      "learning_rate": 4.356327295337542e-08,
      "loss": 0.7992,
      "step": 560
    },
    {
      "epoch": 2.6218097447795823,
      "grad_norm": 6.077763163177709,
      "learning_rate": 3.865409245759671e-08,
      "loss": 0.7824,
      "step": 565
    },
    {
      "epoch": 2.645011600928074,
      "grad_norm": 5.999232007718555,
      "learning_rate": 3.402720283028277e-08,
      "loss": 0.7807,
      "step": 570
    },
    {
      "epoch": 2.668213457076566,
      "grad_norm": 5.782915072578643,
      "learning_rate": 2.968543519807809e-08,
      "loss": 0.7836,
      "step": 575
    },
    {
      "epoch": 2.691415313225058,
      "grad_norm": 5.895133763158038,
      "learning_rate": 2.5631446225614527e-08,
      "loss": 0.7866,
      "step": 580
    },
    {
      "epoch": 2.71461716937355,
      "grad_norm": 5.789961425841594,
      "learning_rate": 2.1867716489936294e-08,
      "loss": 0.7724,
      "step": 585
    },
    {
      "epoch": 2.737819025522042,
      "grad_norm": 5.666163619117144,
      "learning_rate": 1.8396548962671454e-08,
      "loss": 0.7903,
      "step": 590
    },
    {
      "epoch": 2.7610208816705337,
      "grad_norm": 5.768500926901973,
      "learning_rate": 1.5220067600876684e-08,
      "loss": 0.7711,
      "step": 595
    },
    {
      "epoch": 2.7842227378190256,
      "grad_norm": 6.087496150272388,
      "learning_rate": 1.2340216047418694e-08,
      "loss": 0.7768,
      "step": 600
    },
    {
      "epoch": 2.7842227378190256,
      "eval_loss": 1.008864164352417,
      "eval_runtime": 105.8761,
      "eval_samples_per_second": 57.888,
      "eval_steps_per_second": 0.907,
      "step": 600
    },
    {
      "epoch": 2.8074245939675175,
      "grad_norm": 6.207391484980199,
      "learning_rate": 9.758756441687332e-09,
      "loss": 0.7875,
      "step": 605
    },
    {
      "epoch": 2.8306264501160094,
      "grad_norm": 5.954613559365798,
      "learning_rate": 7.477268341368359e-09,
      "loss": 0.785,
      "step": 610
    },
    {
      "epoch": 2.853828306264501,
      "grad_norm": 5.819327712838159,
      "learning_rate": 5.497147755934628e-09,
      "loss": 0.7832,
      "step": 615
    },
    {
      "epoch": 2.877030162412993,
      "grad_norm": 5.786005516883902,
      "learning_rate": 3.819606292448541e-09,
      "loss": 0.7789,
      "step": 620
    },
    {
      "epoch": 2.9002320185614847,
      "grad_norm": 6.130101975578697,
      "learning_rate": 2.4456704141967433e-09,
      "loss": 0.7973,
      "step": 625
    },
    {
      "epoch": 2.9234338747099766,
      "grad_norm": 6.065019587560417,
      "learning_rate": 1.3761808126126483e-09,
      "loss": 0.7696,
      "step": 630
    },
    {
      "epoch": 2.9466357308584685,
      "grad_norm": 5.601585670340757,
      "learning_rate": 6.117918928693622e-10,
      "loss": 0.7886,
      "step": 635
    },
    {
      "epoch": 2.9698375870069604,
      "grad_norm": 6.062046858888585,
      "learning_rate": 1.529713734584326e-10,
      "loss": 0.7661,
      "step": 640
    },
    {
      "epoch": 2.9930394431554523,
      "grad_norm": 6.1764579820644885,
      "learning_rate": 0.0,
      "loss": 0.7806,
      "step": 645
    },
    {
      "epoch": 2.9930394431554523,
      "step": 645,
      "total_flos": 3802978389590016.0,
      "train_loss": 0.9239856169205304,
      "train_runtime": 9561.8566,
      "train_samples_per_second": 17.305,
      "train_steps_per_second": 0.067
    }
  ],
  "logging_steps": 5,
  "max_steps": 645,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3802978389590016.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}