{
"best_metric": 0.4649004638195038,
"best_model_checkpoint": "miner_id_24/checkpoint-1050",
"epoch": 0.3981042654028436,
"eval_steps": 150,
"global_step": 1050,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0003791469194312796,
"eval_loss": 1.7795168161392212,
"eval_runtime": 209.8549,
"eval_samples_per_second": 21.167,
"eval_steps_per_second": 5.294,
"step": 1
},
{
"epoch": 0.0037914691943127963,
"grad_norm": 2.3918707370758057,
"learning_rate": 2e-05,
"loss": 2.1878,
"step": 10
},
{
"epoch": 0.007582938388625593,
"grad_norm": 1.775721788406372,
"learning_rate": 4e-05,
"loss": 1.257,
"step": 20
},
{
"epoch": 0.011374407582938388,
"grad_norm": 1.3155328035354614,
"learning_rate": 6e-05,
"loss": 0.7683,
"step": 30
},
{
"epoch": 0.015165876777251185,
"grad_norm": 1.3514695167541504,
"learning_rate": 8e-05,
"loss": 0.5549,
"step": 40
},
{
"epoch": 0.018957345971563982,
"grad_norm": 1.1452535390853882,
"learning_rate": 0.0001,
"loss": 0.3957,
"step": 50
},
{
"epoch": 0.022748815165876776,
"grad_norm": 1.4056144952774048,
"learning_rate": 9.999631611658893e-05,
"loss": 1.3166,
"step": 60
},
{
"epoch": 0.026540284360189573,
"grad_norm": 1.0476895570755005,
"learning_rate": 9.998526500919558e-05,
"loss": 0.6856,
"step": 70
},
{
"epoch": 0.03033175355450237,
"grad_norm": 1.057721495628357,
"learning_rate": 9.996684830625961e-05,
"loss": 0.5876,
"step": 80
},
{
"epoch": 0.034123222748815164,
"grad_norm": 0.8883450627326965,
"learning_rate": 9.99410687215805e-05,
"loss": 0.5092,
"step": 90
},
{
"epoch": 0.037914691943127965,
"grad_norm": 1.0792176723480225,
"learning_rate": 9.990793005391757e-05,
"loss": 0.3727,
"step": 100
},
{
"epoch": 0.04170616113744076,
"grad_norm": 0.9899026155471802,
"learning_rate": 9.986743718643037e-05,
"loss": 1.0962,
"step": 110
},
{
"epoch": 0.04549763033175355,
"grad_norm": 0.9852607250213623,
"learning_rate": 9.981959608595904e-05,
"loss": 0.6074,
"step": 120
},
{
"epoch": 0.04928909952606635,
"grad_norm": 0.8935758471488953,
"learning_rate": 9.976441380214499e-05,
"loss": 0.5443,
"step": 130
},
{
"epoch": 0.05308056872037915,
"grad_norm": 0.8601885437965393,
"learning_rate": 9.970189846639224e-05,
"loss": 0.4588,
"step": 140
},
{
"epoch": 0.05687203791469194,
"grad_norm": 0.8278557658195496,
"learning_rate": 9.963205929066912e-05,
"loss": 0.3571,
"step": 150
},
{
"epoch": 0.05687203791469194,
"eval_loss": 0.6163225173950195,
"eval_runtime": 211.3771,
"eval_samples_per_second": 21.015,
"eval_steps_per_second": 5.256,
"step": 150
},
{
"epoch": 0.06066350710900474,
"grad_norm": 0.9639042019844055,
"learning_rate": 9.955490656615086e-05,
"loss": 0.9907,
"step": 160
},
{
"epoch": 0.06445497630331753,
"grad_norm": 1.0571763515472412,
"learning_rate": 9.947045166170315e-05,
"loss": 0.6059,
"step": 170
},
{
"epoch": 0.06824644549763033,
"grad_norm": 0.8203420639038086,
"learning_rate": 9.937870702220684e-05,
"loss": 0.5407,
"step": 180
},
{
"epoch": 0.07203791469194312,
"grad_norm": 0.7809204459190369,
"learning_rate": 9.927968616672416e-05,
"loss": 0.4592,
"step": 190
},
{
"epoch": 0.07582938388625593,
"grad_norm": 0.6434981226921082,
"learning_rate": 9.917340368650657e-05,
"loss": 0.3295,
"step": 200
},
{
"epoch": 0.07962085308056872,
"grad_norm": 0.8934921026229858,
"learning_rate": 9.905987524284471e-05,
"loss": 0.9521,
"step": 210
},
{
"epoch": 0.08341232227488152,
"grad_norm": 0.8254252672195435,
"learning_rate": 9.89391175647606e-05,
"loss": 0.5738,
"step": 220
},
{
"epoch": 0.08720379146919431,
"grad_norm": 0.840071976184845,
"learning_rate": 9.881114844654249e-05,
"loss": 0.5222,
"step": 230
},
{
"epoch": 0.0909952606635071,
"grad_norm": 0.8142471313476562,
"learning_rate": 9.867598674512288e-05,
"loss": 0.4142,
"step": 240
},
{
"epoch": 0.0947867298578199,
"grad_norm": 0.6625562310218811,
"learning_rate": 9.853365237729976e-05,
"loss": 0.3158,
"step": 250
},
{
"epoch": 0.0985781990521327,
"grad_norm": 0.9738134145736694,
"learning_rate": 9.838416631680176e-05,
"loss": 0.9377,
"step": 260
},
{
"epoch": 0.1023696682464455,
"grad_norm": 0.919395387172699,
"learning_rate": 9.822755059119765e-05,
"loss": 0.5472,
"step": 270
},
{
"epoch": 0.1061611374407583,
"grad_norm": 0.9126551151275635,
"learning_rate": 9.806382827865035e-05,
"loss": 0.4959,
"step": 280
},
{
"epoch": 0.10995260663507109,
"grad_norm": 0.7662134766578674,
"learning_rate": 9.78930235045163e-05,
"loss": 0.4471,
"step": 290
},
{
"epoch": 0.11374407582938388,
"grad_norm": 0.6991143226623535,
"learning_rate": 9.771516143779049e-05,
"loss": 0.345,
"step": 300
},
{
"epoch": 0.11374407582938388,
"eval_loss": 0.5615507960319519,
"eval_runtime": 210.9515,
"eval_samples_per_second": 21.057,
"eval_steps_per_second": 5.267,
"step": 300
},
{
"epoch": 0.11753554502369669,
"grad_norm": 0.8847883343696594,
"learning_rate": 9.753026828739756e-05,
"loss": 0.9371,
"step": 310
},
{
"epoch": 0.12132701421800948,
"grad_norm": 0.840414822101593,
"learning_rate": 9.733837129832993e-05,
"loss": 0.5609,
"step": 320
},
{
"epoch": 0.12511848341232226,
"grad_norm": 0.8552011251449585,
"learning_rate": 9.713949874763296e-05,
"loss": 0.4904,
"step": 330
},
{
"epoch": 0.12890995260663507,
"grad_norm": 0.7504507899284363,
"learning_rate": 9.693367994023828e-05,
"loss": 0.4276,
"step": 340
},
{
"epoch": 0.13270142180094788,
"grad_norm": 0.7728025317192078,
"learning_rate": 9.672094520464552e-05,
"loss": 0.3153,
"step": 350
},
{
"epoch": 0.13649289099526066,
"grad_norm": 0.8927388191223145,
"learning_rate": 9.650132588845318e-05,
"loss": 0.8539,
"step": 360
},
{
"epoch": 0.14028436018957346,
"grad_norm": 0.9280526638031006,
"learning_rate": 9.627485435373948e-05,
"loss": 0.5319,
"step": 370
},
{
"epoch": 0.14407582938388624,
"grad_norm": 0.8443691730499268,
"learning_rate": 9.604156397229367e-05,
"loss": 0.4799,
"step": 380
},
{
"epoch": 0.14786729857819905,
"grad_norm": 0.8244546055793762,
"learning_rate": 9.580148912069836e-05,
"loss": 0.4255,
"step": 390
},
{
"epoch": 0.15165876777251186,
"grad_norm": 0.587851881980896,
"learning_rate": 9.555466517526405e-05,
"loss": 0.3149,
"step": 400
},
{
"epoch": 0.15545023696682464,
"grad_norm": 0.9294399619102478,
"learning_rate": 9.53011285068163e-05,
"loss": 0.8398,
"step": 410
},
{
"epoch": 0.15924170616113745,
"grad_norm": 0.8756105303764343,
"learning_rate": 9.50409164753362e-05,
"loss": 0.5178,
"step": 420
},
{
"epoch": 0.16303317535545023,
"grad_norm": 0.7490562796592712,
"learning_rate": 9.477406742445516e-05,
"loss": 0.4677,
"step": 430
},
{
"epoch": 0.16682464454976303,
"grad_norm": 0.8731195330619812,
"learning_rate": 9.450062067580488e-05,
"loss": 0.4073,
"step": 440
},
{
"epoch": 0.17061611374407584,
"grad_norm": 0.7838053107261658,
"learning_rate": 9.422061652322298e-05,
"loss": 0.2986,
"step": 450
},
{
"epoch": 0.17061611374407584,
"eval_loss": 0.53005450963974,
"eval_runtime": 211.0482,
"eval_samples_per_second": 21.047,
"eval_steps_per_second": 5.264,
"step": 450
},
{
"epoch": 0.17440758293838862,
"grad_norm": 0.840986430644989,
"learning_rate": 9.393409622681559e-05,
"loss": 0.8218,
"step": 460
},
{
"epoch": 0.17819905213270143,
"grad_norm": 0.8100599050521851,
"learning_rate": 9.364110200687738e-05,
"loss": 0.5342,
"step": 470
},
{
"epoch": 0.1819905213270142,
"grad_norm": 0.8114942908287048,
"learning_rate": 9.33416770376702e-05,
"loss": 0.4595,
"step": 480
},
{
"epoch": 0.18578199052132702,
"grad_norm": 0.6884726285934448,
"learning_rate": 9.303586544106115e-05,
"loss": 0.411,
"step": 490
},
{
"epoch": 0.1895734597156398,
"grad_norm": 0.8265155553817749,
"learning_rate": 9.272371228002091e-05,
"loss": 0.3095,
"step": 500
},
{
"epoch": 0.1933649289099526,
"grad_norm": 0.8282197117805481,
"learning_rate": 9.240526355198353e-05,
"loss": 0.8212,
"step": 510
},
{
"epoch": 0.1971563981042654,
"grad_norm": 0.783984899520874,
"learning_rate": 9.208056618206853e-05,
"loss": 0.5332,
"step": 520
},
{
"epoch": 0.2009478672985782,
"grad_norm": 0.7088342905044556,
"learning_rate": 9.174966801616603e-05,
"loss": 0.459,
"step": 530
},
{
"epoch": 0.204739336492891,
"grad_norm": 0.7358261942863464,
"learning_rate": 9.141261781388664e-05,
"loss": 0.3989,
"step": 540
},
{
"epoch": 0.20853080568720378,
"grad_norm": 0.686541736125946,
"learning_rate": 9.10694652413763e-05,
"loss": 0.3132,
"step": 550
},
{
"epoch": 0.2123222748815166,
"grad_norm": 0.8187770247459412,
"learning_rate": 9.072026086399777e-05,
"loss": 0.8469,
"step": 560
},
{
"epoch": 0.2161137440758294,
"grad_norm": 0.8380711674690247,
"learning_rate": 9.03650561388796e-05,
"loss": 0.5158,
"step": 570
},
{
"epoch": 0.21990521327014217,
"grad_norm": 0.7505501508712769,
"learning_rate": 9.000390340733353e-05,
"loss": 0.4408,
"step": 580
},
{
"epoch": 0.22369668246445498,
"grad_norm": 0.7203567028045654,
"learning_rate": 8.963685588714185e-05,
"loss": 0.3953,
"step": 590
},
{
"epoch": 0.22748815165876776,
"grad_norm": 0.6150539517402649,
"learning_rate": 8.926396766471537e-05,
"loss": 0.2903,
"step": 600
},
{
"epoch": 0.22748815165876776,
"eval_loss": 0.5122374892234802,
"eval_runtime": 211.7681,
"eval_samples_per_second": 20.976,
"eval_steps_per_second": 5.246,
"step": 600
},
{
"epoch": 0.23127962085308057,
"grad_norm": 0.8199161291122437,
"learning_rate": 8.888529368712357e-05,
"loss": 0.8378,
"step": 610
},
{
"epoch": 0.23507109004739338,
"grad_norm": 0.7916120886802673,
"learning_rate": 8.850088975399781e-05,
"loss": 0.5298,
"step": 620
},
{
"epoch": 0.23886255924170616,
"grad_norm": 0.836801290512085,
"learning_rate": 8.811081250930902e-05,
"loss": 0.4505,
"step": 630
},
{
"epoch": 0.24265402843601896,
"grad_norm": 0.688791036605835,
"learning_rate": 8.771511943302079e-05,
"loss": 0.4163,
"step": 640
},
{
"epoch": 0.24644549763033174,
"grad_norm": 0.7080439925193787,
"learning_rate": 8.731386883261952e-05,
"loss": 0.3036,
"step": 650
},
{
"epoch": 0.2502369668246445,
"grad_norm": 0.7296505570411682,
"learning_rate": 8.690711983452243e-05,
"loss": 0.8379,
"step": 660
},
{
"epoch": 0.25402843601895736,
"grad_norm": 0.7905020117759705,
"learning_rate": 8.649493237536499e-05,
"loss": 0.5306,
"step": 670
},
{
"epoch": 0.25781990521327014,
"grad_norm": 0.8950763940811157,
"learning_rate": 8.60773671931689e-05,
"loss": 0.46,
"step": 680
},
{
"epoch": 0.2616113744075829,
"grad_norm": 0.7092387080192566,
"learning_rate": 8.56544858183921e-05,
"loss": 0.3971,
"step": 690
},
{
"epoch": 0.26540284360189575,
"grad_norm": 0.7552494406700134,
"learning_rate": 8.522635056486181e-05,
"loss": 0.2735,
"step": 700
},
{
"epoch": 0.26919431279620853,
"grad_norm": 0.7642441391944885,
"learning_rate": 8.479302452059238e-05,
"loss": 0.7899,
"step": 710
},
{
"epoch": 0.2729857819905213,
"grad_norm": 0.773184061050415,
"learning_rate": 8.435457153848887e-05,
"loss": 0.5101,
"step": 720
},
{
"epoch": 0.27677725118483415,
"grad_norm": 0.8441540002822876,
"learning_rate": 8.391105622693793e-05,
"loss": 0.4299,
"step": 730
},
{
"epoch": 0.28056872037914693,
"grad_norm": 0.6840046048164368,
"learning_rate": 8.346254394028754e-05,
"loss": 0.3795,
"step": 740
},
{
"epoch": 0.2843601895734597,
"grad_norm": 0.5915653705596924,
"learning_rate": 8.30091007692166e-05,
"loss": 0.2805,
"step": 750
},
{
"epoch": 0.2843601895734597,
"eval_loss": 0.49101725220680237,
"eval_runtime": 211.5623,
"eval_samples_per_second": 20.996,
"eval_steps_per_second": 5.251,
"step": 750
},
{
"epoch": 0.2881516587677725,
"grad_norm": 0.8108296394348145,
"learning_rate": 8.255079353099611e-05,
"loss": 0.7564,
"step": 760
},
{
"epoch": 0.2919431279620853,
"grad_norm": 0.778976559638977,
"learning_rate": 8.208768975964338e-05,
"loss": 0.5116,
"step": 770
},
{
"epoch": 0.2957345971563981,
"grad_norm": 0.7423689961433411,
"learning_rate": 8.161985769597045e-05,
"loss": 0.4358,
"step": 780
},
{
"epoch": 0.2995260663507109,
"grad_norm": 0.7508371472358704,
"learning_rate": 8.114736627752846e-05,
"loss": 0.3686,
"step": 790
},
{
"epoch": 0.3033175355450237,
"grad_norm": 0.5939842462539673,
"learning_rate": 8.067028512844929e-05,
"loss": 0.2847,
"step": 800
},
{
"epoch": 0.3071090047393365,
"grad_norm": 0.8010008931159973,
"learning_rate": 8.018868454918627e-05,
"loss": 0.7743,
"step": 810
},
{
"epoch": 0.3109004739336493,
"grad_norm": 0.8660693764686584,
"learning_rate": 7.970263550615469e-05,
"loss": 0.5334,
"step": 820
},
{
"epoch": 0.31469194312796206,
"grad_norm": 0.708128035068512,
"learning_rate": 7.921220962127487e-05,
"loss": 0.4534,
"step": 830
},
{
"epoch": 0.3184834123222749,
"grad_norm": 0.7724855542182922,
"learning_rate": 7.871747916141808e-05,
"loss": 0.3683,
"step": 840
},
{
"epoch": 0.3222748815165877,
"grad_norm": 0.6242368817329407,
"learning_rate": 7.821851702775765e-05,
"loss": 0.2945,
"step": 850
},
{
"epoch": 0.32606635071090045,
"grad_norm": 0.8044713139533997,
"learning_rate": 7.771539674502667e-05,
"loss": 0.7826,
"step": 860
},
{
"epoch": 0.3298578199052133,
"grad_norm": 0.7477179765701294,
"learning_rate": 7.720819245068368e-05,
"loss": 0.4976,
"step": 870
},
{
"epoch": 0.33364928909952607,
"grad_norm": 0.7895752191543579,
"learning_rate": 7.669697888398812e-05,
"loss": 0.4432,
"step": 880
},
{
"epoch": 0.33744075829383885,
"grad_norm": 0.7435291409492493,
"learning_rate": 7.618183137498709e-05,
"loss": 0.3796,
"step": 890
},
{
"epoch": 0.3412322274881517,
"grad_norm": 0.8892961144447327,
"learning_rate": 7.56628258334151e-05,
"loss": 0.2694,
"step": 900
},
{
"epoch": 0.3412322274881517,
"eval_loss": 0.48085248470306396,
"eval_runtime": 211.2971,
"eval_samples_per_second": 21.023,
"eval_steps_per_second": 5.258,
"step": 900
},
{
"epoch": 0.34502369668246446,
"grad_norm": 0.7861023545265198,
"learning_rate": 7.514003873750836e-05,
"loss": 0.7591,
"step": 910
},
{
"epoch": 0.34881516587677724,
"grad_norm": 0.7269836068153381,
"learning_rate": 7.461354712273526e-05,
"loss": 0.502,
"step": 920
},
{
"epoch": 0.35260663507109,
"grad_norm": 0.7629136443138123,
"learning_rate": 7.408342857044484e-05,
"loss": 0.4215,
"step": 930
},
{
"epoch": 0.35639810426540286,
"grad_norm": 0.668658435344696,
"learning_rate": 7.354976119643472e-05,
"loss": 0.3744,
"step": 940
},
{
"epoch": 0.36018957345971564,
"grad_norm": 0.6049548387527466,
"learning_rate": 7.301262363944035e-05,
"loss": 0.2709,
"step": 950
},
{
"epoch": 0.3639810426540284,
"grad_norm": 0.8917579650878906,
"learning_rate": 7.247209504954715e-05,
"loss": 0.7532,
"step": 960
},
{
"epoch": 0.36777251184834125,
"grad_norm": 0.7818393111228943,
"learning_rate": 7.192825507652734e-05,
"loss": 0.4742,
"step": 970
},
{
"epoch": 0.37156398104265403,
"grad_norm": 0.7396854162216187,
"learning_rate": 7.138118385810313e-05,
"loss": 0.4385,
"step": 980
},
{
"epoch": 0.3753554502369668,
"grad_norm": 0.8227097392082214,
"learning_rate": 7.083096200813794e-05,
"loss": 0.3656,
"step": 990
},
{
"epoch": 0.3791469194312796,
"grad_norm": 0.6364562511444092,
"learning_rate": 7.027767060475764e-05,
"loss": 0.2728,
"step": 1000
},
{
"epoch": 0.38293838862559243,
"grad_norm": 0.7405619025230408,
"learning_rate": 6.972139117840307e-05,
"loss": 0.7329,
"step": 1010
},
{
"epoch": 0.3867298578199052,
"grad_norm": 0.761043131351471,
"learning_rate": 6.91622056998163e-05,
"loss": 0.4928,
"step": 1020
},
{
"epoch": 0.390521327014218,
"grad_norm": 0.7283722758293152,
"learning_rate": 6.860019656796163e-05,
"loss": 0.4324,
"step": 1030
},
{
"epoch": 0.3943127962085308,
"grad_norm": 0.6700026392936707,
"learning_rate": 6.80354465978838e-05,
"loss": 0.371,
"step": 1040
},
{
"epoch": 0.3981042654028436,
"grad_norm": 0.6622222065925598,
"learning_rate": 6.746803900850462e-05,
"loss": 0.2729,
"step": 1050
},
{
"epoch": 0.3981042654028436,
"eval_loss": 0.4649004638195038,
"eval_runtime": 211.8681,
"eval_samples_per_second": 20.966,
"eval_steps_per_second": 5.244,
"step": 1050
}
],
"logging_steps": 10,
"max_steps": 2638,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 150,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 2,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.315071348755661e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
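
For reference, a minimal Python sketch for reading this trainer_state.json and listing the evaluation-loss curve it records; the local filename is an assumption for illustration, not part of the checkpoint itself:

# Minimal sketch: inspect the evaluation entries logged in this trainer state.
# Assumes the file above has been saved locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries containing "eval_loss" are the periodic evaluations (eval_steps = 150).
evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(f"step {e['step']:>5}  eval_loss {e['eval_loss']:.4f}")

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])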