Qwen-2.5-Coder-3B-SQL-Writer / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9946814168705456,
"eval_steps": 500,
"global_step": 586,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0034038932028507603,
"grad_norm": 7.0625,
"learning_rate": 1.9999856294471073e-05,
"loss": 1.1956,
"step": 1
},
{
"epoch": 0.017019466014253803,
"grad_norm": 1.453125,
"learning_rate": 1.9996407568285402e-05,
"loss": 0.8575,
"step": 5
},
{
"epoch": 0.03403893202850761,
"grad_norm": 1.046875,
"learning_rate": 1.9985632854254735e-05,
"loss": 0.7124,
"step": 10
},
{
"epoch": 0.05105839804276141,
"grad_norm": 0.9140625,
"learning_rate": 1.996768359939288e-05,
"loss": 0.6181,
"step": 15
},
{
"epoch": 0.06807786405701521,
"grad_norm": 1.0078125,
"learning_rate": 1.994257269999431e-05,
"loss": 0.613,
"step": 20
},
{
"epoch": 0.08509733007126902,
"grad_norm": 0.84375,
"learning_rate": 1.991031819789732e-05,
"loss": 0.5848,
"step": 25
},
{
"epoch": 0.10211679608552282,
"grad_norm": 0.84375,
"learning_rate": 1.9870943267521144e-05,
"loss": 0.5576,
"step": 30
},
{
"epoch": 0.11913626209977662,
"grad_norm": 0.875,
"learning_rate": 1.9824476199215526e-05,
"loss": 0.5539,
"step": 35
},
{
"epoch": 0.13615572811403043,
"grad_norm": 0.8828125,
"learning_rate": 1.9770950378934433e-05,
"loss": 0.5261,
"step": 40
},
{
"epoch": 0.15317519412828423,
"grad_norm": 0.83984375,
"learning_rate": 1.9710404264248734e-05,
"loss": 0.5085,
"step": 45
},
{
"epoch": 0.17019466014253803,
"grad_norm": 0.8984375,
"learning_rate": 1.964288135671494e-05,
"loss": 0.5053,
"step": 50
},
{
"epoch": 0.18721412615679184,
"grad_norm": 0.84765625,
"learning_rate": 1.9568430170619953e-05,
"loss": 0.4847,
"step": 55
},
{
"epoch": 0.20423359217104564,
"grad_norm": 0.98828125,
"learning_rate": 1.94871041981242e-05,
"loss": 0.4862,
"step": 60
},
{
"epoch": 0.22125305818529944,
"grad_norm": 0.9375,
"learning_rate": 1.939896187082824e-05,
"loss": 0.4877,
"step": 65
},
{
"epoch": 0.23827252419955325,
"grad_norm": 0.92578125,
"learning_rate": 1.9304066517790465e-05,
"loss": 0.481,
"step": 70
},
{
"epoch": 0.25529199021380705,
"grad_norm": 0.953125,
"learning_rate": 1.9202486320026045e-05,
"loss": 0.4754,
"step": 75
},
{
"epoch": 0.27231145622806086,
"grad_norm": 0.96875,
"learning_rate": 1.9094294261519787e-05,
"loss": 0.4881,
"step": 80
},
{
"epoch": 0.28933092224231466,
"grad_norm": 0.8203125,
"learning_rate": 1.897956807678813e-05,
"loss": 0.4884,
"step": 85
},
{
"epoch": 0.30635038825656846,
"grad_norm": 0.87890625,
"learning_rate": 1.8858390195027986e-05,
"loss": 0.4759,
"step": 90
},
{
"epoch": 0.32336985427082227,
"grad_norm": 0.93359375,
"learning_rate": 1.873084768089246e-05,
"loss": 0.4678,
"step": 95
},
{
"epoch": 0.34038932028507607,
"grad_norm": 0.87890625,
"learning_rate": 1.8597032171936103e-05,
"loss": 0.4744,
"step": 100
},
{
"epoch": 0.3574087862993299,
"grad_norm": 0.8359375,
"learning_rate": 1.8457039812774574e-05,
"loss": 0.4693,
"step": 105
},
{
"epoch": 0.3744282523135837,
"grad_norm": 0.8671875,
"learning_rate": 1.831097118600604e-05,
"loss": 0.4697,
"step": 110
},
{
"epoch": 0.3914477183278375,
"grad_norm": 0.89453125,
"learning_rate": 1.8158931239943957e-05,
"loss": 0.4524,
"step": 115
},
{
"epoch": 0.4084671843420913,
"grad_norm": 0.9140625,
"learning_rate": 1.800102921321316e-05,
"loss": 0.4487,
"step": 120
},
{
"epoch": 0.4254866503563451,
"grad_norm": 0.9296875,
"learning_rate": 1.7837378556263368e-05,
"loss": 0.4683,
"step": 125
},
{
"epoch": 0.4425061163705989,
"grad_norm": 0.98828125,
"learning_rate": 1.766809684985661e-05,
"loss": 0.4567,
"step": 130
},
{
"epoch": 0.4595255823848527,
"grad_norm": 0.90625,
"learning_rate": 1.7493305720587047e-05,
"loss": 0.4682,
"step": 135
},
{
"epoch": 0.4765450483991065,
"grad_norm": 0.9609375,
"learning_rate": 1.7313130753493917e-05,
"loss": 0.4649,
"step": 140
},
{
"epoch": 0.4935645144133603,
"grad_norm": 0.92578125,
"learning_rate": 1.7127701401830422e-05,
"loss": 0.4412,
"step": 145
},
{
"epoch": 0.5105839804276141,
"grad_norm": 0.88671875,
"learning_rate": 1.6937150894053306e-05,
"loss": 0.4505,
"step": 150
},
{
"epoch": 0.5276034464418679,
"grad_norm": 0.890625,
"learning_rate": 1.674161613810003e-05,
"loss": 0.4655,
"step": 155
},
{
"epoch": 0.5446229124561217,
"grad_norm": 0.96484375,
"learning_rate": 1.6541237623022333e-05,
"loss": 0.4536,
"step": 160
},
{
"epoch": 0.5616423784703755,
"grad_norm": 0.84765625,
"learning_rate": 1.6336159318046698e-05,
"loss": 0.4398,
"step": 165
},
{
"epoch": 0.5786618444846293,
"grad_norm": 0.84375,
"learning_rate": 1.612652856913449e-05,
"loss": 0.4548,
"step": 170
},
{
"epoch": 0.5956813104988831,
"grad_norm": 0.9453125,
"learning_rate": 1.5912495993115848e-05,
"loss": 0.4456,
"step": 175
},
{
"epoch": 0.6127007765131369,
"grad_norm": 0.953125,
"learning_rate": 1.5694215369473584e-05,
"loss": 0.4466,
"step": 180
},
{
"epoch": 0.6297202425273907,
"grad_norm": 0.93359375,
"learning_rate": 1.547184352985472e-05,
"loss": 0.4353,
"step": 185
},
{
"epoch": 0.6467397085416445,
"grad_norm": 0.9296875,
"learning_rate": 1.5245540245389053e-05,
"loss": 0.426,
"step": 190
},
{
"epoch": 0.6637591745558983,
"grad_norm": 0.93359375,
"learning_rate": 1.501546811189584e-05,
"loss": 0.4273,
"step": 195
},
{
"epoch": 0.6807786405701521,
"grad_norm": 0.875,
"learning_rate": 1.4781792433060884e-05,
"loss": 0.4401,
"step": 200
},
{
"epoch": 0.6977981065844059,
"grad_norm": 0.96875,
"learning_rate": 1.4544681101668099e-05,
"loss": 0.423,
"step": 205
},
{
"epoch": 0.7148175725986597,
"grad_norm": 0.90234375,
"learning_rate": 1.4304304478970839e-05,
"loss": 0.4402,
"step": 210
},
{
"epoch": 0.7318370386129135,
"grad_norm": 0.90625,
"learning_rate": 1.4060835272289673e-05,
"loss": 0.4352,
"step": 215
},
{
"epoch": 0.7488565046271674,
"grad_norm": 0.9375,
"learning_rate": 1.381444841092452e-05,
"loss": 0.434,
"step": 220
},
{
"epoch": 0.7658759706414211,
"grad_norm": 0.9140625,
"learning_rate": 1.3565320920470348e-05,
"loss": 0.4287,
"step": 225
},
{
"epoch": 0.782895436655675,
"grad_norm": 0.87890625,
"learning_rate": 1.3313631795626691e-05,
"loss": 0.431,
"step": 230
},
{
"epoch": 0.7999149026699287,
"grad_norm": 0.875,
"learning_rate": 1.3059561871592413e-05,
"loss": 0.4329,
"step": 235
},
{
"epoch": 0.8169343686841826,
"grad_norm": 0.90625,
"learning_rate": 1.2803293694138077e-05,
"loss": 0.4258,
"step": 240
},
{
"epoch": 0.8339538346984363,
"grad_norm": 0.90234375,
"learning_rate": 1.254501138844931e-05,
"loss": 0.4118,
"step": 245
},
{
"epoch": 0.8509733007126902,
"grad_norm": 0.89453125,
"learning_rate": 1.228490052683537e-05,
"loss": 0.4249,
"step": 250
},
{
"epoch": 0.8679927667269439,
"grad_norm": 0.84765625,
"learning_rate": 1.202314799539797e-05,
"loss": 0.4339,
"step": 255
},
{
"epoch": 0.8850122327411978,
"grad_norm": 0.84375,
"learning_rate": 1.1759941859756173e-05,
"loss": 0.4336,
"step": 260
},
{
"epoch": 0.9020316987554515,
"grad_norm": 0.9140625,
"learning_rate": 1.1495471229923805e-05,
"loss": 0.435,
"step": 265
},
{
"epoch": 0.9190511647697054,
"grad_norm": 0.875,
"learning_rate": 1.1229926124436506e-05,
"loss": 0.4182,
"step": 270
},
{
"epoch": 0.9360706307839591,
"grad_norm": 0.8984375,
"learning_rate": 1.0963497333826002e-05,
"loss": 0.4221,
"step": 275
},
{
"epoch": 0.953090096798213,
"grad_norm": 0.91015625,
"learning_rate": 1.0696376283539704e-05,
"loss": 0.4157,
"step": 280
},
{
"epoch": 0.9701095628124667,
"grad_norm": 0.89453125,
"learning_rate": 1.042875489640415e-05,
"loss": 0.4402,
"step": 285
},
{
"epoch": 0.9871290288267206,
"grad_norm": 0.9375,
"learning_rate": 1.0160825454731072e-05,
"loss": 0.425,
"step": 290
},
{
"epoch": 1.0041484948409745,
"grad_norm": 0.8515625,
"learning_rate": 9.892780462165172e-06,
"loss": 0.4046,
"step": 295
},
{
"epoch": 1.0211679608552282,
"grad_norm": 0.890625,
"learning_rate": 9.624812505372907e-06,
"loss": 0.4138,
"step": 300
},
{
"epoch": 1.038187426869482,
"grad_norm": 0.84765625,
"learning_rate": 9.35711411567156e-06,
"loss": 0.3996,
"step": 305
},
{
"epoch": 1.0552068928837357,
"grad_norm": 0.88671875,
"learning_rate": 9.089877630698159e-06,
"loss": 0.4033,
"step": 310
},
{
"epoch": 1.0722263588979897,
"grad_norm": 0.828125,
"learning_rate": 8.823295056217487e-06,
"loss": 0.4152,
"step": 315
},
{
"epoch": 1.0892458249122434,
"grad_norm": 0.859375,
"learning_rate": 8.557557928168568e-06,
"loss": 0.3985,
"step": 320
},
{
"epoch": 1.1062652909264972,
"grad_norm": 0.9140625,
"learning_rate": 8.292857175048715e-06,
"loss": 0.3984,
"step": 325
},
{
"epoch": 1.123284756940751,
"grad_norm": 0.8828125,
"learning_rate": 8.029382980734e-06,
"loss": 0.4165,
"step": 330
},
{
"epoch": 1.1403042229550047,
"grad_norm": 0.8828125,
"learning_rate": 7.767324647834759e-06,
"loss": 0.3934,
"step": 335
},
{
"epoch": 1.1573236889692586,
"grad_norm": 0.82421875,
"learning_rate": 7.506870461684215e-06,
"loss": 0.4083,
"step": 340
},
{
"epoch": 1.1743431549835124,
"grad_norm": 0.84765625,
"learning_rate": 7.24820755505808e-06,
"loss": 0.4071,
"step": 345
},
{
"epoch": 1.1913626209977661,
"grad_norm": 0.85546875,
"learning_rate": 6.991521773722186e-06,
"loss": 0.4187,
"step": 350
},
{
"epoch": 1.20838208701202,
"grad_norm": 0.95703125,
"learning_rate": 6.736997542904844e-06,
"loss": 0.4026,
"step": 355
},
{
"epoch": 1.2254015530262738,
"grad_norm": 0.8671875,
"learning_rate": 6.484817734789839e-06,
"loss": 0.4099,
"step": 360
},
{
"epoch": 1.2424210190405276,
"grad_norm": 0.8125,
"learning_rate": 6.235163537125257e-06,
"loss": 0.3996,
"step": 365
},
{
"epoch": 1.2594404850547813,
"grad_norm": 0.95703125,
"learning_rate": 5.988214323042581e-06,
"loss": 0.397,
"step": 370
},
{
"epoch": 1.276459951069035,
"grad_norm": 0.8828125,
"learning_rate": 5.744147522179515e-06,
"loss": 0.3983,
"step": 375
},
{
"epoch": 1.293479417083289,
"grad_norm": 0.87109375,
"learning_rate": 5.503138493199247e-06,
"loss": 0.4045,
"step": 380
},
{
"epoch": 1.3104988830975428,
"grad_norm": 0.8125,
"learning_rate": 5.26536039779761e-06,
"loss": 0.4186,
"step": 385
},
{
"epoch": 1.3275183491117966,
"grad_norm": 0.88671875,
"learning_rate": 5.030984076288805e-06,
"loss": 0.4073,
"step": 390
},
{
"epoch": 1.3445378151260505,
"grad_norm": 0.90234375,
"learning_rate": 4.800177924858933e-06,
"loss": 0.4093,
"step": 395
},
{
"epoch": 1.3615572811403043,
"grad_norm": 0.8828125,
"learning_rate": 4.573107774575665e-06,
"loss": 0.4009,
"step": 400
},
{
"epoch": 1.378576747154558,
"grad_norm": 0.88671875,
"learning_rate": 4.34993677224086e-06,
"loss": 0.3877,
"step": 405
},
{
"epoch": 1.3955962131688118,
"grad_norm": 0.890625,
"learning_rate": 4.130825263171833e-06,
"loss": 0.4145,
"step": 410
},
{
"epoch": 1.4126156791830655,
"grad_norm": 0.8359375,
"learning_rate": 3.915930675995424e-06,
"loss": 0.409,
"step": 415
},
{
"epoch": 1.4296351451973195,
"grad_norm": 0.9296875,
"learning_rate": 3.7054074095376845e-06,
"loss": 0.412,
"step": 420
},
{
"epoch": 1.4466546112115732,
"grad_norm": 0.88671875,
"learning_rate": 3.4994067218904383e-06,
"loss": 0.4094,
"step": 425
},
{
"epoch": 1.463674077225827,
"grad_norm": 0.96875,
"learning_rate": 3.2980766217343852e-06,
"loss": 0.4011,
"step": 430
},
{
"epoch": 1.480693543240081,
"grad_norm": 0.85546875,
"learning_rate": 3.101561761996912e-06,
"loss": 0.4041,
"step": 435
},
{
"epoch": 1.4977130092543347,
"grad_norm": 0.8515625,
"learning_rate": 2.910003335920918e-06,
"loss": 0.4129,
"step": 440
},
{
"epoch": 1.5147324752685885,
"grad_norm": 0.87890625,
"learning_rate": 2.7235389756194097e-06,
"loss": 0.4021,
"step": 445
},
{
"epoch": 1.5317519412828422,
"grad_norm": 0.875,
"learning_rate": 2.542302653188704e-06,
"loss": 0.409,
"step": 450
},
{
"epoch": 1.548771407297096,
"grad_norm": 0.921875,
"learning_rate": 2.3664245844513078e-06,
"loss": 0.3986,
"step": 455
},
{
"epoch": 1.5657908733113497,
"grad_norm": 0.8828125,
"learning_rate": 2.1960311353976318e-06,
"loss": 0.4126,
"step": 460
},
{
"epoch": 1.5828103393256037,
"grad_norm": 0.87890625,
"learning_rate": 2.031244731393739e-06,
"loss": 0.4007,
"step": 465
},
{
"epoch": 1.5998298053398574,
"grad_norm": 0.8359375,
"learning_rate": 1.8721837692204115e-06,
"loss": 0.3942,
"step": 470
},
{
"epoch": 1.6168492713541114,
"grad_norm": 0.84375,
"learning_rate": 1.7189625320066573e-06,
"loss": 0.3918,
"step": 475
},
{
"epoch": 1.6338687373683651,
"grad_norm": 0.8515625,
"learning_rate": 1.5716911071188611e-06,
"loss": 0.4207,
"step": 480
},
{
"epoch": 1.6508882033826189,
"grad_norm": 0.83203125,
"learning_rate": 1.4304753070645083e-06,
"loss": 0.3924,
"step": 485
},
{
"epoch": 1.6679076693968726,
"grad_norm": 0.84765625,
"learning_rate": 1.295416593467338e-06,
"loss": 0.3986,
"step": 490
},
{
"epoch": 1.6849271354111264,
"grad_norm": 0.859375,
"learning_rate": 1.166612004168568e-06,
"loss": 0.4055,
"step": 495
},
{
"epoch": 1.7019466014253801,
"grad_norm": 0.98046875,
"learning_rate": 1.0441540835065101e-06,
"loss": 0.3996,
"step": 500
},
{
"epoch": 1.718966067439634,
"grad_norm": 0.8359375,
"learning_rate": 9.28130815824746e-07,
"loss": 0.411,
"step": 505
},
{
"epoch": 1.7359855334538878,
"grad_norm": 0.8359375,
"learning_rate": 8.186255622565642e-07,
"loss": 0.4101,
"step": 510
},
{
"epoch": 1.7530049994681418,
"grad_norm": 0.89453125,
"learning_rate": 7.15717000831132e-07,
"loss": 0.4132,
"step": 515
},
{
"epoch": 1.7700244654823956,
"grad_norm": 0.875,
"learning_rate": 6.19479069944402e-07,
"loss": 0.4109,
"step": 520
},
{
"epoch": 1.7870439314966493,
"grad_norm": 0.8828125,
"learning_rate": 5.299809152353863e-07,
"loss": 0.4106,
"step": 525
},
{
"epoch": 1.804063397510903,
"grad_norm": 0.8671875,
"learning_rate": 4.4728683990596267e-07,
"loss": 0.3986,
"step": 530
},
{
"epoch": 1.8210828635251568,
"grad_norm": 0.8671875,
"learning_rate": 3.714562585198922e-07,
"loss": 0.4101,
"step": 535
},
{
"epoch": 1.8381023295394106,
"grad_norm": 0.9296875,
"learning_rate": 3.025436543142801e-07,
"loss": 0.3974,
"step": 540
},
{
"epoch": 1.8551217955536645,
"grad_norm": 0.93359375,
"learning_rate": 2.4059854005410224e-07,
"loss": 0.4048,
"step": 545
},
{
"epoch": 1.8721412615679183,
"grad_norm": 0.90625,
"learning_rate": 1.856654224579635e-07,
"loss": 0.4075,
"step": 550
},
{
"epoch": 1.8891607275821722,
"grad_norm": 0.9375,
"learning_rate": 1.3778377022063083e-07,
"loss": 0.4045,
"step": 555
},
{
"epoch": 1.906180193596426,
"grad_norm": 0.9140625,
"learning_rate": 9.698798565531464e-08,
"loss": 0.4043,
"step": 560
},
{
"epoch": 1.9231996596106797,
"grad_norm": 0.86328125,
"learning_rate": 6.330737997607194e-08,
"loss": 0.3944,
"step": 565
},
{
"epoch": 1.9402191256249335,
"grad_norm": 0.84375,
"learning_rate": 3.6766152238106866e-08,
"loss": 0.4082,
"step": 570
},
{
"epoch": 1.9572385916391872,
"grad_norm": 0.90234375,
"learning_rate": 1.738337195107187e-08,
"loss": 0.404,
"step": 575
},
{
"epoch": 1.974258057653441,
"grad_norm": 0.88671875,
"learning_rate": 5.172965377890915e-09,
"loss": 0.4013,
"step": 580
},
{
"epoch": 1.991277523667695,
"grad_norm": 0.87890625,
"learning_rate": 1.4370552892883916e-10,
"loss": 0.4101,
"step": 585
},
{
"epoch": 1.9946814168705456,
"step": 586,
"total_flos": 2.1138563148357304e+18,
"train_loss": 0.4405478376373903,
"train_runtime": 25027.7179,
"train_samples_per_second": 3.005,
"train_steps_per_second": 0.023
}
],
"logging_steps": 5,
"max_steps": 586,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.1138563148357304e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}