{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.144796380090498,
"eval_steps": 500,
"global_step": 112,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03619909502262444,
"grad_norm": 2.867363214492798,
"learning_rate": 3.7037037037037036e-07,
"loss": 0.7721,
"step": 1
},
{
"epoch": 0.07239819004524888,
"grad_norm": 2.795196771621704,
"learning_rate": 7.407407407407407e-07,
"loss": 0.7534,
"step": 2
},
{
"epoch": 0.1085972850678733,
"grad_norm": 2.8518760204315186,
"learning_rate": 1.111111111111111e-06,
"loss": 0.7531,
"step": 3
},
{
"epoch": 0.14479638009049775,
"grad_norm": 2.7276031970977783,
"learning_rate": 1.4814814814814815e-06,
"loss": 0.7604,
"step": 4
},
{
"epoch": 0.18099547511312217,
"grad_norm": 2.6922106742858887,
"learning_rate": 1.8518518518518519e-06,
"loss": 0.7775,
"step": 5
},
{
"epoch": 0.2171945701357466,
"grad_norm": 2.8505051136016846,
"learning_rate": 2.222222222222222e-06,
"loss": 0.7689,
"step": 6
},
{
"epoch": 0.25339366515837103,
"grad_norm": 2.711665153503418,
"learning_rate": 2.5925925925925925e-06,
"loss": 0.7687,
"step": 7
},
{
"epoch": 0.2895927601809955,
"grad_norm": 2.7716190814971924,
"learning_rate": 2.962962962962963e-06,
"loss": 0.769,
"step": 8
},
{
"epoch": 0.3257918552036199,
"grad_norm": 2.607780933380127,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.7674,
"step": 9
},
{
"epoch": 0.36199095022624433,
"grad_norm": 2.305697202682495,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.7529,
"step": 10
},
{
"epoch": 0.39819004524886875,
"grad_norm": 1.5863313674926758,
"learning_rate": 4.074074074074074e-06,
"loss": 0.7259,
"step": 11
},
{
"epoch": 0.4343891402714932,
"grad_norm": 1.2682443857192993,
"learning_rate": 4.444444444444444e-06,
"loss": 0.7206,
"step": 12
},
{
"epoch": 0.47058823529411764,
"grad_norm": 1.010533332824707,
"learning_rate": 4.814814814814815e-06,
"loss": 0.6921,
"step": 13
},
{
"epoch": 0.5067873303167421,
"grad_norm": 0.9136636257171631,
"learning_rate": 5.185185185185185e-06,
"loss": 0.6987,
"step": 14
},
{
"epoch": 0.5429864253393665,
"grad_norm": 1.275439739227295,
"learning_rate": 5.555555555555557e-06,
"loss": 0.6997,
"step": 15
},
{
"epoch": 0.579185520361991,
"grad_norm": 1.4839826822280884,
"learning_rate": 5.925925925925926e-06,
"loss": 0.6773,
"step": 16
},
{
"epoch": 0.6153846153846154,
"grad_norm": 1.5695866346359253,
"learning_rate": 6.296296296296297e-06,
"loss": 0.6582,
"step": 17
},
{
"epoch": 0.6515837104072398,
"grad_norm": 1.6517796516418457,
"learning_rate": 6.666666666666667e-06,
"loss": 0.6912,
"step": 18
},
{
"epoch": 0.6877828054298643,
"grad_norm": 1.3464380502700806,
"learning_rate": 7.0370370370370375e-06,
"loss": 0.6636,
"step": 19
},
{
"epoch": 0.7239819004524887,
"grad_norm": 1.0865730047225952,
"learning_rate": 7.4074074074074075e-06,
"loss": 0.6571,
"step": 20
},
{
"epoch": 0.7601809954751131,
"grad_norm": 0.8862206339836121,
"learning_rate": 7.77777777777778e-06,
"loss": 0.6583,
"step": 21
},
{
"epoch": 0.7963800904977375,
"grad_norm": 0.9783180952072144,
"learning_rate": 8.148148148148148e-06,
"loss": 0.6688,
"step": 22
},
{
"epoch": 0.832579185520362,
"grad_norm": 0.7160141468048096,
"learning_rate": 8.518518518518519e-06,
"loss": 0.6408,
"step": 23
},
{
"epoch": 0.8687782805429864,
"grad_norm": 0.6991405487060547,
"learning_rate": 8.888888888888888e-06,
"loss": 0.6447,
"step": 24
},
{
"epoch": 0.9049773755656109,
"grad_norm": 0.7763178944587708,
"learning_rate": 9.25925925925926e-06,
"loss": 0.6512,
"step": 25
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.5905259847640991,
"learning_rate": 9.62962962962963e-06,
"loss": 0.6337,
"step": 26
},
{
"epoch": 0.9773755656108597,
"grad_norm": 0.7515297532081604,
"learning_rate": 1e-05,
"loss": 0.655,
"step": 27
},
{
"epoch": 1.0361990950226245,
"grad_norm": 1.260597825050354,
"learning_rate": 9.999582149277188e-06,
"loss": 1.2675,
"step": 28
},
{
"epoch": 1.0723981900452488,
"grad_norm": 0.5624723434448242,
"learning_rate": 9.998328666948437e-06,
"loss": 0.5869,
"step": 29
},
{
"epoch": 1.1085972850678734,
"grad_norm": 0.5378224849700928,
"learning_rate": 9.996239762521152e-06,
"loss": 0.5919,
"step": 30
},
{
"epoch": 1.1447963800904977,
"grad_norm": 0.48451751470565796,
"learning_rate": 9.993315785135417e-06,
"loss": 0.5952,
"step": 31
},
{
"epoch": 1.1809954751131222,
"grad_norm": 0.4453476071357727,
"learning_rate": 9.989557223505661e-06,
"loss": 0.5941,
"step": 32
},
{
"epoch": 1.2171945701357467,
"grad_norm": 0.5747421979904175,
"learning_rate": 9.98496470583896e-06,
"loss": 0.6023,
"step": 33
},
{
"epoch": 1.253393665158371,
"grad_norm": 0.4679132401943207,
"learning_rate": 9.979538999730047e-06,
"loss": 0.5768,
"step": 34
},
{
"epoch": 1.2895927601809956,
"grad_norm": 0.4853627383708954,
"learning_rate": 9.973281012033009e-06,
"loss": 0.5566,
"step": 35
},
{
"epoch": 1.3257918552036199,
"grad_norm": 0.44682788848876953,
"learning_rate": 9.966191788709716e-06,
"loss": 0.5981,
"step": 36
},
{
"epoch": 1.3619909502262444,
"grad_norm": 0.4072805941104889,
"learning_rate": 9.958272514655006e-06,
"loss": 0.5664,
"step": 37
},
{
"epoch": 1.3981900452488687,
"grad_norm": 0.35199496150016785,
"learning_rate": 9.949524513498636e-06,
"loss": 0.5864,
"step": 38
},
{
"epoch": 1.4343891402714932,
"grad_norm": 0.420901894569397,
"learning_rate": 9.939949247384046e-06,
"loss": 0.5558,
"step": 39
},
{
"epoch": 1.4705882352941178,
"grad_norm": 0.39620932936668396,
"learning_rate": 9.929548316723983e-06,
"loss": 0.5713,
"step": 40
},
{
"epoch": 1.506787330316742,
"grad_norm": 0.36159488558769226,
"learning_rate": 9.918323459933006e-06,
"loss": 0.5712,
"step": 41
},
{
"epoch": 1.5429864253393664,
"grad_norm": 0.3682953119277954,
"learning_rate": 9.906276553136924e-06,
"loss": 0.5543,
"step": 42
},
{
"epoch": 1.5791855203619911,
"grad_norm": 0.35126638412475586,
"learning_rate": 9.893409609859221e-06,
"loss": 0.5615,
"step": 43
},
{
"epoch": 1.6153846153846154,
"grad_norm": 0.38464972376823425,
"learning_rate": 9.879724780684518e-06,
"loss": 0.5635,
"step": 44
},
{
"epoch": 1.6515837104072397,
"grad_norm": 0.37480273842811584,
"learning_rate": 9.86522435289912e-06,
"loss": 0.5618,
"step": 45
},
{
"epoch": 1.6877828054298643,
"grad_norm": 0.388458788394928,
"learning_rate": 9.849910750108718e-06,
"loss": 0.5635,
"step": 46
},
{
"epoch": 1.7239819004524888,
"grad_norm": 0.4044710099697113,
"learning_rate": 9.833786531833311e-06,
"loss": 0.5665,
"step": 47
},
{
"epoch": 1.760180995475113,
"grad_norm": 0.3484383821487427,
"learning_rate": 9.816854393079402e-06,
"loss": 0.5522,
"step": 48
},
{
"epoch": 1.7963800904977374,
"grad_norm": 0.33232375979423523,
"learning_rate": 9.79911716388956e-06,
"loss": 0.5712,
"step": 49
},
{
"epoch": 1.8325791855203621,
"grad_norm": 0.4234112501144409,
"learning_rate": 9.7805778088694e-06,
"loss": 0.5496,
"step": 50
},
{
"epoch": 1.8687782805429864,
"grad_norm": 0.3586530387401581,
"learning_rate": 9.761239426692077e-06,
"loss": 0.5488,
"step": 51
},
{
"epoch": 1.9049773755656108,
"grad_norm": 0.34319135546684265,
"learning_rate": 9.741105249580383e-06,
"loss": 0.5534,
"step": 52
},
{
"epoch": 1.9411764705882353,
"grad_norm": 0.406502366065979,
"learning_rate": 9.7201786427665e-06,
"loss": 0.5765,
"step": 53
},
{
"epoch": 1.9773755656108598,
"grad_norm": 0.3931889533996582,
"learning_rate": 9.698463103929542e-06,
"loss": 0.561,
"step": 54
},
{
"epoch": 2.0361990950226243,
"grad_norm": 0.7969871759414673,
"learning_rate": 9.67596226261095e-06,
"loss": 0.9762,
"step": 55
},
{
"epoch": 2.072398190045249,
"grad_norm": 0.34094685316085815,
"learning_rate": 9.652679879607843e-06,
"loss": 0.5055,
"step": 56
},
{
"epoch": 2.1085972850678734,
"grad_norm": 0.38012585043907166,
"learning_rate": 9.628619846344453e-06,
"loss": 0.4799,
"step": 57
},
{
"epoch": 2.1447963800904977,
"grad_norm": 0.3892729580402374,
"learning_rate": 9.603786184221693e-06,
"loss": 0.4758,
"step": 58
},
{
"epoch": 2.180995475113122,
"grad_norm": 0.47513318061828613,
"learning_rate": 9.578183043945031e-06,
"loss": 0.4635,
"step": 59
},
{
"epoch": 2.2171945701357467,
"grad_norm": 0.3308075964450836,
"learning_rate": 9.551814704830734e-06,
"loss": 0.4698,
"step": 60
},
{
"epoch": 2.253393665158371,
"grad_norm": 0.3714766800403595,
"learning_rate": 9.524685574090627e-06,
"loss": 0.478,
"step": 61
},
{
"epoch": 2.2895927601809953,
"grad_norm": 0.37184762954711914,
"learning_rate": 9.496800186095466e-06,
"loss": 0.4802,
"step": 62
},
{
"epoch": 2.32579185520362,
"grad_norm": 0.4524374306201935,
"learning_rate": 9.468163201617063e-06,
"loss": 0.4848,
"step": 63
},
{
"epoch": 2.3619909502262444,
"grad_norm": 0.3513346016407013,
"learning_rate": 9.438779407049282e-06,
"loss": 0.4408,
"step": 64
},
{
"epoch": 2.3981900452488687,
"grad_norm": 0.3224553167819977,
"learning_rate": 9.40865371360804e-06,
"loss": 0.4523,
"step": 65
},
{
"epoch": 2.4343891402714934,
"grad_norm": 0.3933435380458832,
"learning_rate": 9.377791156510456e-06,
"loss": 0.458,
"step": 66
},
{
"epoch": 2.4705882352941178,
"grad_norm": 0.3878864645957947,
"learning_rate": 9.346196894133239e-06,
"loss": 0.4732,
"step": 67
},
{
"epoch": 2.506787330316742,
"grad_norm": 0.44199082255363464,
"learning_rate": 9.313876207150544e-06,
"loss": 0.4412,
"step": 68
},
{
"epoch": 2.5429864253393664,
"grad_norm": 0.3837670087814331,
"learning_rate": 9.280834497651334e-06,
"loss": 0.4461,
"step": 69
},
{
"epoch": 2.579185520361991,
"grad_norm": 0.34122994542121887,
"learning_rate": 9.247077288236488e-06,
"loss": 0.4565,
"step": 70
},
{
"epoch": 2.6153846153846154,
"grad_norm": 0.344008207321167,
"learning_rate": 9.212610221095748e-06,
"loss": 0.4317,
"step": 71
},
{
"epoch": 2.6515837104072397,
"grad_norm": 0.3702448606491089,
"learning_rate": 9.177439057064684e-06,
"loss": 0.4384,
"step": 72
},
{
"epoch": 2.6877828054298645,
"grad_norm": 0.3998362720012665,
"learning_rate": 9.141569674661816e-06,
"loss": 0.4646,
"step": 73
},
{
"epoch": 2.723981900452489,
"grad_norm": 0.4177471101284027,
"learning_rate": 9.105008069106093e-06,
"loss": 0.4555,
"step": 74
},
{
"epoch": 2.760180995475113,
"grad_norm": 0.3557385802268982,
"learning_rate": 9.067760351314838e-06,
"loss": 0.451,
"step": 75
},
{
"epoch": 2.7963800904977374,
"grad_norm": 0.36134663224220276,
"learning_rate": 9.029832746882372e-06,
"loss": 0.4339,
"step": 76
},
{
"epoch": 2.832579185520362,
"grad_norm": 0.3386276960372925,
"learning_rate": 8.991231595039464e-06,
"loss": 0.4272,
"step": 77
},
{
"epoch": 2.8687782805429864,
"grad_norm": 0.3196982443332672,
"learning_rate": 8.951963347593797e-06,
"loss": 0.4476,
"step": 78
},
{
"epoch": 2.9049773755656108,
"grad_norm": 0.3494734764099121,
"learning_rate": 8.9120345678516e-06,
"loss": 0.4669,
"step": 79
},
{
"epoch": 2.9411764705882355,
"grad_norm": 0.369515597820282,
"learning_rate": 8.871451929520662e-06,
"loss": 0.4459,
"step": 80
},
{
"epoch": 2.97737556561086,
"grad_norm": 0.3368425965309143,
"learning_rate": 8.83022221559489e-06,
"loss": 0.4641,
"step": 81
},
{
"epoch": 3.0361990950226243,
"grad_norm": 0.778194010257721,
"learning_rate": 8.78835231722059e-06,
"loss": 0.8712,
"step": 82
},
{
"epoch": 3.072398190045249,
"grad_norm": 0.5022688508033752,
"learning_rate": 8.74584923254468e-06,
"loss": 0.3798,
"step": 83
},
{
"epoch": 3.1085972850678734,
"grad_norm": 0.42150193452835083,
"learning_rate": 8.702720065545024e-06,
"loss": 0.3818,
"step": 84
},
{
"epoch": 3.1447963800904977,
"grad_norm": 0.4261153042316437,
"learning_rate": 8.658972024843063e-06,
"loss": 0.3746,
"step": 85
},
{
"epoch": 3.180995475113122,
"grad_norm": 0.4205586314201355,
"learning_rate": 8.614612422498965e-06,
"loss": 0.3523,
"step": 86
},
{
"epoch": 3.2171945701357467,
"grad_norm": 0.43494337797164917,
"learning_rate": 8.569648672789496e-06,
"loss": 0.3459,
"step": 87
},
{
"epoch": 3.253393665158371,
"grad_norm": 0.408480703830719,
"learning_rate": 8.524088290968781e-06,
"loss": 0.3362,
"step": 88
},
{
"epoch": 3.2895927601809953,
"grad_norm": 0.3939341902732849,
"learning_rate": 8.477938892012209e-06,
"loss": 0.3394,
"step": 89
},
{
"epoch": 3.32579185520362,
"grad_norm": 0.41535684466362,
"learning_rate": 8.43120818934367e-06,
"loss": 0.3434,
"step": 90
},
{
"epoch": 3.3619909502262444,
"grad_norm": 0.4341293275356293,
"learning_rate": 8.38390399354631e-06,
"loss": 0.3558,
"step": 91
},
{
"epoch": 3.3981900452488687,
"grad_norm": 0.3654901683330536,
"learning_rate": 8.336034211057098e-06,
"loss": 0.3394,
"step": 92
},
{
"epoch": 3.4343891402714934,
"grad_norm": 0.3632822632789612,
"learning_rate": 8.28760684284532e-06,
"loss": 0.352,
"step": 93
},
{
"epoch": 3.4705882352941178,
"grad_norm": 0.3629053235054016,
"learning_rate": 8.238629983075296e-06,
"loss": 0.3314,
"step": 94
},
{
"epoch": 3.506787330316742,
"grad_norm": 0.4233010709285736,
"learning_rate": 8.18911181775353e-06,
"loss": 0.346,
"step": 95
},
{
"epoch": 3.5429864253393664,
"grad_norm": 0.36843806505203247,
"learning_rate": 8.139060623360494e-06,
"loss": 0.3608,
"step": 96
},
{
"epoch": 3.579185520361991,
"grad_norm": 0.367023229598999,
"learning_rate": 8.088484765467286e-06,
"loss": 0.3555,
"step": 97
},
{
"epoch": 3.6153846153846154,
"grad_norm": 0.3499699831008911,
"learning_rate": 8.037392697337418e-06,
"loss": 0.3274,
"step": 98
},
{
"epoch": 3.6515837104072397,
"grad_norm": 0.35652127861976624,
"learning_rate": 7.985792958513932e-06,
"loss": 0.3565,
"step": 99
},
{
"epoch": 3.6877828054298645,
"grad_norm": 0.38484910130500793,
"learning_rate": 7.93369417339209e-06,
"loss": 0.331,
"step": 100
},
{
"epoch": 3.723981900452489,
"grad_norm": 0.37475016713142395,
"learning_rate": 7.881105049777902e-06,
"loss": 0.3454,
"step": 101
},
{
"epoch": 3.760180995475113,
"grad_norm": 0.33727630972862244,
"learning_rate": 7.828034377432694e-06,
"loss": 0.3645,
"step": 102
},
{
"epoch": 3.7963800904977374,
"grad_norm": 0.3419555723667145,
"learning_rate": 7.774491026603985e-06,
"loss": 0.3574,
"step": 103
},
{
"epoch": 3.832579185520362,
"grad_norm": 0.5590242147445679,
"learning_rate": 7.720483946542913e-06,
"loss": 0.334,
"step": 104
},
{
"epoch": 3.8687782805429864,
"grad_norm": 0.36669477820396423,
"learning_rate": 7.666022164008458e-06,
"loss": 0.326,
"step": 105
},
{
"epoch": 3.9049773755656108,
"grad_norm": 0.3452495038509369,
"learning_rate": 7.6111147817586925e-06,
"loss": 0.3542,
"step": 106
},
{
"epoch": 3.9411764705882355,
"grad_norm": 0.3405420482158661,
"learning_rate": 7.5557709770293664e-06,
"loss": 0.3582,
"step": 107
},
{
"epoch": 3.97737556561086,
"grad_norm": 0.3876648545265198,
"learning_rate": 7.500000000000001e-06,
"loss": 0.3284,
"step": 108
},
{
"epoch": 4.036199095022624,
"grad_norm": 0.8777516484260559,
"learning_rate": 7.443811172247822e-06,
"loss": 0.5983,
"step": 109
},
{
"epoch": 4.072398190045249,
"grad_norm": 0.5651780366897583,
"learning_rate": 7.387213885189746e-06,
"loss": 0.2736,
"step": 110
},
{
"epoch": 4.108597285067873,
"grad_norm": 0.4696848392486572,
"learning_rate": 7.330217598512696e-06,
"loss": 0.2527,
"step": 111
},
{
"epoch": 4.144796380090498,
"grad_norm": 0.41402825713157654,
"learning_rate": 7.2728318385925035e-06,
"loss": 0.247,
"step": 112
}
],
"logging_steps": 1,
"max_steps": 270,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 28,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 141386024878080.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}