{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4843592330978809,
"eval_steps": 50,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008072653884964682,
"grad_norm": 0.04381619393825531,
"learning_rate": 4.999451708687114e-06,
"logits/chosen": 14.719314575195312,
"logits/rejected": 15.156938552856445,
"logps/chosen": -0.2856016755104065,
"logps/rejected": -0.31895095109939575,
"loss": 0.9242,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.42840251326560974,
"rewards/margins": 0.050023891031742096,
"rewards/rejected": -0.47842639684677124,
"step": 10
},
{
"epoch": 0.016145307769929364,
"grad_norm": 0.05155143886804581,
"learning_rate": 4.997807075247147e-06,
"logits/chosen": 14.559402465820312,
"logits/rejected": 15.32939338684082,
"logps/chosen": -0.2736968398094177,
"logps/rejected": -0.3458033502101898,
"loss": 0.9127,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": -0.4105452597141266,
"rewards/margins": 0.10815979540348053,
"rewards/rejected": -0.5187050104141235,
"step": 20
},
{
"epoch": 0.024217961654894045,
"grad_norm": 0.05071854218840599,
"learning_rate": 4.9950668210706795e-06,
"logits/chosen": 14.653738021850586,
"logits/rejected": 15.168347358703613,
"logps/chosen": -0.2985997200012207,
"logps/rejected": -0.34624338150024414,
"loss": 0.9141,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": -0.44789963960647583,
"rewards/margins": 0.07146544009447098,
"rewards/rejected": -0.5193650722503662,
"step": 30
},
{
"epoch": 0.03229061553985873,
"grad_norm": 0.052318744361400604,
"learning_rate": 4.9912321481237616e-06,
"logits/chosen": 14.621539115905762,
"logits/rejected": 15.138806343078613,
"logps/chosen": -0.27971988916397095,
"logps/rejected": -0.360626757144928,
"loss": 0.9313,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.4195798337459564,
"rewards/margins": 0.12136033922433853,
"rewards/rejected": -0.5409401655197144,
"step": 40
},
{
"epoch": 0.04036326942482341,
"grad_norm": 0.06900553405284882,
"learning_rate": 4.986304738420684e-06,
"logits/chosen": 14.308789253234863,
"logits/rejected": 14.605737686157227,
"logps/chosen": -0.2685723304748535,
"logps/rejected": -0.323064386844635,
"loss": 0.9076,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": -0.40285855531692505,
"rewards/margins": 0.08173803985118866,
"rewards/rejected": -0.4845965802669525,
"step": 50
},
{
"epoch": 0.04036326942482341,
"eval_logits/chosen": 14.528907775878906,
"eval_logits/rejected": 15.016877174377441,
"eval_logps/chosen": -0.2801212966442108,
"eval_logps/rejected": -0.34862396121025085,
"eval_loss": 0.9108895063400269,
"eval_rewards/accuracies": 0.5544554591178894,
"eval_rewards/chosen": -0.4201819598674774,
"eval_rewards/margins": 0.10275395959615707,
"eval_rewards/rejected": -0.5229359865188599,
"eval_runtime": 30.01,
"eval_samples_per_second": 26.691,
"eval_steps_per_second": 3.366,
"step": 50
},
{
"epoch": 0.04843592330978809,
"grad_norm": 0.32321593165397644,
"learning_rate": 4.980286753286196e-06,
"logits/chosen": 14.644981384277344,
"logits/rejected": 15.177103996276855,
"logps/chosen": -0.26382654905319214,
"logps/rejected": -0.33932510018348694,
"loss": 0.9204,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": -0.3957397937774658,
"rewards/margins": 0.1132478266954422,
"rewards/rejected": -0.5089876055717468,
"step": 60
},
{
"epoch": 0.056508577194752774,
"grad_norm": 0.07268164306879044,
"learning_rate": 4.973180832407471e-06,
"logits/chosen": 14.562113761901855,
"logits/rejected": 15.092450141906738,
"logps/chosen": -0.2856511175632477,
"logps/rejected": -0.34295767545700073,
"loss": 0.915,
"rewards/accuracies": 0.5375000238418579,
"rewards/chosen": -0.42847663164138794,
"rewards/margins": 0.08595988899469376,
"rewards/rejected": -0.5144366025924683,
"step": 70
},
{
"epoch": 0.06458123107971746,
"grad_norm": 0.06727313250303268,
"learning_rate": 4.964990092676263e-06,
"logits/chosen": 13.979713439941406,
"logits/rejected": 14.924532890319824,
"logps/chosen": -0.27184560894966125,
"logps/rejected": -0.3679867386817932,
"loss": 0.9223,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.4077683985233307,
"rewards/margins": 0.14421164989471436,
"rewards/rejected": -0.5519800186157227,
"step": 80
},
{
"epoch": 0.07265388496468214,
"grad_norm": 0.06138753890991211,
"learning_rate": 4.9557181268217225e-06,
"logits/chosen": 14.366241455078125,
"logits/rejected": 14.924840927124023,
"logps/chosen": -0.2656143009662628,
"logps/rejected": -0.3583180606365204,
"loss": 0.9117,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -0.39842137694358826,
"rewards/margins": 0.13905569911003113,
"rewards/rejected": -0.5374771356582642,
"step": 90
},
{
"epoch": 0.08072653884964683,
"grad_norm": 0.14299456775188446,
"learning_rate": 4.9453690018345144e-06,
"logits/chosen": 14.401769638061523,
"logits/rejected": 14.532609939575195,
"logps/chosen": -0.2966740131378174,
"logps/rejected": -0.3347373604774475,
"loss": 0.9162,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": -0.4450109899044037,
"rewards/margins": 0.05709508806467056,
"rewards/rejected": -0.5021060705184937,
"step": 100
},
{
"epoch": 0.08072653884964683,
"eval_logits/chosen": 14.096770286560059,
"eval_logits/rejected": 14.638699531555176,
"eval_logps/chosen": -0.2713560461997986,
"eval_logps/rejected": -0.35128629207611084,
"eval_loss": 0.900999128818512,
"eval_rewards/accuracies": 0.5643564462661743,
"eval_rewards/chosen": -0.4070340394973755,
"eval_rewards/margins": 0.11989541351795197,
"eval_rewards/rejected": -0.5269294381141663,
"eval_runtime": 29.986,
"eval_samples_per_second": 26.712,
"eval_steps_per_second": 3.368,
"step": 100
},
{
"epoch": 0.08879919273461151,
"grad_norm": 0.0759090781211853,
"learning_rate": 4.933947257182901e-06,
"logits/chosen": 13.392621994018555,
"logits/rejected": 14.395462036132812,
"logps/chosen": -0.22954440116882324,
"logps/rejected": -0.36977845430374146,
"loss": 0.8951,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.3443165421485901,
"rewards/margins": 0.21035107970237732,
"rewards/rejected": -0.5546677112579346,
"step": 110
},
{
"epoch": 0.09687184661957618,
"grad_norm": 0.155408576130867,
"learning_rate": 4.921457902821578e-06,
"logits/chosen": 13.7767915725708,
"logits/rejected": 14.654029846191406,
"logps/chosen": -0.2693817615509033,
"logps/rejected": -0.38339418172836304,
"loss": 0.9012,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.40407267212867737,
"rewards/margins": 0.17101867496967316,
"rewards/rejected": -0.5750913619995117,
"step": 120
},
{
"epoch": 0.10494450050454086,
"grad_norm": 0.31760165095329285,
"learning_rate": 4.907906416994146e-06,
"logits/chosen": 13.619878768920898,
"logits/rejected": 14.147298812866211,
"logps/chosen": -0.2787878215312958,
"logps/rejected": -0.35886240005493164,
"loss": 0.8911,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": -0.41818171739578247,
"rewards/margins": 0.12011190503835678,
"rewards/rejected": -0.5382936000823975,
"step": 130
},
{
"epoch": 0.11301715438950555,
"grad_norm": 0.10735614597797394,
"learning_rate": 4.893298743830168e-06,
"logits/chosen": 13.27166748046875,
"logits/rejected": 13.826273918151855,
"logps/chosen": -0.25437131524086,
"logps/rejected": -0.3877837061882019,
"loss": 0.8915,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -0.3815569281578064,
"rewards/margins": 0.20011858642101288,
"rewards/rejected": -0.5816755890846252,
"step": 140
},
{
"epoch": 0.12108980827447023,
"grad_norm": 0.12984605133533478,
"learning_rate": 4.8776412907378845e-06,
"logits/chosen": 12.981040954589844,
"logits/rejected": 13.055997848510742,
"logps/chosen": -0.27253809571266174,
"logps/rejected": -0.32365134358406067,
"loss": 0.8954,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.4088071286678314,
"rewards/margins": 0.07666991651058197,
"rewards/rejected": -0.4854770302772522,
"step": 150
},
{
"epoch": 0.12108980827447023,
"eval_logits/chosen": 12.205692291259766,
"eval_logits/rejected": 12.830544471740723,
"eval_logps/chosen": -0.2716449201107025,
"eval_logps/rejected": -0.37988847494125366,
"eval_loss": 0.8780961036682129,
"eval_rewards/accuracies": 0.5841584205627441,
"eval_rewards/chosen": -0.40746742486953735,
"eval_rewards/margins": 0.16236530244350433,
"eval_rewards/rejected": -0.5698326826095581,
"eval_runtime": 30.0006,
"eval_samples_per_second": 26.699,
"eval_steps_per_second": 3.367,
"step": 150
},
{
"epoch": 0.12916246215943492,
"grad_norm": 0.1466989368200302,
"learning_rate": 4.860940925593703e-06,
"logits/chosen": 12.592086791992188,
"logits/rejected": 12.590131759643555,
"logps/chosen": -0.2901991307735443,
"logps/rejected": -0.37141314148902893,
"loss": 0.8814,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": -0.4352986812591553,
"rewards/margins": 0.12182100117206573,
"rewards/rejected": -0.5571196675300598,
"step": 160
},
{
"epoch": 0.13723511604439959,
"grad_norm": 0.15794874727725983,
"learning_rate": 4.84320497372973e-06,
"logits/chosen": 10.324287414550781,
"logits/rejected": 11.248865127563477,
"logps/chosen": -0.24505829811096191,
"logps/rejected": -0.4437941610813141,
"loss": 0.8739,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.3675874173641205,
"rewards/margins": 0.29810377955436707,
"rewards/rejected": -0.6656912565231323,
"step": 170
},
{
"epoch": 0.14530776992936428,
"grad_norm": 0.34027722477912903,
"learning_rate": 4.824441214720629e-06,
"logits/chosen": 9.277830123901367,
"logits/rejected": 10.56584644317627,
"logps/chosen": -0.29055729508399963,
"logps/rejected": -0.4694874882698059,
"loss": 0.8732,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -0.43583592772483826,
"rewards/margins": 0.2683953046798706,
"rewards/rejected": -0.7042312026023865,
"step": 180
},
{
"epoch": 0.15338042381432895,
"grad_norm": 0.21653155982494354,
"learning_rate": 4.804657878971252e-06,
"logits/chosen": 6.692442417144775,
"logits/rejected": 8.371492385864258,
"logps/chosen": -0.2739722728729248,
"logps/rejected": -0.5168331265449524,
"loss": 0.8425,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.4109583795070648,
"rewards/margins": 0.36429136991500854,
"rewards/rejected": -0.7752498388290405,
"step": 190
},
{
"epoch": 0.16145307769929365,
"grad_norm": 0.27401649951934814,
"learning_rate": 4.783863644106502e-06,
"logits/chosen": 7.028637886047363,
"logits/rejected": 7.22598123550415,
"logps/chosen": -0.32309776544570923,
"logps/rejected": -0.5094671249389648,
"loss": 0.8327,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.48464661836624146,
"rewards/margins": 0.2795540988445282,
"rewards/rejected": -0.7642006874084473,
"step": 200
},
{
"epoch": 0.16145307769929365,
"eval_logits/chosen": 6.369185924530029,
"eval_logits/rejected": 6.641132831573486,
"eval_logps/chosen": -0.32840561866760254,
"eval_logps/rejected": -0.5301258563995361,
"eval_loss": 0.8103437423706055,
"eval_rewards/accuracies": 0.6237623691558838,
"eval_rewards/chosen": -0.4926084876060486,
"eval_rewards/margins": 0.30258041620254517,
"eval_rewards/rejected": -0.795188844203949,
"eval_runtime": 29.9886,
"eval_samples_per_second": 26.71,
"eval_steps_per_second": 3.368,
"step": 200
},
{
"epoch": 0.16952573158425832,
"grad_norm": 0.3183073103427887,
"learning_rate": 4.762067631165049e-06,
"logits/chosen": 5.25254487991333,
"logits/rejected": 5.84013032913208,
"logps/chosen": -0.3624248802661896,
"logps/rejected": -0.6147049069404602,
"loss": 0.7877,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.5436373949050903,
"rewards/margins": 0.3784201443195343,
"rewards/rejected": -0.9220573306083679,
"step": 210
},
{
"epoch": 0.17759838546922302,
"grad_norm": 0.3535729646682739,
"learning_rate": 4.7392794005985324e-06,
"logits/chosen": 4.473980903625488,
"logits/rejected": 3.9927191734313965,
"logps/chosen": -0.3647093176841736,
"logps/rejected": -0.6410630345344543,
"loss": 0.7816,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.547063946723938,
"rewards/margins": 0.4145306646823883,
"rewards/rejected": -0.9615945816040039,
"step": 220
},
{
"epoch": 0.1856710393541877,
"grad_norm": 0.4819677174091339,
"learning_rate": 4.715508948078037e-06,
"logits/chosen": 2.7333035469055176,
"logits/rejected": 2.521853446960449,
"logps/chosen": -0.40259629487991333,
"logps/rejected": -0.7537732720375061,
"loss": 0.7306,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.6038944721221924,
"rewards/margins": 0.5267654657363892,
"rewards/rejected": -1.1306599378585815,
"step": 230
},
{
"epoch": 0.19374369323915236,
"grad_norm": 0.4125296175479889,
"learning_rate": 4.690766700109659e-06,
"logits/chosen": 2.212467908859253,
"logits/rejected": 1.1434030532836914,
"logps/chosen": -0.4652811884880066,
"logps/rejected": -0.8928227424621582,
"loss": 0.7214,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.6979218125343323,
"rewards/margins": 0.6413123607635498,
"rewards/rejected": -1.3392341136932373,
"step": 240
},
{
"epoch": 0.20181634712411706,
"grad_norm": 0.4265546202659607,
"learning_rate": 4.665063509461098e-06,
"logits/chosen": 0.4756811559200287,
"logits/rejected": 0.07218921184539795,
"logps/chosen": -0.4880926012992859,
"logps/rejected": -1.0095646381378174,
"loss": 0.6811,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.7321388721466064,
"rewards/margins": 0.7822080850601196,
"rewards/rejected": -1.5143468379974365,
"step": 250
},
{
"epoch": 0.20181634712411706,
"eval_logits/chosen": 1.6732138395309448,
"eval_logits/rejected": 0.5167235732078552,
"eval_logps/chosen": -0.5383209586143494,
"eval_logps/rejected": -1.0026048421859741,
"eval_loss": 0.6842760443687439,
"eval_rewards/accuracies": 0.6336633563041687,
"eval_rewards/chosen": -0.8074814677238464,
"eval_rewards/margins": 0.6964258551597595,
"eval_rewards/rejected": -1.5039072036743164,
"eval_runtime": 29.9884,
"eval_samples_per_second": 26.71,
"eval_steps_per_second": 3.368,
"step": 250
},
{
"epoch": 0.20988900100908173,
"grad_norm": 0.5196985006332397,
"learning_rate": 4.638410650401267e-06,
"logits/chosen": 1.7947940826416016,
"logits/rejected": 0.9839111566543579,
"logps/chosen": -0.6005308032035828,
"logps/rejected": -0.9484688639640808,
"loss": 0.7141,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": -0.9007962942123413,
"rewards/margins": 0.5219069719314575,
"rewards/rejected": -1.4227031469345093,
"step": 260
},
{
"epoch": 0.21796165489404642,
"grad_norm": 1.302403450012207,
"learning_rate": 4.610819813755038e-06,
"logits/chosen": 2.2894890308380127,
"logits/rejected": 1.2887728214263916,
"logps/chosen": -0.5904151797294617,
"logps/rejected": -1.1889005899429321,
"loss": 0.6647,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.8856227993965149,
"rewards/margins": 0.8977279663085938,
"rewards/rejected": -1.783350944519043,
"step": 270
},
{
"epoch": 0.2260343087790111,
"grad_norm": 0.7729688286781311,
"learning_rate": 4.582303101775249e-06,
"logits/chosen": 0.4874440133571625,
"logits/rejected": -0.3855375349521637,
"logps/chosen": -0.624158501625061,
"logps/rejected": -1.4413455724716187,
"loss": 0.5629,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.9362378120422363,
"rewards/margins": 1.2257804870605469,
"rewards/rejected": -2.162018299102783,
"step": 280
},
{
"epoch": 0.2341069626639758,
"grad_norm": 0.41621893644332886,
"learning_rate": 4.55287302283426e-06,
"logits/chosen": 1.3461151123046875,
"logits/rejected": 0.733107328414917,
"logps/chosen": -0.7516278624534607,
"logps/rejected": -1.6215450763702393,
"loss": 0.5544,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -1.1274420022964478,
"rewards/margins": 1.3048756122589111,
"rewards/rejected": -2.4323174953460693,
"step": 290
},
{
"epoch": 0.24217961654894046,
"grad_norm": 0.45633476972579956,
"learning_rate": 4.522542485937369e-06,
"logits/chosen": 0.6991375684738159,
"logits/rejected": 0.1667344868183136,
"logps/chosen": -0.7924041152000427,
"logps/rejected": -2.521883249282837,
"loss": 0.4968,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -1.1886063814163208,
"rewards/margins": 2.5942187309265137,
"rewards/rejected": -3.782824754714966,
"step": 300
},
{
"epoch": 0.24217961654894046,
"eval_logits/chosen": 1.107033610343933,
"eval_logits/rejected": 0.10493909567594528,
"eval_logps/chosen": -0.8693537712097168,
"eval_logps/rejected": -2.1045310497283936,
"eval_loss": 0.4899609684944153,
"eval_rewards/accuracies": 0.6633663177490234,
"eval_rewards/chosen": -1.3040307760238647,
"eval_rewards/margins": 1.852765679359436,
"eval_rewards/rejected": -3.15679669380188,
"eval_runtime": 30.0102,
"eval_samples_per_second": 26.691,
"eval_steps_per_second": 3.366,
"step": 300
},
{
"epoch": 0.25025227043390513,
"grad_norm": 2.2053284645080566,
"learning_rate": 4.491324795060491e-06,
"logits/chosen": 0.6556342244148254,
"logits/rejected": -0.14924369752407074,
"logps/chosen": -0.8949082493782043,
"logps/rejected": -2.1485395431518555,
"loss": 0.5372,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -1.342362403869629,
"rewards/margins": 1.8804467916488647,
"rewards/rejected": -3.222809314727783,
"step": 310
},
{
"epoch": 0.25832492431886983,
"grad_norm": 0.8357079029083252,
"learning_rate": 4.4592336433146e-06,
"logits/chosen": 1.4236268997192383,
"logits/rejected": 0.44775086641311646,
"logps/chosen": -0.8566747903823853,
"logps/rejected": -2.4265379905700684,
"loss": 0.4974,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.285012125968933,
"rewards/margins": 2.35479474067688,
"rewards/rejected": -3.6398072242736816,
"step": 320
},
{
"epoch": 0.26639757820383453,
"grad_norm": 1.5514745712280273,
"learning_rate": 4.426283106939474e-06,
"logits/chosen": 1.0238934755325317,
"logits/rejected": 0.31885427236557007,
"logps/chosen": -0.9286335110664368,
"logps/rejected": -2.957723379135132,
"loss": 0.4777,
"rewards/accuracies": 0.75,
"rewards/chosen": -1.3929500579833984,
"rewards/margins": 3.043635129928589,
"rewards/rejected": -4.436585426330566,
"step": 330
},
{
"epoch": 0.27447023208879917,
"grad_norm": 0.7523798942565918,
"learning_rate": 4.3924876391293915e-06,
"logits/chosen": 1.0386161804199219,
"logits/rejected": 0.1279783844947815,
"logps/chosen": -1.0053694248199463,
"logps/rejected": -2.8961727619171143,
"loss": 0.4718,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -1.5080541372299194,
"rewards/margins": 2.8362045288085938,
"rewards/rejected": -4.344258785247803,
"step": 340
},
{
"epoch": 0.28254288597376387,
"grad_norm": 0.6102933287620544,
"learning_rate": 4.357862063693486e-06,
"logits/chosen": 0.5982325077056885,
"logits/rejected": 0.07386422157287598,
"logps/chosen": -1.0740950107574463,
"logps/rejected": -2.4773449897766113,
"loss": 0.4909,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.6111425161361694,
"rewards/margins": 2.104874849319458,
"rewards/rejected": -3.716017246246338,
"step": 350
},
{
"epoch": 0.28254288597376387,
"eval_logits/chosen": 1.3659090995788574,
"eval_logits/rejected": 0.5649093985557556,
"eval_logps/chosen": -1.113811731338501,
"eval_logps/rejected": -2.65985107421875,
"eval_loss": 0.44899094104766846,
"eval_rewards/accuracies": 0.6633663177490234,
"eval_rewards/chosen": -1.6707175970077515,
"eval_rewards/margins": 2.319058895111084,
"eval_rewards/rejected": -3.989776849746704,
"eval_runtime": 29.9887,
"eval_samples_per_second": 26.71,
"eval_steps_per_second": 3.368,
"step": 350
},
{
"epoch": 0.29061553985872857,
"grad_norm": 0.4950815737247467,
"learning_rate": 4.322421568553529e-06,
"logits/chosen": 1.4646549224853516,
"logits/rejected": 1.0656757354736328,
"logps/chosen": -1.0681949853897095,
"logps/rejected": -2.9191997051239014,
"loss": 0.4561,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.6022924184799194,
"rewards/margins": 2.7765071392059326,
"rewards/rejected": -4.3787994384765625,
"step": 360
},
{
"epoch": 0.29868819374369326,
"grad_norm": 1.830091118812561,
"learning_rate": 4.286181699082008e-06,
"logits/chosen": 2.0835390090942383,
"logits/rejected": 1.3285930156707764,
"logps/chosen": -1.1288923025131226,
"logps/rejected": -3.2559380531311035,
"loss": 0.4305,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.693338394165039,
"rewards/margins": 3.190568208694458,
"rewards/rejected": -4.883906364440918,
"step": 370
},
{
"epoch": 0.3067608476286579,
"grad_norm": 2.1292569637298584,
"learning_rate": 4.249158351283414e-06,
"logits/chosen": 1.5609261989593506,
"logits/rejected": 1.0038378238677979,
"logps/chosen": -1.2937371730804443,
"logps/rejected": -3.3292288780212402,
"loss": 0.4358,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.9406057596206665,
"rewards/margins": 3.0532374382019043,
"rewards/rejected": -4.993843078613281,
"step": 380
},
{
"epoch": 0.3148335015136226,
"grad_norm": 2.124483346939087,
"learning_rate": 4.211367764821722e-06,
"logits/chosen": 1.9905683994293213,
"logits/rejected": 1.498375415802002,
"logps/chosen": -1.4837720394134521,
"logps/rejected": -3.7814183235168457,
"loss": 0.4565,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -2.225658416748047,
"rewards/margins": 3.44646954536438,
"rewards/rejected": -5.672127723693848,
"step": 390
},
{
"epoch": 0.3229061553985873,
"grad_norm": 1.8990832567214966,
"learning_rate": 4.172826515897146e-06,
"logits/chosen": 2.05663800239563,
"logits/rejected": 1.7521194219589233,
"logps/chosen": -1.662553071975708,
"logps/rejected": -4.254827976226807,
"loss": 0.408,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -2.4938297271728516,
"rewards/margins": 3.8884124755859375,
"rewards/rejected": -6.382241725921631,
"step": 400
},
{
"epoch": 0.3229061553985873,
"eval_logits/chosen": 1.7070311307907104,
"eval_logits/rejected": 1.3909664154052734,
"eval_logps/chosen": -2.0459556579589844,
"eval_logps/rejected": -4.069729804992676,
"eval_loss": 0.38578492403030396,
"eval_rewards/accuracies": 0.8613861203193665,
"eval_rewards/chosen": -3.0689334869384766,
"eval_rewards/margins": 3.035661458969116,
"eval_rewards/rejected": -6.104594707489014,
"eval_runtime": 30.015,
"eval_samples_per_second": 26.687,
"eval_steps_per_second": 3.365,
"step": 400
},
{
"epoch": 0.33097880928355194,
"grad_norm": 2.627016067504883,
"learning_rate": 4.133551509975264e-06,
"logits/chosen": 1.8818864822387695,
"logits/rejected": 1.6801849603652954,
"logps/chosen": -2.145986318588257,
"logps/rejected": -4.5329155921936035,
"loss": 0.3894,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -3.218979597091675,
"rewards/margins": 3.5803935527801514,
"rewards/rejected": -6.799372673034668,
"step": 410
},
{
"epoch": 0.33905146316851664,
"grad_norm": 2.1047608852386475,
"learning_rate": 4.093559974371725e-06,
"logits/chosen": 2.082624912261963,
"logits/rejected": 1.7804310321807861,
"logps/chosen": -2.498039722442627,
"logps/rejected": -4.982306957244873,
"loss": 0.3853,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -3.7470593452453613,
"rewards/margins": 3.7264015674591064,
"rewards/rejected": -7.4734601974487305,
"step": 420
},
{
"epoch": 0.34712411705348134,
"grad_norm": 1.7552043199539185,
"learning_rate": 4.052869450695776e-06,
"logits/chosen": 2.693586826324463,
"logits/rejected": 2.2467360496520996,
"logps/chosen": -2.717463731765747,
"logps/rejected": -5.170213222503662,
"loss": 0.3669,
"rewards/accuracies": 0.8125,
"rewards/chosen": -4.07619571685791,
"rewards/margins": 3.6791248321533203,
"rewards/rejected": -7.755320072174072,
"step": 430
},
{
"epoch": 0.35519677093844604,
"grad_norm": 1.5780164003372192,
"learning_rate": 4.011497787155938e-06,
"logits/chosen": 1.6175695657730103,
"logits/rejected": 1.4590227603912354,
"logps/chosen": -2.6292812824249268,
"logps/rejected": -5.3910956382751465,
"loss": 0.3333,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -3.9439215660095215,
"rewards/margins": 4.142721176147461,
"rewards/rejected": -8.086642265319824,
"step": 440
},
{
"epoch": 0.3632694248234107,
"grad_norm": 4.690535545349121,
"learning_rate": 3.969463130731183e-06,
"logits/chosen": 1.8307468891143799,
"logits/rejected": 1.500794768333435,
"logps/chosen": -2.872072696685791,
"logps/rejected": -5.305605411529541,
"loss": 0.356,
"rewards/accuracies": 0.875,
"rewards/chosen": -4.308108329772949,
"rewards/margins": 3.6502983570098877,
"rewards/rejected": -7.958407402038574,
"step": 450
},
{
"epoch": 0.3632694248234107,
"eval_logits/chosen": 1.702280044555664,
"eval_logits/rejected": 1.3865309953689575,
"eval_logps/chosen": -2.8212814331054688,
"eval_logps/rejected": -5.271735668182373,
"eval_loss": 0.33659350872039795,
"eval_rewards/accuracies": 0.9009901285171509,
"eval_rewards/chosen": -4.231922149658203,
"eval_rewards/margins": 3.6756813526153564,
"eval_rewards/rejected": -7.9076032638549805,
"eval_runtime": 30.0131,
"eval_samples_per_second": 26.688,
"eval_steps_per_second": 3.365,
"step": 450
},
{
"epoch": 0.3713420787083754,
"grad_norm": 2.641991138458252,
"learning_rate": 3.92678391921108e-06,
"logits/chosen": 2.2038960456848145,
"logits/rejected": 1.8302087783813477,
"logps/chosen": -3.0162768363952637,
"logps/rejected": -5.90580940246582,
"loss": 0.3141,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -4.524415493011475,
"rewards/margins": 4.334298610687256,
"rewards/rejected": -8.85871410369873,
"step": 460
},
{
"epoch": 0.3794147325933401,
"grad_norm": 1.9930158853530884,
"learning_rate": 3.88347887310836e-06,
"logits/chosen": 1.655861258506775,
"logits/rejected": 1.562140703201294,
"logps/chosen": -2.6926944255828857,
"logps/rejected": -5.58877420425415,
"loss": 0.3298,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -4.039041519165039,
"rewards/margins": 4.344119548797607,
"rewards/rejected": -8.383161544799805,
"step": 470
},
{
"epoch": 0.3874873864783047,
"grad_norm": 2.796666383743286,
"learning_rate": 3.839566987447492e-06,
"logits/chosen": 1.465962290763855,
"logits/rejected": 1.0425068140029907,
"logps/chosen": -2.9049878120422363,
"logps/rejected": -5.620814323425293,
"loss": 0.3071,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -4.357481956481934,
"rewards/margins": 4.073739051818848,
"rewards/rejected": -8.431221961975098,
"step": 480
},
{
"epoch": 0.3955600403632694,
"grad_norm": 3.4124133586883545,
"learning_rate": 3.795067523432826e-06,
"logits/chosen": 1.545910120010376,
"logits/rejected": 1.2031619548797607,
"logps/chosen": -3.081360340118408,
"logps/rejected": -6.2000017166137695,
"loss": 0.2638,
"rewards/accuracies": 0.9375,
"rewards/chosen": -4.622040748596191,
"rewards/margins": 4.677962779998779,
"rewards/rejected": -9.300004959106445,
"step": 490
},
{
"epoch": 0.4036326942482341,
"grad_norm": 2.4986650943756104,
"learning_rate": 3.7500000000000005e-06,
"logits/chosen": 2.3995444774627686,
"logits/rejected": 2.111048698425293,
"logps/chosen": -3.1852879524230957,
"logps/rejected": -5.824184417724609,
"loss": 0.2592,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -4.777932167053223,
"rewards/margins": 3.9583442211151123,
"rewards/rejected": -8.736276626586914,
"step": 500
},
{
"epoch": 0.4036326942482341,
"eval_logits/chosen": 2.3460052013397217,
"eval_logits/rejected": 1.8444068431854248,
"eval_logps/chosen": -3.00115704536438,
"eval_logps/rejected": -5.741134166717529,
"eval_loss": 0.3161226809024811,
"eval_rewards/accuracies": 0.9009901285171509,
"eval_rewards/chosen": -4.501734733581543,
"eval_rewards/margins": 4.1099653244018555,
"eval_rewards/rejected": -8.611700057983398,
"eval_runtime": 30.0035,
"eval_samples_per_second": 26.697,
"eval_steps_per_second": 3.366,
"step": 500
},
{
"epoch": 0.4117053481331988,
"grad_norm": 2.1488800048828125,
"learning_rate": 3.7043841852542884e-06,
"logits/chosen": 2.289475440979004,
"logits/rejected": 2.1274220943450928,
"logps/chosen": -3.0586507320404053,
"logps/rejected": -6.597572326660156,
"loss": 0.2772,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -4.587975978851318,
"rewards/margins": 5.308382034301758,
"rewards/rejected": -9.89635944366455,
"step": 510
},
{
"epoch": 0.41977800201816345,
"grad_norm": 4.558802604675293,
"learning_rate": 3.658240087799655e-06,
"logits/chosen": 2.7750000953674316,
"logits/rejected": 2.718843936920166,
"logps/chosen": -3.142796039581299,
"logps/rejected": -6.112117767333984,
"loss": 0.2724,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -4.714193820953369,
"rewards/margins": 4.453982830047607,
"rewards/rejected": -9.168176651000977,
"step": 520
},
{
"epoch": 0.42785065590312815,
"grad_norm": 4.052336692810059,
"learning_rate": 3.611587947962319e-06,
"logits/chosen": 2.6630032062530518,
"logits/rejected": 2.3567159175872803,
"logps/chosen": -2.735839605331421,
"logps/rejected": -5.979222774505615,
"loss": 0.2616,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -4.103759288787842,
"rewards/margins": 4.865074634552002,
"rewards/rejected": -8.968833923339844,
"step": 530
},
{
"epoch": 0.43592330978809285,
"grad_norm": 4.516399383544922,
"learning_rate": 3.564448228912682e-06,
"logits/chosen": 2.0932493209838867,
"logits/rejected": 2.0154340267181396,
"logps/chosen": -3.2643024921417236,
"logps/rejected": -6.601607322692871,
"loss": 0.2954,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -4.896453857421875,
"rewards/margins": 5.00595760345459,
"rewards/rejected": -9.902410507202148,
"step": 540
},
{
"epoch": 0.4439959636730575,
"grad_norm": 5.320217132568359,
"learning_rate": 3.516841607689501e-06,
"logits/chosen": 2.524789333343506,
"logits/rejected": 2.2494006156921387,
"logps/chosen": -3.190767526626587,
"logps/rejected": -6.47235107421875,
"loss": 0.26,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -4.786150932312012,
"rewards/margins": 4.922375679016113,
"rewards/rejected": -9.708526611328125,
"step": 550
},
{
"epoch": 0.4439959636730575,
"eval_logits/chosen": 2.328671932220459, | |
"eval_logits/rejected": 1.9636019468307495, | |
"eval_logps/chosen": -3.0293216705322266, | |
"eval_logps/rejected": -6.1273980140686035, | |
"eval_loss": 0.28268176317214966, | |
"eval_rewards/accuracies": 0.9405940771102905, | |
"eval_rewards/chosen": -4.54398250579834, | |
"eval_rewards/margins": 4.64711332321167, | |
"eval_rewards/rejected": -9.191096305847168, | |
"eval_runtime": 30.0135, | |
"eval_samples_per_second": 26.688, | |
"eval_steps_per_second": 3.365, | |
"step": 550 | |
}, | |
{ | |
"epoch": 0.4520686175580222, | |
"grad_norm": 2.362356424331665, | |
"learning_rate": 3.4687889661302577e-06, | |
"logits/chosen": 2.4379239082336426, | |
"logits/rejected": 2.2238965034484863, | |
"logps/chosen": -3.121654510498047, | |
"logps/rejected": -6.944204807281494, | |
"loss": 0.2752, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -4.68248176574707, | |
"rewards/margins": 5.733824729919434, | |
"rewards/rejected": -10.41630744934082, | |
"step": 560 | |
}, | |
{ | |
"epoch": 0.4601412714429869, | |
"grad_norm": 6.080317497253418, | |
"learning_rate": 3.4203113817116955e-06, | |
"logits/chosen": 1.9741709232330322, | |
"logits/rejected": 1.7863889932632446, | |
"logps/chosen": -3.5348823070526123, | |
"logps/rejected": -7.0638427734375, | |
"loss": 0.2666, | |
"rewards/accuracies": 0.862500011920929, | |
"rewards/chosen": -5.302323818206787, | |
"rewards/margins": 5.293439865112305, | |
"rewards/rejected": -10.595763206481934, | |
"step": 570 | |
}, | |
{ | |
"epoch": 0.4682139253279516, | |
"grad_norm": 2.5477423667907715, | |
"learning_rate": 3.3714301183045382e-06, | |
"logits/chosen": 2.9160943031311035, | |
"logits/rejected": 2.747884750366211, | |
"logps/chosen": -3.287876844406128, | |
"logps/rejected": -6.996206760406494, | |
"loss": 0.2362, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -4.9318156242370605, | |
"rewards/margins": 5.562494277954102, | |
"rewards/rejected": -10.494308471679688, | |
"step": 580 | |
}, | |
{ | |
"epoch": 0.47628657921291623, | |
"grad_norm": 2.0826616287231445, | |
"learning_rate": 3.3221666168464584e-06, | |
"logits/chosen": 2.3059380054473877, | |
"logits/rejected": 2.1479601860046387, | |
"logps/chosen": -3.25227689743042, | |
"logps/rejected": -6.894504547119141, | |
"loss": 0.2774, | |
"rewards/accuracies": 0.862500011920929, | |
"rewards/chosen": -4.878415584564209, | |
"rewards/margins": 5.46334171295166, | |
"rewards/rejected": -10.341755867004395, | |
"step": 590 | |
}, | |
{ | |
"epoch": 0.4843592330978809, | |
"grad_norm": 2.689568519592285, | |
"learning_rate": 3.272542485937369e-06, | |
"logits/chosen": 2.776691436767578, | |
"logits/rejected": 2.309044361114502, | |
"logps/chosen": -3.4026119709014893, | |
"logps/rejected": -6.8883819580078125, | |
"loss": 0.3082, | |
"rewards/accuracies": 0.9375, | |
"rewards/chosen": -5.103917121887207, | |
"rewards/margins": 5.228655815124512, | |
"rewards/rejected": -10.332572937011719, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.4843592330978809, | |
"eval_logits/chosen": 2.1815295219421387, | |
"eval_logits/rejected": 1.8988540172576904, | |
"eval_logps/chosen": -3.1169536113739014, | |
"eval_logps/rejected": -6.34333610534668, | |
"eval_loss": 0.2698034942150116, | |
"eval_rewards/accuracies": 0.9504950642585754, | |
"eval_rewards/chosen": -4.6754302978515625, | |
"eval_rewards/margins": 4.839573860168457, | |
"eval_rewards/rejected": -9.51500415802002, | |
"eval_runtime": 30.0083, | |
"eval_samples_per_second": 26.693, | |
"eval_steps_per_second": 3.366, | |
"step": 600 | |
} | |
], | |
"logging_steps": 10, | |
"max_steps": 1500, | |
"num_input_tokens_seen": 0, | |
"num_train_epochs": 2, | |
"save_steps": 50, | |
"stateful_callbacks": { | |
"TrainerControl": { | |
"args": { | |
"should_epoch_stop": false, | |
"should_evaluate": false, | |
"should_log": false, | |
"should_save": true, | |
"should_training_stop": false | |
}, | |
"attributes": {} | |
} | |
}, | |
"total_flos": 1.4597706992781885e+18, | |
"train_batch_size": 1, | |
"trial_name": null, | |
"trial_params": null | |
} | |