phi3m0128-wds-0.75-kendall-onof-ofif-corr-max-2-simpo-max1500-default/checkpoint-900/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7992895204262878,
  "eval_steps": 50,
  "global_step": 900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008880994671403197,
      "grad_norm": 0.045356735587120056,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.352750778198242,
      "logits/rejected": 14.82281494140625,
      "logps/chosen": -0.2592294216156006,
      "logps/rejected": -0.32852867245674133,
      "loss": 0.9315,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.38884416222572327,
      "rewards/margins": 0.10394889116287231,
      "rewards/rejected": -0.4927930235862732,
      "step": 10
    },
    {
      "epoch": 0.017761989342806393,
      "grad_norm": 0.05255189165472984,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.80792236328125,
      "logits/rejected": 15.017183303833008,
      "logps/chosen": -0.2825874388217926,
      "logps/rejected": -0.36373966932296753,
      "loss": 0.9318,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.4238811433315277,
      "rewards/margins": 0.12172831594944,
      "rewards/rejected": -0.5456094741821289,
      "step": 20
    },
    {
      "epoch": 0.02664298401420959,
      "grad_norm": 0.056027185171842575,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.45336627960205,
      "logits/rejected": 15.261484146118164,
      "logps/chosen": -0.2638034522533417,
      "logps/rejected": -0.37216562032699585,
      "loss": 0.9211,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.3957051932811737,
      "rewards/margins": 0.1625431776046753,
      "rewards/rejected": -0.5582484006881714,
      "step": 30
    },
    {
      "epoch": 0.035523978685612786,
      "grad_norm": 0.06511653959751129,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.59851360321045,
      "logits/rejected": 15.112770080566406,
      "logps/chosen": -0.28972768783569336,
      "logps/rejected": -0.36043626070022583,
      "loss": 0.941,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.43459147214889526,
      "rewards/margins": 0.10606291145086288,
      "rewards/rejected": -0.5406544208526611,
      "step": 40
    },
    {
      "epoch": 0.04440497335701599,
      "grad_norm": 0.0637577474117279,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.678179740905762,
      "logits/rejected": 15.114399909973145,
      "logps/chosen": -0.3033604919910431,
      "logps/rejected": -0.3262741267681122,
      "loss": 0.929,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.45504075288772583,
      "rewards/margins": 0.03437047079205513,
      "rewards/rejected": -0.4894111752510071,
      "step": 50
    },
    {
      "epoch": 0.04440497335701599,
      "eval_logits/chosen": 14.936029434204102,
      "eval_logits/rejected": 14.780267715454102,
      "eval_logps/chosen": -0.29386037588119507,
      "eval_logps/rejected": -0.3304942548274994,
      "eval_loss": 0.9458721876144409,
      "eval_rewards/accuracies": 0.49450549483299255,
      "eval_rewards/chosen": -0.4407905340194702,
      "eval_rewards/margins": 0.05495081841945648,
      "eval_rewards/rejected": -0.49574142694473267,
      "eval_runtime": 27.7436,
      "eval_samples_per_second": 26.24,
      "eval_steps_per_second": 3.28,
      "step": 50
    },
    {
      "epoch": 0.05328596802841918,
      "grad_norm": 0.06470987200737,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.19079303741455,
      "logits/rejected": 14.986845016479492,
      "logps/chosen": -0.26126712560653687,
      "logps/rejected": -0.31976616382598877,
      "loss": 0.9335,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.3919006288051605,
      "rewards/margins": 0.08774860948324203,
      "rewards/rejected": -0.47964924573898315,
      "step": 60
    },
    {
      "epoch": 0.06216696269982238,
      "grad_norm": 0.06163545697927475,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.062408447265625,
      "logits/rejected": 15.050743103027344,
      "logps/chosen": -0.2698076367378235,
      "logps/rejected": -0.37131980061531067,
      "loss": 0.9234,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.4047114849090576,
      "rewards/margins": 0.1522682160139084,
      "rewards/rejected": -0.556979775428772,
      "step": 70
    },
    {
      "epoch": 0.07104795737122557,
      "grad_norm": 0.05943402647972107,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 14.523780822753906,
      "logits/rejected": 15.173608779907227,
      "logps/chosen": -0.2835150957107544,
      "logps/rejected": -0.35379332304000854,
      "loss": 0.9317,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.4252726137638092,
      "rewards/margins": 0.10541732609272003,
      "rewards/rejected": -0.5306899547576904,
      "step": 80
    },
    {
      "epoch": 0.07992895204262877,
      "grad_norm": 0.05566830188035965,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.752237319946289,
      "logits/rejected": 15.213434219360352,
      "logps/chosen": -0.25490278005599976,
      "logps/rejected": -0.31673288345336914,
      "loss": 0.9276,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.38235417008399963,
      "rewards/margins": 0.09274514764547348,
      "rewards/rejected": -0.4750993847846985,
      "step": 90
    },
    {
      "epoch": 0.08880994671403197,
      "grad_norm": 0.07879115641117096,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.362078666687012,
      "logits/rejected": 14.708786010742188,
      "logps/chosen": -0.27969443798065186,
      "logps/rejected": -0.3294224143028259,
      "loss": 0.9448,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41954168677330017,
      "rewards/margins": 0.0745919868350029,
      "rewards/rejected": -0.4941336512565613,
      "step": 100
    },
    {
      "epoch": 0.08880994671403197,
      "eval_logits/chosen": 14.668691635131836,
      "eval_logits/rejected": 14.53217601776123,
      "eval_logps/chosen": -0.2837528884410858,
      "eval_logps/rejected": -0.33105531334877014,
      "eval_loss": 0.9382757544517517,
      "eval_rewards/accuracies": 0.5164835453033447,
      "eval_rewards/chosen": -0.4256293475627899,
      "eval_rewards/margins": 0.07095365226268768,
      "eval_rewards/rejected": -0.4965830445289612,
      "eval_runtime": 26.9204,
      "eval_samples_per_second": 27.043,
      "eval_steps_per_second": 3.38,
      "step": 100
    },
    {
      "epoch": 0.09769094138543517,
      "grad_norm": 0.0677201971411705,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.2677640914917,
      "logits/rejected": 14.437828063964844,
      "logps/chosen": -0.26914283633232117,
      "logps/rejected": -0.3498677909374237,
      "loss": 0.9307,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.40371423959732056,
      "rewards/margins": 0.12108743190765381,
      "rewards/rejected": -0.5248016715049744,
      "step": 110
    },
    {
      "epoch": 0.10657193605683836,
      "grad_norm": 0.08429163694381714,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 13.978253364562988,
      "logits/rejected": 14.673884391784668,
      "logps/chosen": -0.2842218279838562,
      "logps/rejected": -0.35421326756477356,
      "loss": 0.9046,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.4263327121734619,
      "rewards/margins": 0.10498716682195663,
      "rewards/rejected": -0.5313198566436768,
      "step": 120
    },
    {
      "epoch": 0.11545293072824156,
      "grad_norm": 0.08969741314649582,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 13.98906135559082,
      "logits/rejected": 14.70555305480957,
      "logps/chosen": -0.26557397842407227,
      "logps/rejected": -0.3823702037334442,
      "loss": 0.9151,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3983609974384308,
      "rewards/margins": 0.1751943826675415,
      "rewards/rejected": -0.5735553503036499,
      "step": 130
    },
    {
      "epoch": 0.12433392539964476,
      "grad_norm": 0.10470724105834961,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 13.948530197143555,
      "logits/rejected": 14.77311897277832,
      "logps/chosen": -0.2675064504146576,
      "logps/rejected": -0.3631269335746765,
      "loss": 0.9053,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.4012596607208252,
      "rewards/margins": 0.14343078434467316,
      "rewards/rejected": -0.5446904301643372,
      "step": 140
    },
    {
      "epoch": 0.13321492007104796,
      "grad_norm": 0.09238290041685104,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 12.939355850219727,
      "logits/rejected": 13.387840270996094,
      "logps/chosen": -0.27794820070266724,
      "logps/rejected": -0.3520648777484894,
      "loss": 0.9097,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41692233085632324,
      "rewards/margins": 0.11117497831583023,
      "rewards/rejected": -0.5280972719192505,
      "step": 150
    },
    {
      "epoch": 0.13321492007104796,
      "eval_logits/chosen": 13.123927116394043,
      "eval_logits/rejected": 13.085403442382812,
      "eval_logps/chosen": -0.28104570508003235,
      "eval_logps/rejected": -0.3562033474445343,
      "eval_loss": 0.9194123148918152,
      "eval_rewards/accuracies": 0.5824176073074341,
      "eval_rewards/chosen": -0.42156851291656494,
      "eval_rewards/margins": 0.11273646354675293,
      "eval_rewards/rejected": -0.5343050360679626,
      "eval_runtime": 26.928,
      "eval_samples_per_second": 27.035,
      "eval_steps_per_second": 3.379,
      "step": 150
    },
    {
      "epoch": 0.14209591474245115,
      "grad_norm": 0.14459910988807678,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 11.937938690185547,
      "logits/rejected": 13.300163269042969,
      "logps/chosen": -0.24840429425239563,
      "logps/rejected": -0.4205872118473053,
      "loss": 0.8929,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.37260642647743225,
      "rewards/margins": 0.2582743763923645,
      "rewards/rejected": -0.6308808326721191,
      "step": 160
    },
    {
      "epoch": 0.15097690941385436,
      "grad_norm": 0.13749755918979645,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 12.549962997436523,
      "logits/rejected": 12.858027458190918,
      "logps/chosen": -0.2944340109825134,
      "logps/rejected": -0.38480544090270996,
      "loss": 0.895,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.44165101647377014,
      "rewards/margins": 0.1355571448802948,
      "rewards/rejected": -0.5772081613540649,
      "step": 170
    },
    {
      "epoch": 0.15985790408525755,
      "grad_norm": 0.14530803263187408,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 10.892537117004395,
      "logits/rejected": 11.756756782531738,
      "logps/chosen": -0.2693363130092621,
      "logps/rejected": -0.3874126374721527,
      "loss": 0.8801,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4040044844150543,
      "rewards/margins": 0.17711447179317474,
      "rewards/rejected": -0.5811189413070679,
      "step": 180
    },
    {
      "epoch": 0.16873889875666073,
      "grad_norm": 0.20053793489933014,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 9.238119125366211,
      "logits/rejected": 10.233763694763184,
      "logps/chosen": -0.269479900598526,
      "logps/rejected": -0.4149019122123718,
      "loss": 0.8776,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.4042198657989502,
      "rewards/margins": 0.21813304722309113,
      "rewards/rejected": -0.6223528385162354,
      "step": 190
    },
    {
      "epoch": 0.17761989342806395,
      "grad_norm": 0.20254959166049957,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 9.261235237121582,
      "logits/rejected": 9.7451753616333,
      "logps/chosen": -0.29611462354660034,
      "logps/rejected": -0.44530659914016724,
      "loss": 0.8596,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.4441719651222229,
      "rewards/margins": 0.22378793358802795,
      "rewards/rejected": -0.6679598689079285,
      "step": 200
    },
    {
      "epoch": 0.17761989342806395,
      "eval_logits/chosen": 8.231317520141602,
      "eval_logits/rejected": 8.285564422607422,
      "eval_logps/chosen": -0.3201982080936432,
      "eval_logps/rejected": -0.4862101078033447,
      "eval_loss": 0.8602121472358704,
      "eval_rewards/accuracies": 0.6153846383094788,
      "eval_rewards/chosen": -0.480297327041626,
      "eval_rewards/margins": 0.2490178644657135,
      "eval_rewards/rejected": -0.7293152213096619,
      "eval_runtime": 26.9054,
      "eval_samples_per_second": 27.058,
      "eval_steps_per_second": 3.382,
      "step": 200
    },
    {
      "epoch": 0.18650088809946713,
      "grad_norm": 0.25219231843948364,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 7.822943687438965,
      "logits/rejected": 8.847550392150879,
      "logps/chosen": -0.3111446797847748,
      "logps/rejected": -0.5253105163574219,
      "loss": 0.8553,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.46671706438064575,
      "rewards/margins": 0.3212486803531647,
      "rewards/rejected": -0.787965714931488,
      "step": 210
    },
    {
      "epoch": 0.19538188277087035,
      "grad_norm": 0.29052653908729553,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 7.404467582702637,
      "logits/rejected": 7.140469551086426,
      "logps/chosen": -0.35531798005104065,
      "logps/rejected": -0.4979163110256195,
      "loss": 0.8204,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.5329769849777222,
      "rewards/margins": 0.21389751136302948,
      "rewards/rejected": -0.7468745112419128,
      "step": 220
    },
    {
      "epoch": 0.20426287744227353,
      "grad_norm": 0.30769258737564087,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 5.096299648284912,
      "logits/rejected": 5.143233299255371,
      "logps/chosen": -0.3684440553188324,
      "logps/rejected": -0.557721734046936,
      "loss": 0.7884,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.5526660084724426,
      "rewards/margins": 0.2839165925979614,
      "rewards/rejected": -0.8365826606750488,
      "step": 230
    },
    {
      "epoch": 0.21314387211367672,
      "grad_norm": 0.3165770471096039,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 5.376433372497559,
      "logits/rejected": 4.738249778747559,
      "logps/chosen": -0.4181212782859802,
      "logps/rejected": -0.6782273054122925,
      "loss": 0.7919,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.627181887626648,
      "rewards/margins": 0.39015907049179077,
      "rewards/rejected": -1.0173410177230835,
      "step": 240
    },
    {
      "epoch": 0.22202486678507993,
      "grad_norm": 0.6854519844055176,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 3.3769917488098145,
      "logits/rejected": 2.865615129470825,
      "logps/chosen": -0.42344069480895996,
      "logps/rejected": -0.8046137094497681,
      "loss": 0.7456,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.6351610422134399,
      "rewards/margins": 0.5717595219612122,
      "rewards/rejected": -1.2069203853607178,
      "step": 250
    },
    {
      "epoch": 0.22202486678507993,
      "eval_logits/chosen": 2.8929049968719482,
      "eval_logits/rejected": 2.0500316619873047,
      "eval_logps/chosen": -0.46059882640838623,
      "eval_logps/rejected": -0.8680218458175659,
      "eval_loss": 0.7499477863311768,
      "eval_rewards/accuracies": 0.6373626589775085,
      "eval_rewards/chosen": -0.6908981204032898,
      "eval_rewards/margins": 0.6111345887184143,
      "eval_rewards/rejected": -1.3020328283309937,
      "eval_runtime": 26.9489,
      "eval_samples_per_second": 27.014,
      "eval_steps_per_second": 3.377,
      "step": 250
    },
    {
      "epoch": 0.23090586145648312,
      "grad_norm": 1.4715200662612915,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 1.1702392101287842,
      "logits/rejected": 0.6887636184692383,
      "logps/chosen": -0.4449438154697418,
      "logps/rejected": -1.0861380100250244,
      "loss": 0.6853,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.6674157381057739,
      "rewards/margins": 0.9617912173271179,
      "rewards/rejected": -1.6292070150375366,
      "step": 260
    },
    {
      "epoch": 0.23978685612788633,
      "grad_norm": 1.1773816347122192,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 2.005816698074341,
      "logits/rejected": 0.7508751749992371,
      "logps/chosen": -0.520461916923523,
      "logps/rejected": -1.2372539043426514,
      "loss": 0.6514,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.7806928157806396,
      "rewards/margins": 1.0751880407333374,
      "rewards/rejected": -1.8558809757232666,
      "step": 270
    },
    {
      "epoch": 0.24866785079928952,
      "grad_norm": 0.4426538348197937,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 1.3614282608032227,
      "logits/rejected": 0.17441503703594208,
      "logps/chosen": -0.628559410572052,
      "logps/rejected": -1.3841784000396729,
      "loss": 0.6447,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.9428391456604004,
      "rewards/margins": 1.1334283351898193,
      "rewards/rejected": -2.076267719268799,
      "step": 280
    },
    {
      "epoch": 0.25754884547069273,
      "grad_norm": 0.781704843044281,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 2.0829949378967285,
      "logits/rejected": 1.0815263986587524,
      "logps/chosen": -0.6394428014755249,
      "logps/rejected": -1.8771930932998657,
      "loss": 0.5863,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.9591643214225769,
      "rewards/margins": 1.8566251993179321,
      "rewards/rejected": -2.8157896995544434,
      "step": 290
    },
    {
      "epoch": 0.2664298401420959,
      "grad_norm": 1.1327613592147827,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 1.197505235671997,
      "logits/rejected": 0.16260084509849548,
      "logps/chosen": -0.6966903805732727,
      "logps/rejected": -2.2350592613220215,
      "loss": 0.5354,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.0450356006622314,
      "rewards/margins": 2.30755352973938,
      "rewards/rejected": -3.3525891304016113,
      "step": 300
    },
    {
      "epoch": 0.2664298401420959,
      "eval_logits/chosen": 1.2078615427017212,
      "eval_logits/rejected": 0.2717524468898773,
      "eval_logps/chosen": -0.7531170845031738,
      "eval_logps/rejected": -2.0594394207000732,
      "eval_loss": 0.5640697479248047,
      "eval_rewards/accuracies": 0.6703296899795532,
      "eval_rewards/chosen": -1.1296755075454712,
      "eval_rewards/margins": 1.9594837427139282,
      "eval_rewards/rejected": -3.0891592502593994,
      "eval_runtime": 26.9506,
      "eval_samples_per_second": 27.012,
      "eval_steps_per_second": 3.377,
      "step": 300
    },
    {
      "epoch": 0.2753108348134991,
      "grad_norm": 0.8920393586158752,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 1.12120521068573,
      "logits/rejected": 0.3509993851184845,
      "logps/chosen": -0.7844308018684387,
      "logps/rejected": -2.2886409759521484,
      "loss": 0.5357,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.176646113395691,
      "rewards/margins": 2.256315231323242,
      "rewards/rejected": -3.4329617023468018,
      "step": 310
    },
    {
      "epoch": 0.2841918294849023,
      "grad_norm": 0.6658002734184265,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 1.750349760055542,
      "logits/rejected": 0.7707412242889404,
      "logps/chosen": -0.7995473146438599,
      "logps/rejected": -2.2184109687805176,
      "loss": 0.5412,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.199320912361145,
      "rewards/margins": 2.128295421600342,
      "rewards/rejected": -3.3276162147521973,
      "step": 320
    },
    {
      "epoch": 0.29307282415630553,
      "grad_norm": 1.2708265781402588,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 1.8776671886444092,
      "logits/rejected": 1.2588229179382324,
      "logps/chosen": -0.894809901714325,
      "logps/rejected": -2.4718618392944336,
      "loss": 0.5502,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.342214822769165,
      "rewards/margins": 2.3655781745910645,
      "rewards/rejected": -3.7077927589416504,
      "step": 330
    },
    {
      "epoch": 0.3019538188277087,
      "grad_norm": 1.2345410585403442,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 1.9461545944213867,
      "logits/rejected": 0.9410673975944519,
      "logps/chosen": -0.891791045665741,
      "logps/rejected": -2.2217679023742676,
      "loss": 0.5519,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.3376867771148682,
      "rewards/margins": 1.9949653148651123,
      "rewards/rejected": -3.3326523303985596,
      "step": 340
    },
    {
      "epoch": 0.3108348134991119,
      "grad_norm": 11.159003257751465,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 2.396707534790039,
      "logits/rejected": 1.4882147312164307,
      "logps/chosen": -1.1443694829940796,
      "logps/rejected": -2.867492914199829,
      "loss": 0.4806,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.7165542840957642,
      "rewards/margins": 2.5846848487854004,
      "rewards/rejected": -4.301239490509033,
      "step": 350
    },
    {
      "epoch": 0.3108348134991119,
      "eval_logits/chosen": 1.7898317575454712,
      "eval_logits/rejected": 1.109007477760315,
      "eval_logps/chosen": -1.3312609195709229,
      "eval_logps/rejected": -3.0454280376434326,
      "eval_loss": 0.4698469638824463,
      "eval_rewards/accuracies": 0.8131868243217468,
      "eval_rewards/chosen": -1.9968912601470947,
      "eval_rewards/margins": 2.5712504386901855,
      "eval_rewards/rejected": -4.568141937255859,
      "eval_runtime": 26.9108,
      "eval_samples_per_second": 27.052,
      "eval_steps_per_second": 3.382,
      "step": 350
    },
    {
      "epoch": 0.3197158081705151,
      "grad_norm": 2.2653048038482666,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 0.36641037464141846,
      "logits/rejected": -0.4621815085411072,
      "logps/chosen": -1.5972144603729248,
      "logps/rejected": -3.1685004234313965,
      "loss": 0.472,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -2.3958218097686768,
      "rewards/margins": 2.356929063796997,
      "rewards/rejected": -4.752750396728516,
      "step": 360
    },
    {
      "epoch": 0.3285968028419183,
      "grad_norm": 2.6831283569335938,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 1.18975830078125,
      "logits/rejected": 0.7668083310127258,
      "logps/chosen": -2.1462299823760986,
      "logps/rejected": -3.7489593029022217,
      "loss": 0.4169,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -3.2193446159362793,
      "rewards/margins": 2.4040939807891846,
      "rewards/rejected": -5.623438835144043,
      "step": 370
    },
    {
      "epoch": 0.33747779751332146,
      "grad_norm": 3.5498828887939453,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 1.8030259609222412,
      "logits/rejected": 1.4276998043060303,
      "logps/chosen": -2.189352512359619,
      "logps/rejected": -3.8717925548553467,
      "loss": 0.4156,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.2840285301208496,
      "rewards/margins": 2.523660182952881,
      "rewards/rejected": -5.807689189910889,
      "step": 380
    },
    {
      "epoch": 0.3463587921847247,
      "grad_norm": 1.46839439868927,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 2.2764127254486084,
      "logits/rejected": 1.9199844598770142,
      "logps/chosen": -2.4463610649108887,
      "logps/rejected": -4.317194938659668,
      "loss": 0.4085,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -3.669541597366333,
      "rewards/margins": 2.806250810623169,
      "rewards/rejected": -6.475791931152344,
      "step": 390
    },
    {
      "epoch": 0.3552397868561279,
      "grad_norm": 1.6744616031646729,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 2.0364136695861816,
      "logits/rejected": 1.4645214080810547,
      "logps/chosen": -2.5738539695739746,
      "logps/rejected": -4.41655158996582,
      "loss": 0.3723,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -3.8607802391052246,
      "rewards/margins": 2.764047145843506,
      "rewards/rejected": -6.6248273849487305,
      "step": 400
    },
    {
      "epoch": 0.3552397868561279,
      "eval_logits/chosen": 2.0340981483459473,
      "eval_logits/rejected": 1.376452922821045,
      "eval_logps/chosen": -2.6443424224853516,
      "eval_logps/rejected": -4.638274192810059,
      "eval_loss": 0.38738909363746643,
      "eval_rewards/accuracies": 0.901098906993866,
      "eval_rewards/chosen": -3.9665138721466064,
      "eval_rewards/margins": 2.9908969402313232,
      "eval_rewards/rejected": -6.95741081237793,
      "eval_runtime": 26.943,
      "eval_samples_per_second": 27.02,
      "eval_steps_per_second": 3.377,
      "step": 400
    },
    {
      "epoch": 0.3641207815275311,
      "grad_norm": 24.103384017944336,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": 1.3653905391693115,
      "logits/rejected": 0.7307125329971313,
      "logps/chosen": -2.450911045074463,
      "logps/rejected": -4.609705924987793,
      "loss": 0.4031,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -3.6763663291931152,
      "rewards/margins": 3.2381927967071533,
      "rewards/rejected": -6.914559364318848,
      "step": 410
    },
    {
      "epoch": 0.37300177619893427,
      "grad_norm": 2.102792263031006,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": 1.6425203084945679,
      "logits/rejected": 0.9890750050544739,
      "logps/chosen": -2.1248934268951416,
      "logps/rejected": -3.8592541217803955,
      "loss": 0.3609,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -3.187340497970581,
      "rewards/margins": 2.601541042327881,
      "rewards/rejected": -5.788880825042725,
      "step": 420
    },
    {
      "epoch": 0.38188277087033745,
      "grad_norm": 2.970210552215576,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": 2.258129358291626,
      "logits/rejected": 1.7126191854476929,
      "logps/chosen": -2.9822497367858887,
      "logps/rejected": -4.995323657989502,
      "loss": 0.3669,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.473374366760254,
      "rewards/margins": 3.01961088180542,
      "rewards/rejected": -7.492985725402832,
      "step": 430
    },
    {
      "epoch": 0.3907637655417407,
      "grad_norm": 2.3307077884674072,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": 1.5054914951324463,
      "logits/rejected": 0.6257806420326233,
      "logps/chosen": -3.2783126831054688,
      "logps/rejected": -5.625805854797363,
      "loss": 0.3428,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.917469024658203,
      "rewards/margins": 3.521239757537842,
      "rewards/rejected": -8.438708305358887,
      "step": 440
    },
    {
      "epoch": 0.3996447602131439,
      "grad_norm": 3.2838215827941895,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": 1.711268424987793,
      "logits/rejected": 1.4157356023788452,
      "logps/chosen": -3.0163674354553223,
      "logps/rejected": -5.020781517028809,
      "loss": 0.3589,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.524550437927246,
      "rewards/margins": 3.006621837615967,
      "rewards/rejected": -7.531172752380371,
      "step": 450
    },
    {
      "epoch": 0.3996447602131439,
      "eval_logits/chosen": 2.209981679916382,
      "eval_logits/rejected": 1.6256438493728638,
      "eval_logps/chosen": -2.841517925262451,
      "eval_logps/rejected": -5.154296875,
      "eval_loss": 0.35665163397789,
      "eval_rewards/accuracies": 0.9120879173278809,
      "eval_rewards/chosen": -4.262276649475098,
      "eval_rewards/margins": 3.4691689014434814,
      "eval_rewards/rejected": -7.7314453125,
      "eval_runtime": 26.9493,
      "eval_samples_per_second": 27.014,
      "eval_steps_per_second": 3.377,
      "step": 450
    },
    {
      "epoch": 0.40852575488454707,
      "grad_norm": 3.4476094245910645,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": 1.3581907749176025,
      "logits/rejected": 0.8621677160263062,
      "logps/chosen": -2.831627130508423,
      "logps/rejected": -5.023026943206787,
      "loss": 0.3338,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -4.247440338134766,
      "rewards/margins": 3.287100315093994,
      "rewards/rejected": -7.534541130065918,
      "step": 460
    },
    {
      "epoch": 0.41740674955595025,
      "grad_norm": 2.6653027534484863,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": 1.8693492412567139,
      "logits/rejected": 1.605238676071167,
      "logps/chosen": -2.7347512245178223,
      "logps/rejected": -5.385977745056152,
      "loss": 0.3348,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.1021270751953125,
      "rewards/margins": 3.976839542388916,
      "rewards/rejected": -8.07896614074707,
      "step": 470
    },
    {
      "epoch": 0.42628774422735344,
      "grad_norm": 6.658248424530029,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": 1.8902003765106201,
      "logits/rejected": 1.4942703247070312,
      "logps/chosen": -3.1413326263427734,
      "logps/rejected": -5.365391731262207,
      "loss": 0.3171,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -4.71199893951416,
      "rewards/margins": 3.3360886573791504,
      "rewards/rejected": -8.048087120056152,
      "step": 480
    },
    {
      "epoch": 0.4351687388987567,
      "grad_norm": 3.2576091289520264,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": 2.301971673965454,
      "logits/rejected": 1.5673840045928955,
      "logps/chosen": -2.789170980453491,
      "logps/rejected": -5.354689598083496,
      "loss": 0.3238,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.1837568283081055,
      "rewards/margins": 3.8482773303985596,
      "rewards/rejected": -8.032033920288086,
      "step": 490
    },
    {
      "epoch": 0.44404973357015987,
      "grad_norm": 5.094764709472656,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": 2.506347179412842,
      "logits/rejected": 2.2316441535949707,
      "logps/chosen": -3.1029722690582275,
      "logps/rejected": -5.757994651794434,
      "loss": 0.3052,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -4.654458522796631,
      "rewards/margins": 3.9825336933135986,
      "rewards/rejected": -8.636991500854492,
      "step": 500
    },
    {
      "epoch": 0.44404973357015987,
      "eval_logits/chosen": 2.332798480987549,
      "eval_logits/rejected": 1.8098194599151611,
      "eval_logps/chosen": -3.269892692565918,
      "eval_logps/rejected": -5.909496307373047,
      "eval_loss": 0.324949711561203,
      "eval_rewards/accuracies": 0.9120879173278809,
      "eval_rewards/chosen": -4.904839038848877,
      "eval_rewards/margins": 3.9594056606292725,
      "eval_rewards/rejected": -8.86424446105957,
      "eval_runtime": 26.9415,
      "eval_samples_per_second": 27.021,
      "eval_steps_per_second": 3.378,
      "step": 500
    },
    {
      "epoch": 0.45293072824156305,
      "grad_norm": 4.973895072937012,
      "learning_rate": 3.7043841852542884e-06,
      "logits/chosen": 1.1408557891845703,
      "logits/rejected": 0.9078874588012695,
      "logps/chosen": -3.2813212871551514,
      "logps/rejected": -5.6853814125061035,
      "loss": 0.3185,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -4.9219818115234375,
      "rewards/margins": 3.6060900688171387,
      "rewards/rejected": -8.528071403503418,
      "step": 510
    },
    {
      "epoch": 0.46181172291296624,
      "grad_norm": 2.726701021194458,
      "learning_rate": 3.658240087799655e-06,
      "logits/chosen": 1.5950994491577148,
      "logits/rejected": 1.124607801437378,
      "logps/chosen": -3.333927631378174,
      "logps/rejected": -6.0137128829956055,
      "loss": 0.3148,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -5.000891208648682,
      "rewards/margins": 4.019678592681885,
      "rewards/rejected": -9.020570755004883,
      "step": 520
    },
    {
      "epoch": 0.4706927175843694,
      "grad_norm": 4.8865556716918945,
      "learning_rate": 3.611587947962319e-06,
      "logits/chosen": 2.257068157196045,
      "logits/rejected": 1.7986671924591064,
      "logps/chosen": -3.1947333812713623,
      "logps/rejected": -6.262570381164551,
      "loss": 0.3191,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -4.792099952697754,
      "rewards/margins": 4.601754665374756,
      "rewards/rejected": -9.393855094909668,
      "step": 530
    },
    {
      "epoch": 0.47957371225577267,
      "grad_norm": 4.325016021728516,
      "learning_rate": 3.564448228912682e-06,
      "logits/chosen": 2.3096823692321777,
      "logits/rejected": 1.7991451025009155,
      "logps/chosen": -3.3641560077667236,
      "logps/rejected": -6.142219543457031,
      "loss": 0.276,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -5.046233654022217,
      "rewards/margins": 4.167095184326172,
      "rewards/rejected": -9.213329315185547,
      "step": 540
    },
    {
      "epoch": 0.48845470692717585,
      "grad_norm": 3.15216326713562,
      "learning_rate": 3.516841607689501e-06,
      "logits/chosen": 1.4415160417556763,
      "logits/rejected": 1.1298859119415283,
      "logps/chosen": -2.851362466812134,
      "logps/rejected": -6.210903167724609,
      "loss": 0.3257,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -4.277044296264648,
      "rewards/margins": 5.039311408996582,
      "rewards/rejected": -9.316354751586914,
      "step": 550
    },
    {
      "epoch": 0.48845470692717585,
      "eval_logits/chosen": 2.296088695526123,
"eval_logits/rejected": 1.8609188795089722, | |
"eval_logps/chosen": -3.422206401824951, | |
"eval_logps/rejected": -6.41662073135376, | |
"eval_loss": 0.30456680059432983, | |
"eval_rewards/accuracies": 0.9120879173278809, | |
"eval_rewards/chosen": -5.133309841156006, | |
"eval_rewards/margins": 4.491621971130371, | |
"eval_rewards/rejected": -9.624931335449219, | |
"eval_runtime": 26.9409, | |
"eval_samples_per_second": 27.022, | |
"eval_steps_per_second": 3.378, | |
"step": 550 | |
}, | |
{ | |
"epoch": 0.49733570159857904, | |
"grad_norm": 3.456071615219116, | |
"learning_rate": 3.4687889661302577e-06, | |
"logits/chosen": 1.6153348684310913, | |
"logits/rejected": 1.1999633312225342, | |
"logps/chosen": -3.450087785720825, | |
"logps/rejected": -6.294098854064941, | |
"loss": 0.3094, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -5.175131797790527, | |
"rewards/margins": 4.266016960144043, | |
"rewards/rejected": -9.44114875793457, | |
"step": 560 | |
}, | |
{ | |
"epoch": 0.5062166962699822, | |
"grad_norm": 3.7813830375671387, | |
"learning_rate": 3.4203113817116955e-06, | |
"logits/chosen": 2.5460948944091797, | |
"logits/rejected": 2.240358829498291, | |
"logps/chosen": -3.441761016845703, | |
"logps/rejected": -6.200081825256348, | |
"loss": 0.2967, | |
"rewards/accuracies": 0.8374999761581421, | |
"rewards/chosen": -5.1626410484313965, | |
"rewards/margins": 4.137481212615967, | |
"rewards/rejected": -9.300122261047363, | |
"step": 570 | |
}, | |
{ | |
"epoch": 0.5150976909413855, | |
"grad_norm": 3.561509847640991, | |
"learning_rate": 3.3714301183045382e-06, | |
"logits/chosen": 2.236506223678589, | |
"logits/rejected": 2.0224223136901855, | |
"logps/chosen": -3.6373534202575684, | |
"logps/rejected": -6.733517646789551, | |
"loss": 0.2781, | |
"rewards/accuracies": 0.875, | |
"rewards/chosen": -5.45603084564209, | |
"rewards/margins": 4.6442461013793945, | |
"rewards/rejected": -10.1002779006958, | |
"step": 580 | |
}, | |
{ | |
"epoch": 0.5239786856127886, | |
"grad_norm": 2.540649175643921, | |
"learning_rate": 3.3221666168464584e-06, | |
"logits/chosen": 1.9122869968414307, | |
"logits/rejected": 1.8370224237442017, | |
"logps/chosen": -3.241629123687744, | |
"logps/rejected": -6.666001319885254, | |
"loss": 0.2678, | |
"rewards/accuracies": 0.9375, | |
"rewards/chosen": -4.862443447113037, | |
"rewards/margins": 5.1365580558776855, | |
"rewards/rejected": -9.999002456665039, | |
"step": 590 | |
}, | |
{ | |
"epoch": 0.5328596802841918, | |
"grad_norm": 2.05529522895813, | |
"learning_rate": 3.272542485937369e-06, | |
"logits/chosen": 1.9268839359283447, | |
"logits/rejected": 1.4725837707519531, | |
"logps/chosen": -3.7067363262176514, | |
"logps/rejected": -6.972158908843994, | |
"loss": 0.264, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -5.560103893280029, | |
"rewards/margins": 4.898133754730225, | |
"rewards/rejected": -10.458237648010254, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.5328596802841918, | |
"eval_logits/chosen": 2.3668525218963623, | |
"eval_logits/rejected": 1.9882280826568604, | |
"eval_logps/chosen": -3.6040353775024414, | |
"eval_logps/rejected": -6.91050386428833, | |
"eval_loss": 0.29209136962890625, | |
"eval_rewards/accuracies": 0.9120879173278809, | |
"eval_rewards/chosen": -5.406052589416504, | |
"eval_rewards/margins": 4.95970344543457, | |
"eval_rewards/rejected": -10.365756034851074, | |
"eval_runtime": 26.9486, | |
"eval_samples_per_second": 27.014, | |
"eval_steps_per_second": 3.377, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.5417406749555951, | |
"grad_norm": 5.438882350921631, | |
"learning_rate": 3.222579492361179e-06, | |
"logits/chosen": 2.0183475017547607, | |
"logits/rejected": 1.7007204294204712, | |
"logps/chosen": -3.7757949829101562, | |
"logps/rejected": -6.875540256500244, | |
"loss": 0.2787, | |
"rewards/accuracies": 0.9375, | |
"rewards/chosen": -5.663692951202393, | |
"rewards/margins": 4.6496171951293945, | |
"rewards/rejected": -10.313310623168945, | |
"step": 610 | |
}, | |
{ | |
"epoch": 0.5506216696269982, | |
"grad_norm": 6.464421272277832, | |
"learning_rate": 3.1722995515381644e-06, | |
"logits/chosen": 1.8897826671600342, | |
"logits/rejected": 1.6919243335723877, | |
"logps/chosen": -3.738862991333008, | |
"logps/rejected": -7.225765228271484, | |
"loss": 0.296, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -5.6082940101623535, | |
"rewards/margins": 5.230353355407715, | |
"rewards/rejected": -10.83864688873291, | |
"step": 620 | |
}, | |
{ | |
"epoch": 0.5595026642984015, | |
"grad_norm": 3.983937978744507, | |
"learning_rate": 3.121724717912138e-06, | |
"logits/chosen": 1.8497775793075562, | |
"logits/rejected": 1.3537144660949707, | |
"logps/chosen": -3.6511616706848145, | |
"logps/rejected": -7.310211181640625, | |
"loss": 0.2604, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -5.476742267608643, | |
"rewards/margins": 5.488574028015137, | |
"rewards/rejected": -10.965316772460938, | |
"step": 630 | |
}, | |
{ | |
"epoch": 0.5683836589698046, | |
"grad_norm": 9.850502014160156, | |
"learning_rate": 3.0708771752766397e-06, | |
"logits/chosen": 2.073599338531494, | |
"logits/rejected": 1.3638606071472168, | |
"logps/chosen": -4.1297831535339355, | |
"logps/rejected": -7.344725131988525, | |
"loss": 0.2868, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -6.194674491882324, | |
"rewards/margins": 4.822413444519043, | |
"rewards/rejected": -11.017088890075684, | |
"step": 640 | |
}, | |
{ | |
"epoch": 0.5772646536412078, | |
"grad_norm": 3.2157247066497803, | |
"learning_rate": 3.019779227044398e-06, | |
"logits/chosen": 1.492789387702942, | |
"logits/rejected": 1.2012196779251099, | |
"logps/chosen": -3.458716630935669, | |
"logps/rejected": -6.776049613952637, | |
"loss": 0.2489, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -5.188075542449951, | |
"rewards/margins": 4.975998878479004, | |
"rewards/rejected": -10.164074897766113, | |
"step": 650 | |
}, | |
{ | |
"epoch": 0.5772646536412078, | |
"eval_logits/chosen": 2.4555883407592773, | |
"eval_logits/rejected": 2.1057684421539307, | |
"eval_logps/chosen": -3.6452677249908447, | |
"eval_logps/rejected": -7.163515090942383, | |
"eval_loss": 0.2778012156486511, | |
"eval_rewards/accuracies": 0.901098906993866, | |
"eval_rewards/chosen": -5.467901706695557, | |
"eval_rewards/margins": 5.277371406555176, | |
"eval_rewards/rejected": -10.745272636413574, | |
"eval_runtime": 26.906, | |
"eval_samples_per_second": 27.057, | |
"eval_steps_per_second": 3.382, | |
"step": 650 | |
}, | |
{ | |
"epoch": 0.5861456483126111, | |
"grad_norm": 3.6115524768829346, | |
"learning_rate": 2.9684532864643123e-06, | |
"logits/chosen": 2.066650390625, | |
"logits/rejected": 1.7030436992645264, | |
"logps/chosen": -3.875563859939575, | |
"logps/rejected": -7.133286476135254, | |
"loss": 0.2752, | |
"rewards/accuracies": 0.862500011920929, | |
"rewards/chosen": -5.813345432281494, | |
"rewards/margins": 4.88658332824707, | |
"rewards/rejected": -10.699929237365723, | |
"step": 660 | |
}, | |
{ | |
"epoch": 0.5950266429840142, | |
"grad_norm": 3.460318088531494, | |
"learning_rate": 2.9169218667902562e-06, | |
"logits/chosen": 2.433854341506958, | |
"logits/rejected": 2.045027256011963, | |
"logps/chosen": -3.6893928050994873, | |
"logps/rejected": -6.720344543457031, | |
"loss": 0.2562, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -5.534089088439941, | |
"rewards/margins": 4.5464277267456055, | |
"rewards/rejected": -10.080517768859863, | |
"step": 670 | |
}, | |
{ | |
"epoch": 0.6039076376554174, | |
"grad_norm": 2.232542037963867, | |
"learning_rate": 2.8652075714060296e-06, | |
"logits/chosen": 2.3069093227386475, | |
"logits/rejected": 1.7321109771728516, | |
"logps/chosen": -3.936506748199463, | |
"logps/rejected": -7.7827630043029785, | |
"loss": 0.2333, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -5.904759883880615, | |
"rewards/margins": 5.769384860992432, | |
"rewards/rejected": -11.674144744873047, | |
"step": 680 | |
}, | |
{ | |
"epoch": 0.6127886323268206, | |
"grad_norm": 3.947690010070801, | |
"learning_rate": 2.813333083910761e-06, | |
"logits/chosen": 1.467885136604309, | |
"logits/rejected": 1.0833615064620972, | |
"logps/chosen": -3.2870895862579346, | |
"logps/rejected": -7.22244930267334, | |
"loss": 0.2739, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -4.930634498596191, | |
"rewards/margins": 5.90303897857666, | |
"rewards/rejected": -10.833673477172852, | |
"step": 690 | |
}, | |
{ | |
"epoch": 0.6216696269982238, | |
"grad_norm": 4.934174060821533, | |
"learning_rate": 2.761321158169134e-06, | |
"logits/chosen": 2.440274477005005, | |
"logits/rejected": 2.157214403152466, | |
"logps/chosen": -4.241659641265869, | |
"logps/rejected": -7.748563289642334, | |
"loss": 0.2859, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -6.362490177154541, | |
"rewards/margins": 5.260354995727539, | |
"rewards/rejected": -11.622844696044922, | |
"step": 700 | |
}, | |
{ | |
"epoch": 0.6216696269982238, | |
"eval_logits/chosen": 2.5609161853790283, | |
"eval_logits/rejected": 2.258920431137085, | |
"eval_logps/chosen": -3.965893507003784, | |
"eval_logps/rejected": -7.77095365524292, | |
"eval_loss": 0.26121068000793457, | |
"eval_rewards/accuracies": 0.9340659379959106, | |
"eval_rewards/chosen": -5.948840618133545, | |
"eval_rewards/margins": 5.707590579986572, | |
"eval_rewards/rejected": -11.656431198120117, | |
"eval_runtime": 26.9101, | |
"eval_samples_per_second": 27.053, | |
"eval_steps_per_second": 3.382, | |
"step": 700 | |
}, | |
{ | |
"epoch": 0.6305506216696269, | |
"grad_norm": 5.9008684158325195, | |
"learning_rate": 2.70919460833079e-06, | |
"logits/chosen": 1.6100571155548096, | |
"logits/rejected": 1.341528296470642, | |
"logps/chosen": -3.8634166717529297, | |
"logps/rejected": -7.63654088973999, | |
"loss": 0.2351, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -5.795124053955078, | |
"rewards/margins": 5.659686088562012, | |
"rewards/rejected": -11.454811096191406, | |
"step": 710 | |
}, | |
{ | |
"epoch": 0.6394316163410302, | |
"grad_norm": 2.7195751667022705, | |
"learning_rate": 2.6569762988232838e-06, | |
"logits/chosen": 2.1212353706359863, | |
"logits/rejected": 1.864898443222046, | |
"logps/chosen": -3.8487918376922607, | |
"logps/rejected": -7.823777198791504, | |
"loss": 0.2329, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -5.77318811416626, | |
"rewards/margins": 5.9624762535095215, | |
"rewards/rejected": -11.735665321350098, | |
"step": 720 | |
}, | |
{ | |
"epoch": 0.6483126110124334, | |
"grad_norm": 4.423059463500977, | |
"learning_rate": 2.604689134322999e-06, | |
"logits/chosen": 2.0979533195495605, | |
"logits/rejected": 1.7213973999023438, | |
"logps/chosen": -3.9874777793884277, | |
"logps/rejected": -7.617144584655762, | |
"loss": 0.2482, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -5.981216907501221, | |
"rewards/margins": 5.4444990158081055, | |
"rewards/rejected": -11.4257173538208, | |
"step": 730 | |
}, | |
{ | |
"epoch": 0.6571936056838366, | |
"grad_norm": 4.327184200286865, | |
"learning_rate": 2.5523560497083927e-06, | |
"logits/chosen": 2.2398109436035156, | |
"logits/rejected": 1.913095474243164, | |
"logps/chosen": -3.72918701171875, | |
"logps/rejected": -7.094988822937012, | |
"loss": 0.223, | |
"rewards/accuracies": 0.887499988079071, | |
"rewards/chosen": -5.593780517578125, | |
"rewards/margins": 5.048702716827393, | |
"rewards/rejected": -10.64248275756836, | |
"step": 740 | |
}, | |
{ | |
"epoch": 0.6660746003552398, | |
"grad_norm": 3.1207029819488525, | |
"learning_rate": 2.5e-06, | |
"logits/chosen": 1.7178529500961304, | |
"logits/rejected": 1.7194137573242188, | |
"logps/chosen": -3.9564640522003174, | |
"logps/rejected": -7.360040187835693, | |
"loss": 0.2227, | |
"rewards/accuracies": 0.887499988079071, | |
"rewards/chosen": -5.934695243835449, | |
"rewards/margins": 5.10536527633667, | |
"rewards/rejected": -11.040060043334961, | |
"step": 750 | |
}, | |
{ | |
"epoch": 0.6660746003552398, | |
"eval_logits/chosen": 2.521843671798706, | |
"eval_logits/rejected": 2.2902743816375732, | |
"eval_logps/chosen": -4.141489028930664, | |
"eval_logps/rejected": -8.121171951293945, | |
"eval_loss": 0.25929248332977295, | |
"eval_rewards/accuracies": 0.9120879173278809, | |
"eval_rewards/chosen": -6.212233543395996, | |
"eval_rewards/margins": 5.969525337219238, | |
"eval_rewards/rejected": -12.181758880615234, | |
"eval_runtime": 26.9435, | |
"eval_samples_per_second": 27.019, | |
"eval_steps_per_second": 3.377, | |
"step": 750 | |
}, | |
{ | |
"epoch": 0.6749555950266429, | |
"grad_norm": 2.5788235664367676, | |
"learning_rate": 2.447643950291608e-06, | |
"logits/chosen": 1.7696739435195923, | |
"logits/rejected": 1.624455213546753, | |
"logps/chosen": -4.3122711181640625, | |
"logps/rejected": -8.191080093383789, | |
"loss": 0.1991, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -6.468405723571777, | |
"rewards/margins": 5.81821346282959, | |
"rewards/rejected": -12.286620140075684, | |
"step": 760 | |
}, | |
{ | |
"epoch": 0.6838365896980462, | |
"grad_norm": 4.544955730438232, | |
"learning_rate": 2.3953108656770018e-06, | |
"logits/chosen": 1.9560623168945312, | |
"logits/rejected": 1.7740411758422852, | |
"logps/chosen": -4.327980041503906, | |
"logps/rejected": -8.547250747680664, | |
"loss": 0.2301, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -6.491971492767334, | |
"rewards/margins": 6.328906059265137, | |
"rewards/rejected": -12.82087516784668, | |
"step": 770 | |
}, | |
{ | |
"epoch": 0.6927175843694494, | |
"grad_norm": 3.706414222717285, | |
"learning_rate": 2.3430237011767166e-06, | |
"logits/chosen": 2.664365291595459, | |
"logits/rejected": 2.338026285171509, | |
"logps/chosen": -3.8961780071258545, | |
"logps/rejected": -7.658777713775635, | |
"loss": 0.2398, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -5.84426736831665, | |
"rewards/margins": 5.643899917602539, | |
"rewards/rejected": -11.488167762756348, | |
"step": 780 | |
}, | |
{ | |
"epoch": 0.7015985790408525, | |
"grad_norm": 1.7868493795394897, | |
"learning_rate": 2.290805391669212e-06, | |
"logits/chosen": 3.1498961448669434, | |
"logits/rejected": 2.860450267791748, | |
"logps/chosen": -4.245728492736816, | |
"logps/rejected": -8.142782211303711, | |
"loss": 0.184, | |
"rewards/accuracies": 1.0, | |
"rewards/chosen": -6.368593215942383, | |
"rewards/margins": 5.845582008361816, | |
"rewards/rejected": -12.214174270629883, | |
"step": 790 | |
}, | |
{ | |
"epoch": 0.7104795737122558, | |
"grad_norm": 4.593838214874268, | |
"learning_rate": 2.238678841830867e-06, | |
"logits/chosen": 2.4697837829589844, | |
"logits/rejected": 2.062006950378418, | |
"logps/chosen": -4.312434673309326, | |
"logps/rejected": -7.9613471031188965, | |
"loss": 0.2373, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -6.468652248382568, | |
"rewards/margins": 5.4733686447143555, | |
"rewards/rejected": -11.942021369934082, | |
"step": 800 | |
}, | |
{ | |
"epoch": 0.7104795737122558, | |
"eval_logits/chosen": 2.5936477184295654, | |
"eval_logits/rejected": 2.3626792430877686, | |
"eval_logps/chosen": -3.92742919921875, | |
"eval_logps/rejected": -8.105939865112305, | |
"eval_loss": 0.24979564547538757, | |
"eval_rewards/accuracies": 0.9230769276618958, | |
"eval_rewards/chosen": -5.891143321990967, | |
"eval_rewards/margins": 6.267765045166016, | |
"eval_rewards/rejected": -12.15890884399414, | |
"eval_runtime": 26.9514, | |
"eval_samples_per_second": 27.012, | |
"eval_steps_per_second": 3.376, | |
"step": 800 | |
}, | |
{ | |
"epoch": 0.7193605683836589, | |
"grad_norm": 2.1978909969329834, | |
"learning_rate": 2.186666916089239e-06, | |
"logits/chosen": 2.2280454635620117, | |
"logits/rejected": 2.280139446258545, | |
"logps/chosen": -4.002463340759277, | |
"logps/rejected": -7.377104759216309, | |
"loss": 0.2502, | |
"rewards/accuracies": 0.9375, | |
"rewards/chosen": -6.003695487976074, | |
"rewards/margins": 5.061962127685547, | |
"rewards/rejected": -11.065656661987305, | |
"step": 810 | |
}, | |
{ | |
"epoch": 0.7282415630550622, | |
"grad_norm": 3.911423444747925, | |
"learning_rate": 2.134792428593971e-06, | |
"logits/chosen": 1.973581314086914, | |
"logits/rejected": 1.8624426126480103, | |
"logps/chosen": -3.6720118522644043, | |
"logps/rejected": -7.79262638092041, | |
"loss": 0.249, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -5.5080180168151855, | |
"rewards/margins": 6.18092155456543, | |
"rewards/rejected": -11.688939094543457, | |
"step": 820 | |
}, | |
{ | |
"epoch": 0.7371225577264654, | |
"grad_norm": 4.506085395812988, | |
"learning_rate": 2.0830781332097446e-06, | |
"logits/chosen": 1.825543999671936, | |
"logits/rejected": 1.6684534549713135, | |
"logps/chosen": -3.7447516918182373, | |
"logps/rejected": -7.67331075668335, | |
"loss": 0.21, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -5.617127895355225, | |
"rewards/margins": 5.892838954925537, | |
"rewards/rejected": -11.509965896606445, | |
"step": 830 | |
}, | |
{ | |
"epoch": 0.7460035523978685, | |
"grad_norm": 3.1987173557281494, | |
"learning_rate": 2.031546713535688e-06, | |
"logits/chosen": 2.580594778060913, | |
"logits/rejected": 2.386385917663574, | |
"logps/chosen": -3.8240609169006348, | |
"logps/rejected": -7.2592315673828125, | |
"loss": 0.2173, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -5.736091613769531, | |
"rewards/margins": 5.1527557373046875, | |
"rewards/rejected": -10.888847351074219, | |
"step": 840 | |
}, | |
{ | |
"epoch": 0.7548845470692718, | |
"grad_norm": 3.36380672454834, | |
"learning_rate": 1.9802207729556023e-06, | |
"logits/chosen": 2.644188404083252, | |
"logits/rejected": 2.3516969680786133, | |
"logps/chosen": -3.945826292037964, | |
"logps/rejected": -7.7842698097229, | |
"loss": 0.2176, | |
"rewards/accuracies": 0.9375, | |
"rewards/chosen": -5.918739318847656, | |
"rewards/margins": 5.757664680480957, | |
"rewards/rejected": -11.67640495300293, | |
"step": 850 | |
}, | |
{ | |
"epoch": 0.7548845470692718, | |
"eval_logits/chosen": 2.703193187713623, | |
"eval_logits/rejected": 2.4475576877593994, | |
"eval_logps/chosen": -3.872364044189453, | |
"eval_logps/rejected": -8.104667663574219, | |
"eval_loss": 0.23388977348804474, | |
"eval_rewards/accuracies": 0.9230769276618958, | |
"eval_rewards/chosen": -5.80854606628418, | |
"eval_rewards/margins": 6.34845495223999, | |
"eval_rewards/rejected": -12.157001495361328, | |
"eval_runtime": 26.9104, | |
"eval_samples_per_second": 27.053, | |
"eval_steps_per_second": 3.382, | |
"step": 850 | |
}, | |
{ | |
"epoch": 0.7637655417406749, | |
"grad_norm": 5.758064270019531, | |
"learning_rate": 1.9291228247233607e-06, | |
"logits/chosen": 1.682765245437622, | |
"logits/rejected": 1.4204728603363037, | |
"logps/chosen": -3.7396492958068848, | |
"logps/rejected": -7.976266384124756, | |
"loss": 0.2296, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -5.6094746589660645, | |
"rewards/margins": 6.354925632476807, | |
"rewards/rejected": -11.964399337768555, | |
"step": 860 | |
}, | |
{ | |
"epoch": 0.7726465364120781, | |
"grad_norm": 5.571817874908447, | |
"learning_rate": 1.8782752820878636e-06, | |
"logits/chosen": 3.255483627319336, | |
"logits/rejected": 2.909604787826538, | |
"logps/chosen": -4.347229957580566, | |
"logps/rejected": -8.916404724121094, | |
"loss": 0.1949, | |
"rewards/accuracies": 0.9375, | |
"rewards/chosen": -6.52084493637085, | |
"rewards/margins": 6.853763580322266, | |
"rewards/rejected": -13.374608039855957, | |
"step": 870 | |
}, | |
{ | |
"epoch": 0.7815275310834814, | |
"grad_norm": 4.394683361053467, | |
"learning_rate": 1.827700448461836e-06, | |
"logits/chosen": 2.1075878143310547, | |
"logits/rejected": 1.9253053665161133, | |
"logps/chosen": -4.578610420227051, | |
"logps/rejected": -9.08405876159668, | |
"loss": 0.203, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -6.867916107177734, | |
"rewards/margins": 6.758172512054443, | |
"rewards/rejected": -13.62608814239502, | |
"step": 880 | |
}, | |
{ | |
"epoch": 0.7904085257548845, | |
"grad_norm": 3.40975284576416, | |
"learning_rate": 1.7774205076388207e-06, | |
"logits/chosen": 1.9907169342041016, | |
"logits/rejected": 2.0108611583709717, | |
"logps/chosen": -3.837683916091919, | |
"logps/rejected": -8.051701545715332, | |
"loss": 0.2023, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -5.756525993347168, | |
"rewards/margins": 6.3210248947143555, | |
"rewards/rejected": -12.077550888061523, | |
"step": 890 | |
}, | |
{ | |
"epoch": 0.7992895204262878, | |
"grad_norm": 2.777862548828125, | |
"learning_rate": 1.7274575140626318e-06, | |
"logits/chosen": 2.684932231903076, | |
"logits/rejected": 2.3814995288848877, | |
"logps/chosen": -4.154343128204346, | |
"logps/rejected": -8.115530014038086, | |
"loss": 0.2205, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -6.231514930725098, | |
"rewards/margins": 5.941780090332031, | |
"rewards/rejected": -12.173295021057129, | |
"step": 900 | |
}, | |
{ | |
"epoch": 0.7992895204262878, | |
"eval_logits/chosen": 2.7355191707611084, | |
"eval_logits/rejected": 2.505545139312744, | |
"eval_logps/chosen": -4.136618614196777, | |
"eval_logps/rejected": -8.495417594909668, | |
"eval_loss": 0.23354515433311462, | |
"eval_rewards/accuracies": 0.9230769276618958, | |
"eval_rewards/chosen": -6.204929351806641, | |
"eval_rewards/margins": 6.538197040557861, | |
"eval_rewards/rejected": -12.74312686920166, | |
"eval_runtime": 26.9187, | |
"eval_samples_per_second": 27.044, | |
"eval_steps_per_second": 3.381, | |
"step": 900 | |
} | |
], | |
"logging_steps": 10, | |
"max_steps": 1500, | |
"num_input_tokens_seen": 0, | |
"num_train_epochs": 2, | |
"save_steps": 50, | |
"stateful_callbacks": { | |
"TrainerControl": { | |
"args": { | |
"should_epoch_stop": false, | |
"should_evaluate": false, | |
"should_log": false, | |
"should_save": true, | |
"should_training_stop": false | |
}, | |
"attributes": {} | |
} | |
}, | |
"total_flos": 2.1894701230042644e+18, | |
"train_batch_size": 1, | |
"trial_name": null, | |
"trial_params": null | |
} | |