{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3996447602131439,
  "eval_steps": 50,
  "global_step": 450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008880994671403197,
      "grad_norm": 0.045356735587120056,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.352750778198242,
      "logits/rejected": 14.82281494140625,
      "logps/chosen": -0.2592294216156006,
      "logps/rejected": -0.32852867245674133,
      "loss": 0.9315,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.38884416222572327,
      "rewards/margins": 0.10394889116287231,
      "rewards/rejected": -0.4927930235862732,
      "step": 10
    },
    {
      "epoch": 0.017761989342806393,
      "grad_norm": 0.05255189165472984,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.80792236328125,
      "logits/rejected": 15.017183303833008,
      "logps/chosen": -0.2825874388217926,
      "logps/rejected": -0.36373966932296753,
      "loss": 0.9318,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.4238811433315277,
      "rewards/margins": 0.12172831594944,
      "rewards/rejected": -0.5456094741821289,
      "step": 20
    },
    {
      "epoch": 0.02664298401420959,
      "grad_norm": 0.056027185171842575,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.45336627960205,
      "logits/rejected": 15.261484146118164,
      "logps/chosen": -0.2638034522533417,
      "logps/rejected": -0.37216562032699585,
      "loss": 0.9211,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.3957051932811737,
      "rewards/margins": 0.1625431776046753,
      "rewards/rejected": -0.5582484006881714,
      "step": 30
    },
    {
      "epoch": 0.035523978685612786,
      "grad_norm": 0.06511653959751129,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.59851360321045,
      "logits/rejected": 15.112770080566406,
      "logps/chosen": -0.28972768783569336,
      "logps/rejected": -0.36043626070022583,
      "loss": 0.941,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.43459147214889526,
      "rewards/margins": 0.10606291145086288,
      "rewards/rejected": -0.5406544208526611,
      "step": 40
    },
    {
      "epoch": 0.04440497335701599,
      "grad_norm": 0.0637577474117279,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.678179740905762,
      "logits/rejected": 15.114399909973145,
      "logps/chosen": -0.3033604919910431,
      "logps/rejected": -0.3262741267681122,
      "loss": 0.929,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.45504075288772583,
      "rewards/margins": 0.03437047079205513,
      "rewards/rejected": -0.4894111752510071,
      "step": 50
    },
    {
      "epoch": 0.04440497335701599,
      "eval_logits/chosen": 14.936029434204102,
      "eval_logits/rejected": 14.780267715454102,
      "eval_logps/chosen": -0.29386037588119507,
      "eval_logps/rejected": -0.3304942548274994,
      "eval_loss": 0.9458721876144409,
      "eval_rewards/accuracies": 0.49450549483299255,
      "eval_rewards/chosen": -0.4407905340194702,
      "eval_rewards/margins": 0.05495081841945648,
      "eval_rewards/rejected": -0.49574142694473267,
      "eval_runtime": 27.7436,
      "eval_samples_per_second": 26.24,
      "eval_steps_per_second": 3.28,
      "step": 50
    },
    {
      "epoch": 0.05328596802841918,
      "grad_norm": 0.06470987200737,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.19079303741455,
      "logits/rejected": 14.986845016479492,
      "logps/chosen": -0.26126712560653687,
      "logps/rejected": -0.31976616382598877,
      "loss": 0.9335,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.3919006288051605,
      "rewards/margins": 0.08774860948324203,
      "rewards/rejected": -0.47964924573898315,
      "step": 60
    },
    {
      "epoch": 0.06216696269982238,
      "grad_norm": 0.06163545697927475,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.062408447265625,
      "logits/rejected": 15.050743103027344,
      "logps/chosen": -0.2698076367378235,
      "logps/rejected": -0.37131980061531067,
      "loss": 0.9234,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.4047114849090576,
      "rewards/margins": 0.1522682160139084,
      "rewards/rejected": -0.556979775428772,
      "step": 70
    },
    {
      "epoch": 0.07104795737122557,
      "grad_norm": 0.05943402647972107,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 14.523780822753906,
      "logits/rejected": 15.173608779907227,
      "logps/chosen": -0.2835150957107544,
      "logps/rejected": -0.35379332304000854,
      "loss": 0.9317,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.4252726137638092,
      "rewards/margins": 0.10541732609272003,
      "rewards/rejected": -0.5306899547576904,
      "step": 80
    },
    {
      "epoch": 0.07992895204262877,
      "grad_norm": 0.05566830188035965,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.752237319946289,
      "logits/rejected": 15.213434219360352,
      "logps/chosen": -0.25490278005599976,
      "logps/rejected": -0.31673288345336914,
      "loss": 0.9276,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.38235417008399963,
      "rewards/margins": 0.09274514764547348,
      "rewards/rejected": -0.4750993847846985,
      "step": 90
    },
    {
      "epoch": 0.08880994671403197,
      "grad_norm": 0.07879115641117096,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.362078666687012,
      "logits/rejected": 14.708786010742188,
      "logps/chosen": -0.27969443798065186,
      "logps/rejected": -0.3294224143028259,
      "loss": 0.9448,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41954168677330017,
      "rewards/margins": 0.0745919868350029,
      "rewards/rejected": -0.4941336512565613,
      "step": 100
    },
    {
      "epoch": 0.08880994671403197,
      "eval_logits/chosen": 14.668691635131836,
      "eval_logits/rejected": 14.53217601776123,
      "eval_logps/chosen": -0.2837528884410858,
      "eval_logps/rejected": -0.33105531334877014,
      "eval_loss": 0.9382757544517517,
      "eval_rewards/accuracies": 0.5164835453033447,
      "eval_rewards/chosen": -0.4256293475627899,
      "eval_rewards/margins": 0.07095365226268768,
      "eval_rewards/rejected": -0.4965830445289612,
      "eval_runtime": 26.9204,
      "eval_samples_per_second": 27.043,
      "eval_steps_per_second": 3.38,
      "step": 100
    },
    {
      "epoch": 0.09769094138543517,
      "grad_norm": 0.0677201971411705,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 14.2677640914917,
      "logits/rejected": 14.437828063964844,
      "logps/chosen": -0.26914283633232117,
      "logps/rejected": -0.3498677909374237,
      "loss": 0.9307,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.40371423959732056,
      "rewards/margins": 0.12108743190765381,
      "rewards/rejected": -0.5248016715049744,
      "step": 110
    },
    {
      "epoch": 0.10657193605683836,
      "grad_norm": 0.08429163694381714,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 13.978253364562988,
      "logits/rejected": 14.673884391784668,
      "logps/chosen": -0.2842218279838562,
      "logps/rejected": -0.35421326756477356,
      "loss": 0.9046,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.4263327121734619,
      "rewards/margins": 0.10498716682195663,
      "rewards/rejected": -0.5313198566436768,
      "step": 120
    },
    {
      "epoch": 0.11545293072824156,
      "grad_norm": 0.08969741314649582,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 13.98906135559082,
      "logits/rejected": 14.70555305480957,
      "logps/chosen": -0.26557397842407227,
      "logps/rejected": -0.3823702037334442,
      "loss": 0.9151,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3983609974384308,
      "rewards/margins": 0.1751943826675415,
      "rewards/rejected": -0.5735553503036499,
      "step": 130
    },
    {
      "epoch": 0.12433392539964476,
      "grad_norm": 0.10470724105834961,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 13.948530197143555,
      "logits/rejected": 14.77311897277832,
      "logps/chosen": -0.2675064504146576,
      "logps/rejected": -0.3631269335746765,
      "loss": 0.9053,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.4012596607208252,
      "rewards/margins": 0.14343078434467316,
      "rewards/rejected": -0.5446904301643372,
      "step": 140
    },
    {
      "epoch": 0.13321492007104796,
      "grad_norm": 0.09238290041685104,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 12.939355850219727,
      "logits/rejected": 13.387840270996094,
      "logps/chosen": -0.27794820070266724,
      "logps/rejected": -0.3520648777484894,
      "loss": 0.9097,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.41692233085632324,
      "rewards/margins": 0.11117497831583023,
      "rewards/rejected": -0.5280972719192505,
      "step": 150
    },
    {
      "epoch": 0.13321492007104796,
      "eval_logits/chosen": 13.123927116394043,
      "eval_logits/rejected": 13.085403442382812,
      "eval_logps/chosen": -0.28104570508003235,
      "eval_logps/rejected": -0.3562033474445343,
      "eval_loss": 0.9194123148918152,
      "eval_rewards/accuracies": 0.5824176073074341,
      "eval_rewards/chosen": -0.42156851291656494,
      "eval_rewards/margins": 0.11273646354675293,
      "eval_rewards/rejected": -0.5343050360679626,
      "eval_runtime": 26.928,
      "eval_samples_per_second": 27.035,
      "eval_steps_per_second": 3.379,
      "step": 150
    },
    {
      "epoch": 0.14209591474245115,
      "grad_norm": 0.14459910988807678,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 11.937938690185547,
      "logits/rejected": 13.300163269042969,
      "logps/chosen": -0.24840429425239563,
      "logps/rejected": -0.4205872118473053,
      "loss": 0.8929,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.37260642647743225,
      "rewards/margins": 0.2582743763923645,
      "rewards/rejected": -0.6308808326721191,
      "step": 160
    },
    {
      "epoch": 0.15097690941385436,
      "grad_norm": 0.13749755918979645,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 12.549962997436523,
      "logits/rejected": 12.858027458190918,
      "logps/chosen": -0.2944340109825134,
      "logps/rejected": -0.38480544090270996,
      "loss": 0.895,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.44165101647377014,
      "rewards/margins": 0.1355571448802948,
      "rewards/rejected": -0.5772081613540649,
      "step": 170
    },
    {
      "epoch": 0.15985790408525755,
      "grad_norm": 0.14530803263187408,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 10.892537117004395,
      "logits/rejected": 11.756756782531738,
      "logps/chosen": -0.2693363130092621,
      "logps/rejected": -0.3874126374721527,
      "loss": 0.8801,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4040044844150543,
      "rewards/margins": 0.17711447179317474,
      "rewards/rejected": -0.5811189413070679,
      "step": 180
    },
    {
      "epoch": 0.16873889875666073,
      "grad_norm": 0.20053793489933014,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 9.238119125366211,
      "logits/rejected": 10.233763694763184,
      "logps/chosen": -0.269479900598526,
      "logps/rejected": -0.4149019122123718,
      "loss": 0.8776,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.4042198657989502,
      "rewards/margins": 0.21813304722309113,
      "rewards/rejected": -0.6223528385162354,
      "step": 190
    },
    {
      "epoch": 0.17761989342806395,
      "grad_norm": 0.20254959166049957,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 9.261235237121582,
      "logits/rejected": 9.7451753616333,
      "logps/chosen": -0.29611462354660034,
      "logps/rejected": -0.44530659914016724,
      "loss": 0.8596,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.4441719651222229,
      "rewards/margins": 0.22378793358802795,
      "rewards/rejected": -0.6679598689079285,
      "step": 200
    },
    {
      "epoch": 0.17761989342806395,
      "eval_logits/chosen": 8.231317520141602,
      "eval_logits/rejected": 8.285564422607422,
      "eval_logps/chosen": -0.3201982080936432,
      "eval_logps/rejected": -0.4862101078033447,
      "eval_loss": 0.8602121472358704,
      "eval_rewards/accuracies": 0.6153846383094788,
      "eval_rewards/chosen": -0.480297327041626,
      "eval_rewards/margins": 0.2490178644657135,
      "eval_rewards/rejected": -0.7293152213096619,
      "eval_runtime": 26.9054,
      "eval_samples_per_second": 27.058,
      "eval_steps_per_second": 3.382,
      "step": 200
    },
    {
      "epoch": 0.18650088809946713,
      "grad_norm": 0.25219231843948364,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 7.822943687438965,
      "logits/rejected": 8.847550392150879,
      "logps/chosen": -0.3111446797847748,
      "logps/rejected": -0.5253105163574219,
      "loss": 0.8553,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.46671706438064575,
      "rewards/margins": 0.3212486803531647,
      "rewards/rejected": -0.787965714931488,
      "step": 210
    },
    {
      "epoch": 0.19538188277087035,
      "grad_norm": 0.29052653908729553,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 7.404467582702637,
      "logits/rejected": 7.140469551086426,
      "logps/chosen": -0.35531798005104065,
      "logps/rejected": -0.4979163110256195,
      "loss": 0.8204,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.5329769849777222,
      "rewards/margins": 0.21389751136302948,
      "rewards/rejected": -0.7468745112419128,
      "step": 220
    },
    {
      "epoch": 0.20426287744227353,
      "grad_norm": 0.30769258737564087,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 5.096299648284912,
      "logits/rejected": 5.143233299255371,
      "logps/chosen": -0.3684440553188324,
      "logps/rejected": -0.557721734046936,
      "loss": 0.7884,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.5526660084724426,
      "rewards/margins": 0.2839165925979614,
      "rewards/rejected": -0.8365826606750488,
      "step": 230
    },
    {
      "epoch": 0.21314387211367672,
      "grad_norm": 0.3165770471096039,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 5.376433372497559,
      "logits/rejected": 4.738249778747559,
      "logps/chosen": -0.4181212782859802,
      "logps/rejected": -0.6782273054122925,
      "loss": 0.7919,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.627181887626648,
      "rewards/margins": 0.39015907049179077,
      "rewards/rejected": -1.0173410177230835,
      "step": 240
    },
    {
      "epoch": 0.22202486678507993,
      "grad_norm": 0.6854519844055176,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 3.3769917488098145,
      "logits/rejected": 2.865615129470825,
      "logps/chosen": -0.42344069480895996,
      "logps/rejected": -0.8046137094497681,
      "loss": 0.7456,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.6351610422134399,
      "rewards/margins": 0.5717595219612122,
      "rewards/rejected": -1.2069203853607178,
      "step": 250
    },
    {
      "epoch": 0.22202486678507993,
      "eval_logits/chosen": 2.8929049968719482,
      "eval_logits/rejected": 2.0500316619873047,
      "eval_logps/chosen": -0.46059882640838623,
      "eval_logps/rejected": -0.8680218458175659,
      "eval_loss": 0.7499477863311768,
      "eval_rewards/accuracies": 0.6373626589775085,
      "eval_rewards/chosen": -0.6908981204032898,
      "eval_rewards/margins": 0.6111345887184143,
      "eval_rewards/rejected": -1.3020328283309937,
      "eval_runtime": 26.9489,
      "eval_samples_per_second": 27.014,
      "eval_steps_per_second": 3.377,
      "step": 250
    },
    {
      "epoch": 0.23090586145648312,
      "grad_norm": 1.4715200662612915,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 1.1702392101287842,
      "logits/rejected": 0.6887636184692383,
      "logps/chosen": -0.4449438154697418,
      "logps/rejected": -1.0861380100250244,
      "loss": 0.6853,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.6674157381057739,
      "rewards/margins": 0.9617912173271179,
      "rewards/rejected": -1.6292070150375366,
      "step": 260
    },
    {
      "epoch": 0.23978685612788633,
      "grad_norm": 1.1773816347122192,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 2.005816698074341,
      "logits/rejected": 0.7508751749992371,
      "logps/chosen": -0.520461916923523,
      "logps/rejected": -1.2372539043426514,
      "loss": 0.6514,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.7806928157806396,
      "rewards/margins": 1.0751880407333374,
      "rewards/rejected": -1.8558809757232666,
      "step": 270
    },
    {
      "epoch": 0.24866785079928952,
      "grad_norm": 0.4426538348197937,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 1.3614282608032227,
      "logits/rejected": 0.17441503703594208,
      "logps/chosen": -0.628559410572052,
      "logps/rejected": -1.3841784000396729,
      "loss": 0.6447,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.9428391456604004,
      "rewards/margins": 1.1334283351898193,
      "rewards/rejected": -2.076267719268799,
      "step": 280
    },
    {
      "epoch": 0.25754884547069273,
      "grad_norm": 0.781704843044281,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 2.0829949378967285,
      "logits/rejected": 1.0815263986587524,
      "logps/chosen": -0.6394428014755249,
      "logps/rejected": -1.8771930932998657,
      "loss": 0.5863,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.9591643214225769,
      "rewards/margins": 1.8566251993179321,
      "rewards/rejected": -2.8157896995544434,
      "step": 290
    },
    {
      "epoch": 0.2664298401420959,
      "grad_norm": 1.1327613592147827,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 1.197505235671997,
      "logits/rejected": 0.16260084509849548,
      "logps/chosen": -0.6966903805732727,
      "logps/rejected": -2.2350592613220215,
      "loss": 0.5354,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.0450356006622314,
      "rewards/margins": 2.30755352973938,
      "rewards/rejected": -3.3525891304016113,
      "step": 300
    },
    {
      "epoch": 0.2664298401420959,
      "eval_logits/chosen": 1.2078615427017212,
      "eval_logits/rejected": 0.2717524468898773,
      "eval_logps/chosen": -0.7531170845031738,
      "eval_logps/rejected": -2.0594394207000732,
      "eval_loss": 0.5640697479248047,
      "eval_rewards/accuracies": 0.6703296899795532,
      "eval_rewards/chosen": -1.1296755075454712,
      "eval_rewards/margins": 1.9594837427139282,
      "eval_rewards/rejected": -3.0891592502593994,
      "eval_runtime": 26.9506,
      "eval_samples_per_second": 27.012,
      "eval_steps_per_second": 3.377,
      "step": 300
    },
    {
      "epoch": 0.2753108348134991,
      "grad_norm": 0.8920393586158752,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 1.12120521068573,
      "logits/rejected": 0.3509993851184845,
      "logps/chosen": -0.7844308018684387,
      "logps/rejected": -2.2886409759521484,
      "loss": 0.5357,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.176646113395691,
      "rewards/margins": 2.256315231323242,
      "rewards/rejected": -3.4329617023468018,
      "step": 310
    },
    {
      "epoch": 0.2841918294849023,
      "grad_norm": 0.6658002734184265,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 1.750349760055542,
      "logits/rejected": 0.7707412242889404,
      "logps/chosen": -0.7995473146438599,
      "logps/rejected": -2.2184109687805176,
      "loss": 0.5412,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.199320912361145,
      "rewards/margins": 2.128295421600342,
      "rewards/rejected": -3.3276162147521973,
      "step": 320
    },
    {
      "epoch": 0.29307282415630553,
      "grad_norm": 1.2708265781402588,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 1.8776671886444092,
      "logits/rejected": 1.2588229179382324,
      "logps/chosen": -0.894809901714325,
      "logps/rejected": -2.4718618392944336,
      "loss": 0.5502,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.342214822769165,
      "rewards/margins": 2.3655781745910645,
      "rewards/rejected": -3.7077927589416504,
      "step": 330
    },
    {
      "epoch": 0.3019538188277087,
      "grad_norm": 1.2345410585403442,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 1.9461545944213867,
      "logits/rejected": 0.9410673975944519,
      "logps/chosen": -0.891791045665741,
      "logps/rejected": -2.2217679023742676,
      "loss": 0.5519,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -1.3376867771148682,
      "rewards/margins": 1.9949653148651123,
      "rewards/rejected": -3.3326523303985596,
      "step": 340
    },
    {
      "epoch": 0.3108348134991119,
      "grad_norm": 11.159003257751465,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 2.396707534790039,
      "logits/rejected": 1.4882147312164307,
      "logps/chosen": -1.1443694829940796,
      "logps/rejected": -2.867492914199829,
      "loss": 0.4806,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.7165542840957642,
      "rewards/margins": 2.5846848487854004,
      "rewards/rejected": -4.301239490509033,
      "step": 350
    },
    {
      "epoch": 0.3108348134991119,
      "eval_logits/chosen": 1.7898317575454712,
      "eval_logits/rejected": 1.109007477760315,
      "eval_logps/chosen": -1.3312609195709229,
      "eval_logps/rejected": -3.0454280376434326,
      "eval_loss": 0.4698469638824463,
      "eval_rewards/accuracies": 0.8131868243217468,
      "eval_rewards/chosen": -1.9968912601470947,
      "eval_rewards/margins": 2.5712504386901855,
      "eval_rewards/rejected": -4.568141937255859,
      "eval_runtime": 26.9108,
      "eval_samples_per_second": 27.052,
      "eval_steps_per_second": 3.382,
      "step": 350
    },
    {
      "epoch": 0.3197158081705151,
      "grad_norm": 2.2653048038482666,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 0.36641037464141846,
      "logits/rejected": -0.4621815085411072,
      "logps/chosen": -1.5972144603729248,
      "logps/rejected": -3.1685004234313965,
      "loss": 0.472,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -2.3958218097686768,
      "rewards/margins": 2.356929063796997,
      "rewards/rejected": -4.752750396728516,
      "step": 360
    },
    {
      "epoch": 0.3285968028419183,
      "grad_norm": 2.6831283569335938,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 1.18975830078125,
      "logits/rejected": 0.7668083310127258,
      "logps/chosen": -2.1462299823760986,
      "logps/rejected": -3.7489593029022217,
      "loss": 0.4169,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -3.2193446159362793,
      "rewards/margins": 2.4040939807891846,
      "rewards/rejected": -5.623438835144043,
      "step": 370
    },
    {
      "epoch": 0.33747779751332146,
      "grad_norm": 3.5498828887939453,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 1.8030259609222412,
      "logits/rejected": 1.4276998043060303,
      "logps/chosen": -2.189352512359619,
      "logps/rejected": -3.8717925548553467,
      "loss": 0.4156,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.2840285301208496,
      "rewards/margins": 2.523660182952881,
      "rewards/rejected": -5.807689189910889,
      "step": 380
    },
    {
      "epoch": 0.3463587921847247,
      "grad_norm": 1.46839439868927,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 2.2764127254486084,
      "logits/rejected": 1.9199844598770142,
      "logps/chosen": -2.4463610649108887,
      "logps/rejected": -4.317194938659668,
      "loss": 0.4085,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -3.669541597366333,
      "rewards/margins": 2.806250810623169,
      "rewards/rejected": -6.475791931152344,
      "step": 390
    },
    {
      "epoch": 0.3552397868561279,
      "grad_norm": 1.6744616031646729,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 2.0364136695861816,
      "logits/rejected": 1.4645214080810547,
      "logps/chosen": -2.5738539695739746,
      "logps/rejected": -4.41655158996582,
      "loss": 0.3723,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -3.8607802391052246,
      "rewards/margins": 2.764047145843506,
      "rewards/rejected": -6.6248273849487305,
      "step": 400
    },
    {
      "epoch": 0.3552397868561279,
      "eval_logits/chosen": 2.0340981483459473,
      "eval_logits/rejected": 1.376452922821045,
      "eval_logps/chosen": -2.6443424224853516,
      "eval_logps/rejected": -4.638274192810059,
      "eval_loss": 0.38738909363746643,
      "eval_rewards/accuracies": 0.901098906993866,
      "eval_rewards/chosen": -3.9665138721466064,
      "eval_rewards/margins": 2.9908969402313232,
      "eval_rewards/rejected": -6.95741081237793,
      "eval_runtime": 26.943,
      "eval_samples_per_second": 27.02,
      "eval_steps_per_second": 3.377,
      "step": 400
    },
    {
      "epoch": 0.3641207815275311,
      "grad_norm": 24.103384017944336,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": 1.3653905391693115,
      "logits/rejected": 0.7307125329971313,
      "logps/chosen": -2.450911045074463,
      "logps/rejected": -4.609705924987793,
      "loss": 0.4031,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -3.6763663291931152,
      "rewards/margins": 3.2381927967071533,
      "rewards/rejected": -6.914559364318848,
      "step": 410
    },
    {
      "epoch": 0.37300177619893427,
      "grad_norm": 2.102792263031006,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": 1.6425203084945679,
      "logits/rejected": 0.9890750050544739,
      "logps/chosen": -2.1248934268951416,
      "logps/rejected": -3.8592541217803955,
      "loss": 0.3609,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -3.187340497970581,
      "rewards/margins": 2.601541042327881,
      "rewards/rejected": -5.788880825042725,
      "step": 420
    },
    {
      "epoch": 0.38188277087033745,
      "grad_norm": 2.970210552215576,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": 2.258129358291626,
      "logits/rejected": 1.7126191854476929,
      "logps/chosen": -2.9822497367858887,
      "logps/rejected": -4.995323657989502,
      "loss": 0.3669,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.473374366760254,
      "rewards/margins": 3.01961088180542,
      "rewards/rejected": -7.492985725402832,
      "step": 430
    },
    {
      "epoch": 0.3907637655417407,
      "grad_norm": 2.3307077884674072,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": 1.5054914951324463,
      "logits/rejected": 0.6257806420326233,
      "logps/chosen": -3.2783126831054688,
      "logps/rejected": -5.625805854797363,
      "loss": 0.3428,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.917469024658203,
      "rewards/margins": 3.521239757537842,
      "rewards/rejected": -8.438708305358887,
      "step": 440
    },
    {
      "epoch": 0.3996447602131439,
      "grad_norm": 3.2838215827941895,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": 1.711268424987793,
      "logits/rejected": 1.4157356023788452,
      "logps/chosen": -3.0163674354553223,
      "logps/rejected": -5.020781517028809,
      "loss": 0.3589,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.524550437927246,
      "rewards/margins": 3.006621837615967,
      "rewards/rejected": -7.531172752380371,
      "step": 450
    },
    {
      "epoch": 0.3996447602131439,
      "eval_logits/chosen": 2.209981679916382,
      "eval_logits/rejected": 1.6256438493728638,
      "eval_logps/chosen": -2.841517925262451,
      "eval_logps/rejected": -5.154296875,
      "eval_loss": 0.35665163397789,
      "eval_rewards/accuracies": 0.9120879173278809,
      "eval_rewards/chosen": -4.262276649475098,
      "eval_rewards/margins": 3.4691689014434814,
      "eval_rewards/rejected": -7.7314453125,
      "eval_runtime": 26.9493,
      "eval_samples_per_second": 27.014,
      "eval_steps_per_second": 3.377,
      "step": 450
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0936206119781007e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}