{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7265388496468214,
  "eval_steps": 50,
  "global_step": 900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008072653884964682,
      "grad_norm": 0.04381619393825531,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.719314575195312,
      "logits/rejected": 15.156938552856445,
      "logps/chosen": -0.2856016755104065,
      "logps/rejected": -0.31895095109939575,
      "loss": 0.9242,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.42840251326560974,
      "rewards/margins": 0.050023891031742096,
      "rewards/rejected": -0.47842639684677124,
      "step": 10
    },
    {
      "epoch": 0.016145307769929364,
      "grad_norm": 0.05155143886804581,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.559402465820312,
      "logits/rejected": 15.32939338684082,
      "logps/chosen": -0.2736968398094177,
      "logps/rejected": -0.3458033502101898,
      "loss": 0.9127,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4105452597141266,
      "rewards/margins": 0.10815979540348053,
      "rewards/rejected": -0.5187050104141235,
      "step": 20
    },
    {
      "epoch": 0.024217961654894045,
      "grad_norm": 0.05071854218840599,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.653738021850586,
      "logits/rejected": 15.168347358703613,
      "logps/chosen": -0.2985997200012207,
      "logps/rejected": -0.34624338150024414,
      "loss": 0.9141,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.44789963960647583,
      "rewards/margins": 0.07146544009447098,
      "rewards/rejected": -0.5193650722503662,
      "step": 30
    },
    {
      "epoch": 0.03229061553985873,
      "grad_norm": 0.052318744361400604,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.621539115905762,
      "logits/rejected": 15.138806343078613,
      "logps/chosen": -0.27971988916397095,
      "logps/rejected": -0.360626757144928,
      "loss": 0.9313,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4195798337459564,
      "rewards/margins": 0.12136033922433853,
      "rewards/rejected": -0.5409401655197144,
      "step": 40
    },
    {
      "epoch": 0.04036326942482341,
      "grad_norm": 0.06900553405284882,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.308789253234863,
      "logits/rejected": 14.605737686157227,
      "logps/chosen": -0.2685723304748535,
      "logps/rejected": -0.323064386844635,
      "loss": 0.9076,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.40285855531692505,
      "rewards/margins": 0.08173803985118866,
      "rewards/rejected": -0.4845965802669525,
      "step": 50
    },
    {
      "epoch": 0.04036326942482341,
      "eval_logits/chosen": 14.528907775878906,
      "eval_logits/rejected": 15.016877174377441,
      "eval_logps/chosen": -0.2801212966442108,
      "eval_logps/rejected": -0.34862396121025085,
      "eval_loss": 0.9108895063400269,
      "eval_rewards/accuracies": 0.5544554591178894,
      "eval_rewards/chosen": -0.4201819598674774,
      "eval_rewards/margins": 0.10275395959615707,
      "eval_rewards/rejected": -0.5229359865188599,
      "eval_runtime": 30.01,
      "eval_samples_per_second": 26.691,
      "eval_steps_per_second": 3.366,
      "step": 50
    },
    {
      "epoch": 0.04843592330978809,
      "grad_norm": 0.32321593165397644,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 14.644981384277344,
      "logits/rejected": 15.177103996276855,
      "logps/chosen": -0.26382654905319214,
      "logps/rejected": -0.33932510018348694,
      "loss": 0.9204,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.3957397937774658,
      "rewards/margins": 0.1132478266954422,
      "rewards/rejected": -0.5089876055717468,
      "step": 60
    },
    {
      "epoch": 0.056508577194752774,
      "grad_norm": 0.07268164306879044,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 14.562113761901855,
      "logits/rejected": 15.092450141906738,
      "logps/chosen": -0.2856511175632477,
      "logps/rejected": -0.34295767545700073,
      "loss": 0.915,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.42847663164138794,
      "rewards/margins": 0.08595988899469376,
      "rewards/rejected": -0.5144366025924683,
      "step": 70
    },
    {
      "epoch": 0.06458123107971746,
      "grad_norm": 0.06727313250303268,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 13.979713439941406,
      "logits/rejected": 14.924532890319824,
      "logps/chosen": -0.27184560894966125,
      "logps/rejected": -0.3679867386817932,
      "loss": 0.9223,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.4077683985233307,
      "rewards/margins": 0.14421164989471436,
      "rewards/rejected": -0.5519800186157227,
      "step": 80
    },
    {
      "epoch": 0.07265388496468214,
      "grad_norm": 0.06138753890991211,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 14.366241455078125,
      "logits/rejected": 14.924840927124023,
      "logps/chosen": -0.2656143009662628,
      "logps/rejected": -0.3583180606365204,
      "loss": 0.9117,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.39842137694358826,
      "rewards/margins": 0.13905569911003113,
      "rewards/rejected": -0.5374771356582642,
      "step": 90
    },
    {
      "epoch": 0.08072653884964683,
      "grad_norm": 0.14299456775188446,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 14.401769638061523,
      "logits/rejected": 14.532609939575195,
      "logps/chosen": -0.2966740131378174,
      "logps/rejected": -0.3347373604774475,
      "loss": 0.9162,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.4450109899044037,
      "rewards/margins": 0.05709508806467056,
      "rewards/rejected": -0.5021060705184937,
      "step": 100
    },
    {
      "epoch": 0.08072653884964683,
      "eval_logits/chosen": 14.096770286560059,
      "eval_logits/rejected": 14.638699531555176,
      "eval_logps/chosen": -0.2713560461997986,
      "eval_logps/rejected": -0.35128629207611084,
      "eval_loss": 0.900999128818512,
      "eval_rewards/accuracies": 0.5643564462661743,
      "eval_rewards/chosen": -0.4070340394973755,
      "eval_rewards/margins": 0.11989541351795197,
      "eval_rewards/rejected": -0.5269294381141663,
      "eval_runtime": 29.986,
      "eval_samples_per_second": 26.712,
      "eval_steps_per_second": 3.368,
      "step": 100
    },
    {
      "epoch": 0.08879919273461151,
      "grad_norm": 0.0759090781211853,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 13.392621994018555,
      "logits/rejected": 14.395462036132812,
      "logps/chosen": -0.22954440116882324,
      "logps/rejected": -0.36977845430374146,
      "loss": 0.8951,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.3443165421485901,
      "rewards/margins": 0.21035107970237732,
      "rewards/rejected": -0.5546677112579346,
      "step": 110
    },
    {
      "epoch": 0.09687184661957618,
      "grad_norm": 0.155408576130867,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 13.7767915725708,
      "logits/rejected": 14.654029846191406,
      "logps/chosen": -0.2693817615509033,
      "logps/rejected": -0.38339418172836304,
      "loss": 0.9012,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.40407267212867737,
      "rewards/margins": 0.17101867496967316,
      "rewards/rejected": -0.5750913619995117,
      "step": 120
    },
    {
      "epoch": 0.10494450050454086,
      "grad_norm": 0.31760165095329285,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 13.619878768920898,
      "logits/rejected": 14.147298812866211,
      "logps/chosen": -0.2787878215312958,
      "logps/rejected": -0.35886240005493164,
      "loss": 0.8911,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.41818171739578247,
      "rewards/margins": 0.12011190503835678,
      "rewards/rejected": -0.5382936000823975,
      "step": 130
    },
    {
      "epoch": 0.11301715438950555,
      "grad_norm": 0.10735614597797394,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 13.27166748046875,
      "logits/rejected": 13.826273918151855,
      "logps/chosen": -0.25437131524086,
      "logps/rejected": -0.3877837061882019,
      "loss": 0.8915,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.3815569281578064,
      "rewards/margins": 0.20011858642101288,
      "rewards/rejected": -0.5816755890846252,
      "step": 140
    },
    {
      "epoch": 0.12108980827447023,
      "grad_norm": 0.12984605133533478,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 12.981040954589844,
      "logits/rejected": 13.055997848510742,
      "logps/chosen": -0.27253809571266174,
      "logps/rejected": -0.32365134358406067,
      "loss": 0.8954,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.4088071286678314,
      "rewards/margins": 0.07666991651058197,
      "rewards/rejected": -0.4854770302772522,
      "step": 150
    },
    {
      "epoch": 0.12108980827447023,
      "eval_logits/chosen": 12.205692291259766,
      "eval_logits/rejected": 12.830544471740723,
      "eval_logps/chosen": -0.2716449201107025,
      "eval_logps/rejected": -0.37988847494125366,
      "eval_loss": 0.8780961036682129,
      "eval_rewards/accuracies": 0.5841584205627441,
      "eval_rewards/chosen": -0.40746742486953735,
      "eval_rewards/margins": 0.16236530244350433,
      "eval_rewards/rejected": -0.5698326826095581,
      "eval_runtime": 30.0006,
      "eval_samples_per_second": 26.699,
      "eval_steps_per_second": 3.367,
      "step": 150
    },
    {
      "epoch": 0.12916246215943492,
      "grad_norm": 0.1466989368200302,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 12.592086791992188,
      "logits/rejected": 12.590131759643555,
      "logps/chosen": -0.2901991307735443,
      "logps/rejected": -0.37141314148902893,
      "loss": 0.8814,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.4352986812591553,
      "rewards/margins": 0.12182100117206573,
      "rewards/rejected": -0.5571196675300598,
      "step": 160
    },
    {
      "epoch": 0.13723511604439959,
      "grad_norm": 0.15794874727725983,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 10.324287414550781,
      "logits/rejected": 11.248865127563477,
      "logps/chosen": -0.24505829811096191,
      "logps/rejected": -0.4437941610813141,
      "loss": 0.8739,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.3675874173641205,
      "rewards/margins": 0.29810377955436707,
      "rewards/rejected": -0.6656912565231323,
      "step": 170
    },
    {
      "epoch": 0.14530776992936428,
      "grad_norm": 0.34027722477912903,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 9.277830123901367,
      "logits/rejected": 10.56584644317627,
      "logps/chosen": -0.29055729508399963,
      "logps/rejected": -0.4694874882698059,
      "loss": 0.8732,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.43583592772483826,
      "rewards/margins": 0.2683953046798706,
      "rewards/rejected": -0.7042312026023865,
      "step": 180
    },
    {
      "epoch": 0.15338042381432895,
      "grad_norm": 0.21653155982494354,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 6.692442417144775,
      "logits/rejected": 8.371492385864258,
      "logps/chosen": -0.2739722728729248,
      "logps/rejected": -0.5168331265449524,
      "loss": 0.8425,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.4109583795070648,
      "rewards/margins": 0.36429136991500854,
      "rewards/rejected": -0.7752498388290405,
      "step": 190
    },
    {
      "epoch": 0.16145307769929365,
      "grad_norm": 0.27401649951934814,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 7.028637886047363,
      "logits/rejected": 7.22598123550415,
      "logps/chosen": -0.32309776544570923,
      "logps/rejected": -0.5094671249389648,
      "loss": 0.8327,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.48464661836624146,
      "rewards/margins": 0.2795540988445282,
      "rewards/rejected": -0.7642006874084473,
      "step": 200
    },
    {
      "epoch": 0.16145307769929365,
      "eval_logits/chosen": 6.369185924530029,
      "eval_logits/rejected": 6.641132831573486,
      "eval_logps/chosen": -0.32840561866760254,
      "eval_logps/rejected": -0.5301258563995361,
      "eval_loss": 0.8103437423706055,
      "eval_rewards/accuracies": 0.6237623691558838,
      "eval_rewards/chosen": -0.4926084876060486,
      "eval_rewards/margins": 0.30258041620254517,
      "eval_rewards/rejected": -0.795188844203949,
      "eval_runtime": 29.9886,
      "eval_samples_per_second": 26.71,
      "eval_steps_per_second": 3.368,
      "step": 200
    },
    {
      "epoch": 0.16952573158425832,
      "grad_norm": 0.3183073103427887,
      "learning_rate": 4.762067631165049e-06,
      "logits/chosen": 5.25254487991333,
      "logits/rejected": 5.84013032913208,
      "logps/chosen": -0.3624248802661896,
      "logps/rejected": -0.6147049069404602,
      "loss": 0.7877,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.5436373949050903,
      "rewards/margins": 0.3784201443195343,
      "rewards/rejected": -0.9220573306083679,
      "step": 210
    },
    {
      "epoch": 0.17759838546922302,
      "grad_norm": 0.3535729646682739,
      "learning_rate": 4.7392794005985324e-06,
      "logits/chosen": 4.473980903625488,
      "logits/rejected": 3.9927191734313965,
      "logps/chosen": -0.3647093176841736,
      "logps/rejected": -0.6410630345344543,
      "loss": 0.7816,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.547063946723938,
      "rewards/margins": 0.4145306646823883,
      "rewards/rejected": -0.9615945816040039,
      "step": 220
    },
    {
      "epoch": 0.1856710393541877,
      "grad_norm": 0.4819677174091339,
      "learning_rate": 4.715508948078037e-06,
      "logits/chosen": 2.7333035469055176,
      "logits/rejected": 2.521853446960449,
      "logps/chosen": -0.40259629487991333,
      "logps/rejected": -0.7537732720375061,
      "loss": 0.7306,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.6038944721221924,
      "rewards/margins": 0.5267654657363892,
      "rewards/rejected": -1.1306599378585815,
      "step": 230
    },
    {
      "epoch": 0.19374369323915236,
      "grad_norm": 0.4125296175479889,
      "learning_rate": 4.690766700109659e-06,
      "logits/chosen": 2.212467908859253,
      "logits/rejected": 1.1434030532836914,
      "logps/chosen": -0.4652811884880066,
      "logps/rejected": -0.8928227424621582,
      "loss": 0.7214,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.6979218125343323,
      "rewards/margins": 0.6413123607635498,
      "rewards/rejected": -1.3392341136932373,
      "step": 240
    },
    {
      "epoch": 0.20181634712411706,
      "grad_norm": 0.4265546202659607,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 0.4756811559200287,
      "logits/rejected": 0.07218921184539795,
      "logps/chosen": -0.4880926012992859,
      "logps/rejected": -1.0095646381378174,
      "loss": 0.6811,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.7321388721466064,
      "rewards/margins": 0.7822080850601196,
      "rewards/rejected": -1.5143468379974365,
      "step": 250
    },
    {
      "epoch": 0.20181634712411706,
      "eval_logits/chosen": 1.6732138395309448,
      "eval_logits/rejected": 0.5167235732078552,
      "eval_logps/chosen": -0.5383209586143494,
      "eval_logps/rejected": -1.0026048421859741,
      "eval_loss": 0.6842760443687439,
      "eval_rewards/accuracies": 0.6336633563041687,
      "eval_rewards/chosen": -0.8074814677238464,
      "eval_rewards/margins": 0.6964258551597595,
      "eval_rewards/rejected": -1.5039072036743164,
      "eval_runtime": 29.9884,
      "eval_samples_per_second": 26.71,
      "eval_steps_per_second": 3.368,
      "step": 250
    },
    {
      "epoch": 0.20988900100908173,
      "grad_norm": 0.5196985006332397,
      "learning_rate": 4.638410650401267e-06,
      "logits/chosen": 1.7947940826416016,
      "logits/rejected": 0.9839111566543579,
      "logps/chosen": -0.6005308032035828,
      "logps/rejected": -0.9484688639640808,
      "loss": 0.7141,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.9007962942123413,
      "rewards/margins": 0.5219069719314575,
      "rewards/rejected": -1.4227031469345093,
      "step": 260
    },
    {
      "epoch": 0.21796165489404642,
      "grad_norm": 1.302403450012207,
      "learning_rate": 4.610819813755038e-06,
      "logits/chosen": 2.2894890308380127,
      "logits/rejected": 1.2887728214263916,
      "logps/chosen": -0.5904151797294617,
      "logps/rejected": -1.1889005899429321,
      "loss": 0.6647,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.8856227993965149,
      "rewards/margins": 0.8977279663085938,
      "rewards/rejected": -1.783350944519043,
      "step": 270
    },
    {
      "epoch": 0.2260343087790111,
      "grad_norm": 0.7729688286781311,
      "learning_rate": 4.582303101775249e-06,
      "logits/chosen": 0.4874440133571625,
      "logits/rejected": -0.3855375349521637,
      "logps/chosen": -0.624158501625061,
      "logps/rejected": -1.4413455724716187,
      "loss": 0.5629,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.9362378120422363,
      "rewards/margins": 1.2257804870605469,
      "rewards/rejected": -2.162018299102783,
      "step": 280
    },
    {
      "epoch": 0.2341069626639758,
      "grad_norm": 0.41621893644332886,
      "learning_rate": 4.55287302283426e-06,
      "logits/chosen": 1.3461151123046875,
      "logits/rejected": 0.733107328414917,
      "logps/chosen": -0.7516278624534607,
      "logps/rejected": -1.6215450763702393,
      "loss": 0.5544,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -1.1274420022964478,
      "rewards/margins": 1.3048756122589111,
      "rewards/rejected": -2.4323174953460693,
      "step": 290
    },
    {
      "epoch": 0.24217961654894046,
      "grad_norm": 0.45633476972579956,
      "learning_rate": 4.522542485937369e-06,
      "logits/chosen": 0.6991375684738159,
      "logits/rejected": 0.1667344868183136,
      "logps/chosen": -0.7924041152000427,
      "logps/rejected": -2.521883249282837,
      "loss": 0.4968,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -1.1886063814163208,
      "rewards/margins": 2.5942187309265137,
      "rewards/rejected": -3.782824754714966,
      "step": 300
    },
    {
      "epoch": 0.24217961654894046,
      "eval_logits/chosen": 1.107033610343933,
      "eval_logits/rejected": 0.10493909567594528,
      "eval_logps/chosen": -0.8693537712097168,
      "eval_logps/rejected": -2.1045310497283936,
      "eval_loss": 0.4899609684944153,
      "eval_rewards/accuracies": 0.6633663177490234,
      "eval_rewards/chosen": -1.3040307760238647,
      "eval_rewards/margins": 1.852765679359436,
      "eval_rewards/rejected": -3.15679669380188,
      "eval_runtime": 30.0102,
      "eval_samples_per_second": 26.691,
      "eval_steps_per_second": 3.366,
      "step": 300
    },
    {
      "epoch": 0.25025227043390513,
      "grad_norm": 2.2053284645080566,
      "learning_rate": 4.491324795060491e-06,
      "logits/chosen": 0.6556342244148254,
      "logits/rejected": -0.14924369752407074,
      "logps/chosen": -0.8949082493782043,
      "logps/rejected": -2.1485395431518555,
      "loss": 0.5372,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.342362403869629,
      "rewards/margins": 1.8804467916488647,
      "rewards/rejected": -3.222809314727783,
      "step": 310
    },
    {
      "epoch": 0.25832492431886983,
      "grad_norm": 0.8357079029083252,
      "learning_rate": 4.4592336433146e-06,
      "logits/chosen": 1.4236268997192383,
      "logits/rejected": 0.44775086641311646,
      "logps/chosen": -0.8566747903823853,
      "logps/rejected": -2.4265379905700684,
      "loss": 0.4974,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.285012125968933,
      "rewards/margins": 2.35479474067688,
      "rewards/rejected": -3.6398072242736816,
      "step": 320
    },
    {
      "epoch": 0.26639757820383453,
      "grad_norm": 1.5514745712280273,
      "learning_rate": 4.426283106939474e-06,
      "logits/chosen": 1.0238934755325317,
      "logits/rejected": 0.31885427236557007,
      "logps/chosen": -0.9286335110664368,
      "logps/rejected": -2.957723379135132,
      "loss": 0.4777,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.3929500579833984,
      "rewards/margins": 3.043635129928589,
      "rewards/rejected": -4.436585426330566,
      "step": 330
    },
    {
      "epoch": 0.27447023208879917,
      "grad_norm": 0.7523798942565918,
      "learning_rate": 4.3924876391293915e-06,
      "logits/chosen": 1.0386161804199219,
      "logits/rejected": 0.1279783844947815,
      "logps/chosen": -1.0053694248199463,
      "logps/rejected": -2.8961727619171143,
      "loss": 0.4718,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.5080541372299194,
      "rewards/margins": 2.8362045288085938,
      "rewards/rejected": -4.344258785247803,
      "step": 340
    },
    {
      "epoch": 0.28254288597376387,
      "grad_norm": 0.6102933287620544,
      "learning_rate": 4.357862063693486e-06,
      "logits/chosen": 0.5982325077056885,
      "logits/rejected": 0.07386422157287598,
      "logps/chosen": -1.0740950107574463,
      "logps/rejected": -2.4773449897766113,
      "loss": 0.4909,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.6111425161361694,
      "rewards/margins": 2.104874849319458,
      "rewards/rejected": -3.716017246246338,
      "step": 350
    },
    {
      "epoch": 0.28254288597376387,
      "eval_logits/chosen": 1.3659090995788574,
      "eval_logits/rejected": 0.5649093985557556,
      "eval_logps/chosen": -1.113811731338501,
      "eval_logps/rejected": -2.65985107421875,
      "eval_loss": 0.44899094104766846,
      "eval_rewards/accuracies": 0.6633663177490234,
      "eval_rewards/chosen": -1.6707175970077515,
      "eval_rewards/margins": 2.319058895111084,
      "eval_rewards/rejected": -3.989776849746704,
      "eval_runtime": 29.9887,
      "eval_samples_per_second": 26.71,
      "eval_steps_per_second": 3.368,
      "step": 350
    },
    {
      "epoch": 0.29061553985872857,
      "grad_norm": 0.4950815737247467,
      "learning_rate": 4.322421568553529e-06,
      "logits/chosen": 1.4646549224853516,
      "logits/rejected": 1.0656757354736328,
      "logps/chosen": -1.0681949853897095,
      "logps/rejected": -2.9191997051239014,
      "loss": 0.4561,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.6022924184799194,
      "rewards/margins": 2.7765071392059326,
      "rewards/rejected": -4.3787994384765625,
      "step": 360
    },
    {
      "epoch": 0.29868819374369326,
      "grad_norm": 1.830091118812561,
      "learning_rate": 4.286181699082008e-06,
      "logits/chosen": 2.0835390090942383,
      "logits/rejected": 1.3285930156707764,
      "logps/chosen": -1.1288923025131226,
      "logps/rejected": -3.2559380531311035,
      "loss": 0.4305,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.693338394165039,
      "rewards/margins": 3.190568208694458,
      "rewards/rejected": -4.883906364440918,
      "step": 370
    },
    {
      "epoch": 0.3067608476286579,
      "grad_norm": 2.1292569637298584,
      "learning_rate": 4.249158351283414e-06,
      "logits/chosen": 1.5609261989593506,
      "logits/rejected": 1.0038378238677979,
      "logps/chosen": -1.2937371730804443,
      "logps/rejected": -3.3292288780212402,
      "loss": 0.4358,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.9406057596206665,
      "rewards/margins": 3.0532374382019043,
      "rewards/rejected": -4.993843078613281,
      "step": 380
    },
    {
      "epoch": 0.3148335015136226,
      "grad_norm": 2.124483346939087,
      "learning_rate": 4.211367764821722e-06,
      "logits/chosen": 1.9905683994293213,
      "logits/rejected": 1.498375415802002,
      "logps/chosen": -1.4837720394134521,
      "logps/rejected": -3.7814183235168457,
      "loss": 0.4565,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -2.225658416748047,
      "rewards/margins": 3.44646954536438,
      "rewards/rejected": -5.672127723693848,
      "step": 390
    },
    {
      "epoch": 0.3229061553985873,
      "grad_norm": 1.8990832567214966,
      "learning_rate": 4.172826515897146e-06,
      "logits/chosen": 2.05663800239563,
      "logits/rejected": 1.7521194219589233,
      "logps/chosen": -1.662553071975708,
      "logps/rejected": -4.254827976226807,
      "loss": 0.408,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -2.4938297271728516,
      "rewards/margins": 3.8884124755859375,
      "rewards/rejected": -6.382241725921631,
      "step": 400
    },
    {
      "epoch": 0.3229061553985873,
      "eval_logits/chosen": 1.7070311307907104,
      "eval_logits/rejected": 1.3909664154052734,
      "eval_logps/chosen": -2.0459556579589844,
      "eval_logps/rejected": -4.069729804992676,
      "eval_loss": 0.38578492403030396,
      "eval_rewards/accuracies": 0.8613861203193665,
      "eval_rewards/chosen": -3.0689334869384766,
      "eval_rewards/margins": 3.035661458969116,
      "eval_rewards/rejected": -6.104594707489014,
      "eval_runtime": 30.015,
      "eval_samples_per_second": 26.687,
      "eval_steps_per_second": 3.365,
      "step": 400
    },
    {
      "epoch": 0.33097880928355194,
      "grad_norm": 2.627016067504883,
      "learning_rate": 4.133551509975264e-06,
      "logits/chosen": 1.8818864822387695,
      "logits/rejected": 1.6801849603652954,
      "logps/chosen": -2.145986318588257,
      "logps/rejected": -4.5329155921936035,
      "loss": 0.3894,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -3.218979597091675,
      "rewards/margins": 3.5803935527801514,
      "rewards/rejected": -6.799372673034668,
      "step": 410
    },
    {
      "epoch": 0.33905146316851664,
      "grad_norm": 2.1047608852386475,
      "learning_rate": 4.093559974371725e-06,
      "logits/chosen": 2.082624912261963,
      "logits/rejected": 1.7804310321807861,
      "logps/chosen": -2.498039722442627,
      "logps/rejected": -4.982306957244873,
      "loss": 0.3853,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.7470593452453613,
      "rewards/margins": 3.7264015674591064,
      "rewards/rejected": -7.4734601974487305,
      "step": 420
    },
    {
      "epoch": 0.34712411705348134,
      "grad_norm": 1.7552043199539185,
      "learning_rate": 4.052869450695776e-06,
      "logits/chosen": 2.693586826324463,
      "logits/rejected": 2.2467360496520996,
      "logps/chosen": -2.717463731765747,
      "logps/rejected": -5.170213222503662,
      "loss": 0.3669,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -4.07619571685791,
      "rewards/margins": 3.6791248321533203,
      "rewards/rejected": -7.755320072174072,
      "step": 430
    },
    {
      "epoch": 0.35519677093844604,
      "grad_norm": 1.5780164003372192,
      "learning_rate": 4.011497787155938e-06,
      "logits/chosen": 1.6175695657730103,
      "logits/rejected": 1.4590227603912354,
      "logps/chosen": -2.6292812824249268,
      "logps/rejected": -5.3910956382751465,
      "loss": 0.3333,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -3.9439215660095215,
      "rewards/margins": 4.142721176147461,
      "rewards/rejected": -8.086642265319824,
      "step": 440
    },
    {
      "epoch": 0.3632694248234107,
      "grad_norm": 4.690535545349121,
      "learning_rate": 3.969463130731183e-06,
      "logits/chosen": 1.8307468891143799,
      "logits/rejected": 1.500794768333435,
      "logps/chosen": -2.872072696685791,
      "logps/rejected": -5.305605411529541,
      "loss": 0.356,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -4.308108329772949,
      "rewards/margins": 3.6502983570098877,
      "rewards/rejected": -7.958407402038574,
      "step": 450
    },
    {
      "epoch": 0.3632694248234107,
      "eval_logits/chosen": 1.702280044555664,
      "eval_logits/rejected": 1.3865309953689575,
      "eval_logps/chosen": -2.8212814331054688,
      "eval_logps/rejected": -5.271735668182373,
      "eval_loss": 0.33659350872039795,
      "eval_rewards/accuracies": 0.9009901285171509,
      "eval_rewards/chosen": -4.231922149658203,
      "eval_rewards/margins": 3.6756813526153564,
      "eval_rewards/rejected": -7.9076032638549805,
      "eval_runtime": 30.0131,
      "eval_samples_per_second": 26.688,
      "eval_steps_per_second": 3.365,
      "step": 450
    },
    {
      "epoch": 0.3713420787083754,
      "grad_norm": 2.641991138458252,
      "learning_rate": 3.92678391921108e-06,
      "logits/chosen": 2.2038960456848145,
      "logits/rejected": 1.8302087783813477,
      "logps/chosen": -3.0162768363952637,
      "logps/rejected": -5.90580940246582,
      "loss": 0.3141,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -4.524415493011475,
      "rewards/margins": 4.334298610687256,
      "rewards/rejected": -8.85871410369873,
      "step": 460
    },
    {
      "epoch": 0.3794147325933401,
      "grad_norm": 1.9930158853530884,
      "learning_rate": 3.88347887310836e-06,
      "logits/chosen": 1.655861258506775,
      "logits/rejected": 1.562140703201294,
      "logps/chosen": -2.6926944255828857,
      "logps/rejected": -5.58877420425415,
      "loss": 0.3298,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -4.039041519165039,
      "rewards/margins": 4.344119548797607,
      "rewards/rejected": -8.383161544799805,
      "step": 470
    },
    {
      "epoch": 0.3874873864783047,
      "grad_norm": 2.796666383743286,
      "learning_rate": 3.839566987447492e-06,
      "logits/chosen": 1.465962290763855,
      "logits/rejected": 1.0425068140029907,
      "logps/chosen": -2.9049878120422363,
      "logps/rejected": -5.620814323425293,
      "loss": 0.3071,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.357481956481934,
      "rewards/margins": 4.073739051818848,
      "rewards/rejected": -8.431221961975098,
      "step": 480
    },
    {
      "epoch": 0.3955600403632694,
      "grad_norm": 3.4124133586883545,
      "learning_rate": 3.795067523432826e-06,
      "logits/chosen": 1.545910120010376,
      "logits/rejected": 1.2031619548797607,
      "logps/chosen": -3.081360340118408,
      "logps/rejected": -6.2000017166137695,
      "loss": 0.2638,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -4.622040748596191,
      "rewards/margins": 4.677962779998779,
      "rewards/rejected": -9.300004959106445,
      "step": 490
    },
    {
      "epoch": 0.4036326942482341,
      "grad_norm": 2.4986650943756104,
      "learning_rate": 3.7500000000000005e-06,
      "logits/chosen": 2.3995444774627686,
      "logits/rejected": 2.111048698425293,
      "logps/chosen": -3.1852879524230957,
      "logps/rejected": -5.824184417724609,
      "loss": 0.2592,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -4.777932167053223,
      "rewards/margins": 3.9583442211151123,
      "rewards/rejected": -8.736276626586914,
      "step": 500
    },
    {
      "epoch": 0.4036326942482341,
      "eval_logits/chosen": 2.3460052013397217,
      "eval_logits/rejected": 1.8444068431854248,
      "eval_logps/chosen": -3.00115704536438,
      "eval_logps/rejected": -5.741134166717529,
      "eval_loss": 0.3161226809024811,
      "eval_rewards/accuracies": 0.9009901285171509,
      "eval_rewards/chosen": -4.501734733581543,
      "eval_rewards/margins": 4.1099653244018555,
      "eval_rewards/rejected": -8.611700057983398,
      "eval_runtime": 30.0035,
      "eval_samples_per_second": 26.697,
      "eval_steps_per_second": 3.366,
      "step": 500
    },
    {
      "epoch": 0.4117053481331988,
      "grad_norm": 2.1488800048828125,
      "learning_rate": 3.7043841852542884e-06,
      "logits/chosen": 2.289475440979004,
      "logits/rejected": 2.1274220943450928,
      "logps/chosen": -3.0586507320404053,
      "logps/rejected": -6.597572326660156,
      "loss": 0.2772,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.587975978851318,
      "rewards/margins": 5.308382034301758,
      "rewards/rejected": -9.89635944366455,
      "step": 510
    },
    {
      "epoch": 0.41977800201816345,
      "grad_norm": 4.558802604675293,
      "learning_rate": 3.658240087799655e-06,
      "logits/chosen": 2.7750000953674316,
      "logits/rejected": 2.718843936920166,
      "logps/chosen": -3.142796039581299,
      "logps/rejected": -6.112117767333984,
      "loss": 0.2724,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.714193820953369,
      "rewards/margins": 4.453982830047607,
      "rewards/rejected": -9.168176651000977,
      "step": 520
    },
    {
      "epoch": 0.42785065590312815,
      "grad_norm": 4.052336692810059,
      "learning_rate": 3.611587947962319e-06,
      "logits/chosen": 2.6630032062530518,
      "logits/rejected": 2.3567159175872803,
      "logps/chosen": -2.735839605331421,
      "logps/rejected": -5.979222774505615,
      "loss": 0.2616,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -4.103759288787842,
      "rewards/margins": 4.865074634552002,
      "rewards/rejected": -8.968833923339844,
      "step": 530
    },
    {
      "epoch": 0.43592330978809285,
      "grad_norm": 4.516399383544922,
      "learning_rate": 3.564448228912682e-06,
      "logits/chosen": 2.0932493209838867,
      "logits/rejected": 2.0154340267181396,
      "logps/chosen": -3.2643024921417236,
      "logps/rejected": -6.601607322692871,
      "loss": 0.2954,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -4.896453857421875,
      "rewards/margins": 5.00595760345459,
      "rewards/rejected": -9.902410507202148,
      "step": 540
    },
    {
      "epoch": 0.4439959636730575,
      "grad_norm": 5.320217132568359,
      "learning_rate": 3.516841607689501e-06,
      "logits/chosen": 2.524789333343506,
      "logits/rejected": 2.2494006156921387,
      "logps/chosen": -3.190767526626587,
      "logps/rejected": -6.47235107421875,
      "loss": 0.26,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -4.786150932312012,
      "rewards/margins": 4.922375679016113,
      "rewards/rejected": -9.708526611328125,
      "step": 550
    },
    {
      "epoch": 0.4439959636730575,
      "eval_logits/chosen": 2.328671932220459,
      "eval_logits/rejected": 1.9636019468307495,
      "eval_logps/chosen": -3.0293216705322266,
      "eval_logps/rejected": -6.1273980140686035,
      "eval_loss": 0.28268176317214966,
      "eval_rewards/accuracies": 0.9405940771102905,
      "eval_rewards/chosen": -4.54398250579834,
      "eval_rewards/margins": 4.64711332321167,
      "eval_rewards/rejected": -9.191096305847168,
      "eval_runtime": 30.0135,
      "eval_samples_per_second": 26.688,
      "eval_steps_per_second": 3.365,
      "step": 550
    },
    {
      "epoch": 0.4520686175580222,
      "grad_norm": 2.362356424331665,
      "learning_rate": 3.4687889661302577e-06,
      "logits/chosen": 2.4379239082336426,
      "logits/rejected": 2.2238965034484863,
      "logps/chosen": -3.121654510498047,
      "logps/rejected": -6.944204807281494,
      "loss": 0.2752,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -4.68248176574707,
      "rewards/margins": 5.733824729919434,
      "rewards/rejected": -10.41630744934082,
      "step": 560
    },
    {
      "epoch": 0.4601412714429869,
      "grad_norm": 6.080317497253418,
      "learning_rate": 3.4203113817116955e-06,
      "logits/chosen": 1.9741709232330322,
      "logits/rejected": 1.7863889932632446,
      "logps/chosen": -3.5348823070526123,
      "logps/rejected": -7.0638427734375,
      "loss": 0.2666,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -5.302323818206787,
      "rewards/margins": 5.293439865112305,
      "rewards/rejected": -10.595763206481934,
      "step": 570
    },
    {
      "epoch": 0.4682139253279516,
      "grad_norm": 2.5477423667907715,
      "learning_rate": 3.3714301183045382e-06,
      "logits/chosen": 2.9160943031311035,
      "logits/rejected": 2.747884750366211,
      "logps/chosen": -3.287876844406128,
      "logps/rejected": -6.996206760406494,
      "loss": 0.2362,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -4.9318156242370605,
      "rewards/margins": 5.562494277954102,
      "rewards/rejected": -10.494308471679688,
      "step": 580
    },
    {
      "epoch": 0.47628657921291623,
      "grad_norm": 2.0826616287231445,
      "learning_rate": 3.3221666168464584e-06,
      "logits/chosen": 2.3059380054473877,
      "logits/rejected": 2.1479601860046387,
      "logps/chosen": -3.25227689743042,
      "logps/rejected": -6.894504547119141,
      "loss": 0.2774,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -4.878415584564209,
      "rewards/margins": 5.46334171295166,
      "rewards/rejected": -10.341755867004395,
      "step": 590
    },
    {
      "epoch": 0.4843592330978809,
      "grad_norm": 2.689568519592285,
      "learning_rate": 3.272542485937369e-06,
      "logits/chosen": 2.776691436767578,
      "logits/rejected": 2.309044361114502,
      "logps/chosen": -3.4026119709014893,
      "logps/rejected": -6.8883819580078125,
      "loss": 0.3082,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -5.103917121887207,
      "rewards/margins": 5.228655815124512,
      "rewards/rejected": -10.332572937011719,
      "step": 600
    },
    {
      "epoch": 0.4843592330978809,
      "eval_logits/chosen": 2.1815295219421387,
      "eval_logits/rejected": 1.8988540172576904,
      "eval_logps/chosen": -3.1169536113739014,
      "eval_logps/rejected": -6.34333610534668,
      "eval_loss": 0.2698034942150116,
      "eval_rewards/accuracies": 0.9504950642585754,
      "eval_rewards/chosen": -4.6754302978515625,
      "eval_rewards/margins": 4.839573860168457,
      "eval_rewards/rejected": -9.51500415802002,
      "eval_runtime": 30.0083,
      "eval_samples_per_second": 26.693,
      "eval_steps_per_second": 3.366,
      "step": 600
    },
    {
      "epoch": 0.4924318869828456,
      "grad_norm": 3.95121169090271,
      "learning_rate": 3.222579492361179e-06,
      "logits/chosen": 2.4650866985321045,
      "logits/rejected": 2.0424320697784424,
      "logps/chosen": -3.28851580619812,
      "logps/rejected": -6.309131622314453,
      "loss": 0.267,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -4.932773590087891,
      "rewards/margins": 4.530923366546631,
      "rewards/rejected": -9.46369743347168,
      "step": 610
    },
    {
      "epoch": 0.5005045408678103,
      "grad_norm": 3.0987000465393066,
      "learning_rate": 3.1722995515381644e-06,
      "logits/chosen": 2.4938290119171143,
      "logits/rejected": 2.324687957763672,
      "logps/chosen": -3.5282440185546875,
      "logps/rejected": -6.8709845542907715,
      "loss": 0.2849,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -5.292366027832031,
      "rewards/margins": 5.014110565185547,
      "rewards/rejected": -10.306477546691895,
      "step": 620
    },
    {
      "epoch": 0.508577194752775,
      "grad_norm": 2.19297456741333,
      "learning_rate": 3.121724717912138e-06,
      "logits/chosen": 3.0522961616516113,
      "logits/rejected": 2.7846601009368896,
      "logps/chosen": -3.524689197540283,
      "logps/rejected": -6.6990180015563965,
      "loss": 0.2448,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -5.2870330810546875,
      "rewards/margins": 4.76149320602417,
      "rewards/rejected": -10.048526763916016,
      "step": 630
    },
    {
      "epoch": 0.5166498486377397,
      "grad_norm": 2.607801914215088,
      "learning_rate": 3.0708771752766397e-06,
      "logits/chosen": 2.4197397232055664,
      "logits/rejected": 2.1711297035217285,
      "logps/chosen": -3.635500431060791,
      "logps/rejected": -7.1246538162231445,
      "loss": 0.2505,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -5.453250885009766,
      "rewards/margins": 5.233729362487793,
      "rewards/rejected": -10.686980247497559,
      "step": 640
    },
    {
      "epoch": 0.5247225025227044,
      "grad_norm": 2.6676313877105713,
      "learning_rate": 3.019779227044398e-06,
      "logits/chosen": 3.1394009590148926,
      "logits/rejected": 3.1322834491729736,
      "logps/chosen": -3.8464252948760986,
      "logps/rejected": -6.729602813720703,
      "loss": 0.2889,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -5.7696380615234375,
      "rewards/margins": 4.324765682220459,
      "rewards/rejected": -10.094404220581055,
      "step": 650
    },
    {
      "epoch": 0.5247225025227044,
      "eval_logits/chosen": 2.311185359954834,
      "eval_logits/rejected": 2.0363073348999023,
      "eval_logps/chosen": -3.5034563541412354,
      "eval_logps/rejected": -6.952638149261475,
      "eval_loss": 0.2533933222293854,
      "eval_rewards/accuracies": 0.9405940771102905,
      "eval_rewards/chosen": -5.255184173583984,
      "eval_rewards/margins": 5.173771858215332,
      "eval_rewards/rejected": -10.428956031799316,
      "eval_runtime": 30.005,
      "eval_samples_per_second": 26.696,
      "eval_steps_per_second": 3.366,
      "step": 650
    },
    {
      "epoch": 0.5327951564076691,
      "grad_norm": 4.812018871307373,
      "learning_rate": 2.9684532864643123e-06,
      "logits/chosen": 2.8078818321228027,
      "logits/rejected": 2.887716770172119,
      "logps/chosen": -3.8643996715545654,
      "logps/rejected": -7.7119011878967285,
      "loss": 0.2228,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -5.796599388122559,
      "rewards/margins": 5.771251678466797,
      "rewards/rejected": -11.567851066589355,
      "step": 660
    },
    {
      "epoch": 0.5408678102926338,
      "grad_norm": 4.40342378616333,
      "learning_rate": 2.9169218667902562e-06,
      "logits/chosen": 1.7638460397720337,
      "logits/rejected": 1.8495973348617554,
      "logps/chosen": -3.6541030406951904,
      "logps/rejected": -7.6542510986328125,
      "loss": 0.2443,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -5.481154441833496,
      "rewards/margins": 6.000222682952881,
      "rewards/rejected": -11.481378555297852,
      "step": 670
    },
    {
      "epoch": 0.5489404641775983,
      "grad_norm": 3.875150680541992,
      "learning_rate": 2.8652075714060296e-06,
      "logits/chosen": 2.295011043548584,
      "logits/rejected": 2.3379902839660645,
      "logps/chosen": -3.6108944416046143,
      "logps/rejected": -7.423912048339844,
      "loss": 0.2413,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -5.4163408279418945,
      "rewards/margins": 5.7195258140563965,
      "rewards/rejected": -11.13586711883545,
      "step": 680
    },
    {
      "epoch": 0.557013118062563,
      "grad_norm": 2.429936408996582,
      "learning_rate": 2.813333083910761e-06,
      "logits/chosen": 2.482734441757202,
      "logits/rejected": 2.283761978149414,
      "logps/chosen": -4.071568965911865,
      "logps/rejected": -8.294123649597168,
      "loss": 0.2356,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -6.107354164123535,
      "rewards/margins": 6.333830833435059,
      "rewards/rejected": -12.44118595123291,
      "step": 690
    },
    {
      "epoch": 0.5650857719475277,
      "grad_norm": 6.751905918121338,
      "learning_rate": 2.761321158169134e-06,
      "logits/chosen": 2.7583765983581543,
      "logits/rejected": 2.3130671977996826,
      "logps/chosen": -3.6415677070617676,
      "logps/rejected": -8.02357006072998,
      "loss": 0.212,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -5.4623517990112305,
      "rewards/margins": 6.573002815246582,
      "rewards/rejected": -12.035354614257812,
      "step": 700
    },
    {
      "epoch": 0.5650857719475277,
      "eval_logits/chosen": 2.289931058883667,
      "eval_logits/rejected": 2.0797195434570312,
      "eval_logps/chosen": -3.6428418159484863,
      "eval_logps/rejected": -7.301985263824463,
      "eval_loss": 0.24302434921264648,
      "eval_rewards/accuracies": 0.9504950642585754,
      "eval_rewards/chosen": -5.464262962341309,
      "eval_rewards/margins": 5.48871374130249,
      "eval_rewards/rejected": -10.952977180480957,
      "eval_runtime": 30.0073,
      "eval_samples_per_second": 26.693,
      "eval_steps_per_second": 3.366,
      "step": 700
    },
    {
      "epoch": 0.5731584258324924,
      "grad_norm": 3.459409475326538,
      "learning_rate": 2.70919460833079e-06,
      "logits/chosen": 2.6869912147521973,
      "logits/rejected": 2.5928313732147217,
      "logps/chosen": -3.7079944610595703,
      "logps/rejected": -8.225436210632324,
      "loss": 0.192,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.5619916915893555,
      "rewards/margins": 6.776162624359131,
      "rewards/rejected": -12.338154792785645,
      "step": 710
    },
    {
      "epoch": 0.5812310797174571,
      "grad_norm": 3.1674091815948486,
      "learning_rate": 2.6569762988232838e-06,
      "logits/chosen": 2.8864941596984863,
      "logits/rejected": 2.7974016666412354,
      "logps/chosen": -3.6625328063964844,
      "logps/rejected": -7.5159406661987305,
      "loss": 0.2578,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -5.493799686431885,
      "rewards/margins": 5.780111789703369,
      "rewards/rejected": -11.273911476135254,
      "step": 720
    },
    {
      "epoch": 0.5893037336024218,
      "grad_norm": 6.737880229949951,
      "learning_rate": 2.604689134322999e-06,
      "logits/chosen": 2.790544033050537,
      "logits/rejected": 2.9442079067230225,
      "logps/chosen": -3.4699268341064453,
      "logps/rejected": -7.267726898193359,
      "loss": 0.2569,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -5.204890727996826,
      "rewards/margins": 5.6967010498046875,
      "rewards/rejected": -10.901591300964355,
      "step": 730
    },
    {
      "epoch": 0.5973763874873865,
      "grad_norm": 5.664917945861816,
      "learning_rate": 2.5523560497083927e-06,
      "logits/chosen": 2.466938018798828,
      "logits/rejected": 2.281191349029541,
      "logps/chosen": -3.6285691261291504,
      "logps/rejected": -7.843980312347412,
      "loss": 0.205,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -5.442853927612305,
      "rewards/margins": 6.323116302490234,
      "rewards/rejected": -11.765970230102539,
      "step": 740
    },
    {
      "epoch": 0.6054490413723511,
      "grad_norm": 5.38569450378418,
      "learning_rate": 2.5e-06,
      "logits/chosen": 2.642348527908325,
      "logits/rejected": 2.497035503387451,
      "logps/chosen": -3.4735655784606934,
      "logps/rejected": -8.545342445373535,
      "loss": 0.2279,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -5.210348129272461,
      "rewards/margins": 7.607665061950684,
      "rewards/rejected": -12.818013191223145,
      "step": 750
    },
    {
      "epoch": 0.6054490413723511,
      "eval_logits/chosen": 2.3449151515960693,
      "eval_logits/rejected": 2.140186309814453,
      "eval_logps/chosen": -3.696176290512085,
      "eval_logps/rejected": -7.5047287940979,
      "eval_loss": 0.23516784608364105,
      "eval_rewards/accuracies": 0.9603960514068604,
      "eval_rewards/chosen": -5.544264793395996,
      "eval_rewards/margins": 5.712828159332275,
      "eval_rewards/rejected": -11.25709342956543,
      "eval_runtime": 30.0103,
      "eval_samples_per_second": 26.691,
      "eval_steps_per_second": 3.366,
      "step": 750
    },
    {
      "epoch": 0.6135216952573158,
      "grad_norm": 4.593304634094238,
      "learning_rate": 2.447643950291608e-06,
      "logits/chosen": 2.9201483726501465,
      "logits/rejected": 2.6473968029022217,
      "logps/chosen": -3.9514071941375732,
      "logps/rejected": -8.410246849060059,
      "loss": 0.2413,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -5.927111625671387,
      "rewards/margins": 6.688260555267334,
      "rewards/rejected": -12.615371704101562,
      "step": 760
    },
    {
      "epoch": 0.6215943491422805,
      "grad_norm": 8.970620155334473,
      "learning_rate": 2.3953108656770018e-06,
      "logits/chosen": 2.294482707977295,
      "logits/rejected": 2.2701497077941895,
      "logps/chosen": -4.16445255279541,
      "logps/rejected": -8.862281799316406,
      "loss": 0.2045,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -6.246678352355957,
      "rewards/margins": 7.0467424392700195,
      "rewards/rejected": -13.293420791625977,
      "step": 770
    },
    {
      "epoch": 0.6296670030272452,
      "grad_norm": 4.757198333740234,
      "learning_rate": 2.3430237011767166e-06,
      "logits/chosen": 2.465088367462158,
      "logits/rejected": 2.3629677295684814,
      "logps/chosen": -3.9091827869415283,
      "logps/rejected": -7.637685298919678,
      "loss": 0.2382,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -5.863774299621582,
      "rewards/margins": 5.5927534103393555,
      "rewards/rejected": -11.456528663635254,
      "step": 780
    },
    {
      "epoch": 0.6377396569122099,
      "grad_norm": 3.500101089477539,
      "learning_rate": 2.290805391669212e-06,
      "logits/chosen": 2.6884655952453613,
      "logits/rejected": 2.5679004192352295,
      "logps/chosen": -3.4037024974823,
      "logps/rejected": -8.056086540222168,
      "loss": 0.2333,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -5.105554103851318,
      "rewards/margins": 6.978575706481934,
      "rewards/rejected": -12.08413028717041,
      "step": 790
    },
    {
      "epoch": 0.6458123107971746,
      "grad_norm": 3.8024752140045166,
      "learning_rate": 2.238678841830867e-06,
      "logits/chosen": 2.357815742492676,
      "logits/rejected": 2.6706268787384033,
      "logps/chosen": -3.713919162750244,
      "logps/rejected": -7.829472541809082,
      "loss": 0.2555,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -5.5708794593811035,
      "rewards/margins": 6.173328876495361,
      "rewards/rejected": -11.744208335876465,
      "step": 800
    },
    {
      "epoch": 0.6458123107971746,
      "eval_logits/chosen": 2.3652234077453613,
      "eval_logits/rejected": 2.1688482761383057,
      "eval_logps/chosen": -3.6815528869628906,
      "eval_logps/rejected": -7.657222270965576,
      "eval_loss": 0.22401823103427887,
      "eval_rewards/accuracies": 0.9702970385551453,
      "eval_rewards/chosen": -5.522329330444336,
      "eval_rewards/margins": 5.963504791259766,
      "eval_rewards/rejected": -11.485835075378418,
      "eval_runtime": 30.0067,
      "eval_samples_per_second": 26.694,
      "eval_steps_per_second": 3.366,
      "step": 800
    },
    {
      "epoch": 0.6538849646821393,
      "grad_norm": 3.282121419906616,
      "learning_rate": 2.186666916089239e-06,
      "logits/chosen": 2.4973549842834473,
      "logits/rejected": 2.5276618003845215,
      "logps/chosen": -4.102963447570801,
      "logps/rejected": -8.789884567260742,
      "loss": 0.2228,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -6.154445648193359,
      "rewards/margins": 7.030381679534912,
      "rewards/rejected": -13.184826850891113,
      "step": 810
    },
    {
      "epoch": 0.6619576185671039,
      "grad_norm": 3.714979410171509,
      "learning_rate": 2.134792428593971e-06,
      "logits/chosen": 2.866018772125244,
      "logits/rejected": 2.7473669052124023,
      "logps/chosen": -3.602524518966675,
      "logps/rejected": -7.64529275894165,
      "loss": 0.2156,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.403786659240723,
      "rewards/margins": 6.064152717590332,
      "rewards/rejected": -11.467939376831055,
      "step": 820
    },
    {
      "epoch": 0.6700302724520686,
      "grad_norm": 4.395923614501953,
      "learning_rate": 2.0830781332097446e-06,
      "logits/chosen": 3.2006161212921143,
      "logits/rejected": 3.0712599754333496,
      "logps/chosen": -3.731093168258667,
      "logps/rejected": -7.530323028564453,
      "loss": 0.2229,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -5.596639633178711,
      "rewards/margins": 5.6988444328308105,
      "rewards/rejected": -11.295483589172363,
      "step": 830
    },
    {
      "epoch": 0.6781029263370333,
      "grad_norm": 4.669759273529053,
      "learning_rate": 2.031546713535688e-06,
      "logits/chosen": 2.637826919555664,
      "logits/rejected": 2.753279209136963,
      "logps/chosen": -4.231513500213623,
      "logps/rejected": -8.334096908569336,
      "loss": 0.2255,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -6.3472700119018555,
      "rewards/margins": 6.15387487411499,
      "rewards/rejected": -12.501144409179688,
      "step": 840
    },
    {
      "epoch": 0.686175580221998,
      "grad_norm": 3.5380821228027344,
      "learning_rate": 1.9802207729556023e-06,
      "logits/chosen": 2.7282066345214844,
      "logits/rejected": 2.3621890544891357,
      "logps/chosen": -3.6814780235290527,
      "logps/rejected": -8.813811302185059,
      "loss": 0.2116,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -5.522217273712158,
      "rewards/margins": 7.698500633239746,
      "rewards/rejected": -13.220718383789062,
      "step": 850
    },
    {
      "epoch": 0.686175580221998,
      "eval_logits/chosen": 2.483670473098755,
      "eval_logits/rejected": 2.245927333831787,
      "eval_logps/chosen": -3.632673501968384,
      "eval_logps/rejected": -7.6703104972839355,
      "eval_loss": 0.21706117689609528,
      "eval_rewards/accuracies": 0.9603960514068604,
      "eval_rewards/chosen": -5.449010848999023,
      "eval_rewards/margins": 6.056455612182617,
      "eval_rewards/rejected": -11.505465507507324,
      "eval_runtime": 29.9959,
      "eval_samples_per_second": 26.704,
      "eval_steps_per_second": 3.367,
      "step": 850
    },
    {
      "epoch": 0.6942482341069627,
      "grad_norm": 3.1895673274993896,
      "learning_rate": 1.9291228247233607e-06,
      "logits/chosen": 2.936154842376709,
      "logits/rejected": 2.8326685428619385,
      "logps/chosen": -3.70434308052063,
      "logps/rejected": -7.762692451477051,
      "loss": 0.237,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -5.556514739990234,
      "rewards/margins": 6.087524890899658,
      "rewards/rejected": -11.644038200378418,
      "step": 860
    },
    {
      "epoch": 0.7023208879919274,
      "grad_norm": 3.543269395828247,
      "learning_rate": 1.8782752820878636e-06,
      "logits/chosen": 3.0214743614196777,
      "logits/rejected": 2.7598631381988525,
      "logps/chosen": -4.115224361419678,
      "logps/rejected": -8.607885360717773,
      "loss": 0.2437,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -6.172836780548096,
      "rewards/margins": 6.738990783691406,
      "rewards/rejected": -12.911827087402344,
      "step": 870
    },
    {
      "epoch": 0.7103935418768921,
      "grad_norm": 4.019063472747803,
      "learning_rate": 1.827700448461836e-06,
      "logits/chosen": 2.520110607147217,
      "logits/rejected": 2.5731875896453857,
      "logps/chosen": -3.931302547454834,
      "logps/rejected": -8.647977828979492,
      "loss": 0.2125,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -5.896953582763672,
      "rewards/margins": 7.075013160705566,
      "rewards/rejected": -12.971966743469238,
      "step": 880
    },
    {
      "epoch": 0.7184661957618567,
      "grad_norm": 2.3453805446624756,
      "learning_rate": 1.7774205076388207e-06,
      "logits/chosen": 3.297231674194336,
      "logits/rejected": 2.8459277153015137,
      "logps/chosen": -4.021622657775879,
      "logps/rejected": -9.263435363769531,
      "loss": 0.1604,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -6.032434463500977,
      "rewards/margins": 7.862718105316162,
      "rewards/rejected": -13.89515209197998,
      "step": 890
    },
    {
      "epoch": 0.7265388496468214,
      "grad_norm": 5.522162914276123,
      "learning_rate": 1.7274575140626318e-06,
      "logits/chosen": 2.0340800285339355,
      "logits/rejected": 2.0777273178100586,
      "logps/chosen": -3.8453431129455566,
      "logps/rejected": -9.153608322143555,
      "loss": 0.2305,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -5.768013954162598,
      "rewards/margins": 7.962396144866943,
      "rewards/rejected": -13.7304105758667,
      "step": 900
    },
    {
      "epoch": 0.7265388496468214,
      "eval_logits/chosen": 2.456989049911499,
      "eval_logits/rejected": 2.3152406215667725,
      "eval_logps/chosen": -3.8842220306396484,
      "eval_logps/rejected": -8.038114547729492,
      "eval_loss": 0.21270446479320526,
      "eval_rewards/accuracies": 0.9801980257034302,
      "eval_rewards/chosen": -5.826333999633789,
      "eval_rewards/margins": 6.230839729309082,
      "eval_rewards/rejected": -12.057172775268555,
      "eval_runtime": 30.0018,
      "eval_samples_per_second": 26.698,
      "eval_steps_per_second": 3.366,
      "step": 900
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1917477978484244e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}