{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9991111111111111,
  "eval_steps": 100,
  "global_step": 562,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0017777777777777779,
      "grad_norm": 0.34293448639816615,
      "learning_rate": 8.771929824561403e-09,
      "logits/chosen": 13.316938400268555,
      "logits/rejected": 13.604828834533691,
      "logps/chosen": -366.0712890625,
      "logps/rejected": -384.89288330078125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.017777777777777778,
      "grad_norm": 0.38494137076048135,
      "learning_rate": 8.771929824561403e-08,
      "logits/chosen": 14.320709228515625,
      "logits/rejected": 12.689621925354004,
      "logps/chosen": -420.3371887207031,
      "logps/rejected": -431.1959228515625,
      "loss": 0.6929,
      "rewards/accuracies": 0.3194444477558136,
      "rewards/chosen": -0.0003511953691486269,
      "rewards/margins": 0.00028524029767140746,
      "rewards/rejected": -0.0006364354630932212,
      "step": 10
    },
    {
      "epoch": 0.035555555555555556,
      "grad_norm": 0.37882743094316385,
      "learning_rate": 1.7543859649122805e-07,
      "logits/chosen": 13.643712997436523,
      "logits/rejected": 11.55290412902832,
      "logps/chosen": -413.57061767578125,
      "logps/rejected": -409.67694091796875,
      "loss": 0.6937,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": -0.0014681232860311866,
      "rewards/margins": -0.0013028818648308516,
      "rewards/rejected": -0.00016524126112926751,
      "step": 20
    },
    {
      "epoch": 0.05333333333333334,
      "grad_norm": 0.3955514954356053,
      "learning_rate": 2.631578947368421e-07,
      "logits/chosen": 13.529040336608887,
      "logits/rejected": 12.290833473205566,
      "logps/chosen": -371.78033447265625,
      "logps/rejected": -382.180419921875,
      "loss": 0.6925,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.00034525356022641063,
      "rewards/margins": 0.004034269601106644,
      "rewards/rejected": -0.0043795229867100716,
      "step": 30
    },
    {
      "epoch": 0.07111111111111111,
      "grad_norm": 0.3113111173035772,
      "learning_rate": 3.508771929824561e-07,
      "logits/chosen": 13.795173645019531,
      "logits/rejected": 12.55676555633545,
      "logps/chosen": -389.0183410644531,
      "logps/rejected": -415.35247802734375,
      "loss": 0.6938,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.003066176315769553,
      "rewards/margins": -0.0013317791745066643,
      "rewards/rejected": -0.0017343973740935326,
      "step": 40
    },
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 0.32606516023193766,
      "learning_rate": 4.3859649122807013e-07,
      "logits/chosen": 14.106832504272461,
      "logits/rejected": 12.1203031539917,
      "logps/chosen": -422.3417053222656,
      "logps/rejected": -397.51385498046875,
      "loss": 0.6937,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.00344509887509048,
      "rewards/margins": -0.0021442542783915997,
      "rewards/rejected": -0.001300844713114202,
      "step": 50
    },
    {
      "epoch": 0.10666666666666667,
      "grad_norm": 0.33670626880499194,
      "learning_rate": 4.999564631597801e-07,
      "logits/chosen": 13.741307258605957,
      "logits/rejected": 11.687819480895996,
      "logps/chosen": -413.763427734375,
      "logps/rejected": -397.7774353027344,
      "loss": 0.6916,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.0033744447864592075,
      "rewards/margins": 0.003910833969712257,
      "rewards/rejected": -0.007285278290510178,
      "step": 60
    },
    {
      "epoch": 0.12444444444444444,
      "grad_norm": 0.34616943203548983,
      "learning_rate": 4.991828966534002e-07,
      "logits/chosen": 14.954236030578613,
      "logits/rejected": 12.991679191589355,
      "logps/chosen": -437.3307189941406,
      "logps/rejected": -431.5257263183594,
      "loss": 0.6914,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.0038897425401955843,
      "rewards/margins": 0.0035481129307299852,
      "rewards/rejected": -0.007437856402248144,
      "step": 70
    },
    {
      "epoch": 0.14222222222222222,
      "grad_norm": 0.4091997164465182,
      "learning_rate": 4.974452899279291e-07,
      "logits/chosen": 12.901407241821289,
      "logits/rejected": 11.605668067932129,
      "logps/chosen": -391.2544250488281,
      "logps/rejected": -403.60089111328125,
      "loss": 0.692,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.009723111987113953,
      "rewards/margins": 0.000866956717800349,
      "rewards/rejected": -0.010590068995952606,
      "step": 80
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.41253119841994956,
      "learning_rate": 4.947503654462276e-07,
      "logits/chosen": 13.798151016235352,
      "logits/rejected": 12.685941696166992,
      "logps/chosen": -403.194580078125,
      "logps/rejected": -421.2926330566406,
      "loss": 0.6888,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -0.012594172731041908,
      "rewards/margins": 0.004347759298980236,
      "rewards/rejected": -0.01694193109869957,
      "step": 90
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 0.3696840111639605,
      "learning_rate": 4.911085493475802e-07,
      "logits/chosen": 14.222944259643555,
      "logits/rejected": 12.481651306152344,
      "logps/chosen": -427.9515075683594,
      "logps/rejected": -426.4696350097656,
      "loss": 0.6885,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -0.01204882562160492,
      "rewards/margins": 0.010180080309510231,
      "rewards/rejected": -0.02222890593111515,
      "step": 100
    },
    {
      "epoch": 0.17777777777777778,
      "eval_logits/chosen": 14.262085914611816,
      "eval_logits/rejected": 12.830548286437988,
      "eval_logps/chosen": -407.780517578125,
      "eval_logps/rejected": -402.74957275390625,
      "eval_loss": 0.6883670687675476,
      "eval_rewards/accuracies": 0.6190476417541504,
      "eval_rewards/chosen": -0.015755515545606613,
      "eval_rewards/margins": 0.008608067408204079,
      "eval_rewards/rejected": -0.024363582953810692,
      "eval_runtime": 91.0668,
      "eval_samples_per_second": 10.981,
      "eval_steps_per_second": 0.692,
      "step": 100
    },
    {
      "epoch": 0.19555555555555557,
      "grad_norm": 0.387876184968801,
      "learning_rate": 4.865339311109869e-07,
      "logits/chosen": 13.772363662719727,
      "logits/rejected": 12.22777271270752,
      "logps/chosen": -405.01214599609375,
      "logps/rejected": -403.9130859375,
      "loss": 0.688,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.0224502794444561,
      "rewards/margins": 0.012338267639279366,
      "rewards/rejected": -0.03478854522109032,
      "step": 110
    },
    {
      "epoch": 0.21333333333333335,
      "grad_norm": 0.3899509909292818,
      "learning_rate": 4.810442090457072e-07,
      "logits/chosen": 13.653238296508789,
      "logits/rejected": 12.279303550720215,
      "logps/chosen": -402.15704345703125,
      "logps/rejected": -404.1790466308594,
      "loss": 0.6866,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.024786310270428658,
      "rewards/margins": 0.014094953425228596,
      "rewards/rejected": -0.03888126462697983,
      "step": 120
    },
    {
      "epoch": 0.2311111111111111,
      "grad_norm": 0.3557450823856152,
      "learning_rate": 4.746606218199385e-07,
      "logits/chosen": 14.775594711303711,
      "logits/rejected": 12.874218940734863,
      "logps/chosen": -390.8421630859375,
      "logps/rejected": -400.7987365722656,
      "loss": 0.6854,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.03051091358065605,
      "rewards/margins": 0.017188305035233498,
      "rewards/rejected": -0.0476992204785347,
      "step": 130
    },
    {
      "epoch": 0.24888888888888888,
      "grad_norm": 0.36125778338353265,
      "learning_rate": 4.674078662925359e-07,
      "logits/chosen": 13.882017135620117,
      "logits/rejected": 12.225305557250977,
      "logps/chosen": -420.66033935546875,
      "logps/rejected": -413.9700622558594,
      "loss": 0.6837,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.040414221584796906,
      "rewards/margins": 0.021218815818428993,
      "rewards/rejected": -0.06163303926587105,
      "step": 140
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 0.42414032297199183,
      "learning_rate": 4.593140019656625e-07,
      "logits/chosen": 13.458653450012207,
      "logits/rejected": 11.939371109008789,
      "logps/chosen": -395.70318603515625,
      "logps/rejected": -426.5689392089844,
      "loss": 0.6815,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.04780956357717514,
      "rewards/margins": 0.022214924916625023,
      "rewards/rejected": -0.07002449035644531,
      "step": 150
    },
    {
      "epoch": 0.28444444444444444,
      "grad_norm": 0.31038758853221154,
      "learning_rate": 4.504103424280266e-07,
      "logits/chosen": 14.602518081665039,
      "logits/rejected": 12.48104476928711,
      "logps/chosen": -445.41900634765625,
      "logps/rejected": -427.4073791503906,
      "loss": 0.6785,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.04993806034326553,
      "rewards/margins": 0.03340305760502815,
      "rewards/rejected": -0.08334111422300339,
      "step": 160
    },
    {
      "epoch": 0.3022222222222222,
      "grad_norm": 0.5166732651791626,
      "learning_rate": 4.407313342086905e-07,
      "logits/chosen": 14.151707649230957,
      "logits/rejected": 12.189191818237305,
      "logps/chosen": -463.04193115234375,
      "logps/rejected": -419.1507263183594,
      "loss": 0.6757,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.06344902515411377,
      "rewards/margins": 0.031504396349191666,
      "rewards/rejected": -0.09495342522859573,
      "step": 170
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.351499350492249,
      "learning_rate": 4.3031442351014115e-07,
      "logits/chosen": 13.737968444824219,
      "logits/rejected": 12.326078414916992,
      "logps/chosen": -399.82208251953125,
      "logps/rejected": -415.14105224609375,
      "loss": 0.6754,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.08228292316198349,
      "rewards/margins": 0.03530151769518852,
      "rewards/rejected": -0.11758442968130112,
      "step": 180
    },
    {
      "epoch": 0.3377777777777778,
      "grad_norm": 0.4342072416786669,
      "learning_rate": 4.19199911336207e-07,
      "logits/chosen": 13.775731086730957,
      "logits/rejected": 11.498677253723145,
      "logps/chosen": -436.33734130859375,
      "logps/rejected": -418.3775939941406,
      "loss": 0.6717,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.08298386633396149,
      "rewards/margins": 0.04400986433029175,
      "rewards/rejected": -0.12699371576309204,
      "step": 190
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 0.4572664943304227,
      "learning_rate": 4.0743079757530443e-07,
      "logits/chosen": 14.21821403503418,
      "logits/rejected": 12.080196380615234,
      "logps/chosen": -428.86083984375,
      "logps/rejected": -398.98223876953125,
      "loss": 0.6712,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.08896859735250473,
      "rewards/margins": 0.042290668934583664,
      "rewards/rejected": -0.1312592625617981,
      "step": 200
    },
    {
      "epoch": 0.35555555555555557,
      "eval_logits/chosen": 14.084491729736328,
      "eval_logits/rejected": 12.648215293884277,
      "eval_logps/chosen": -415.914794921875,
      "eval_logps/rejected": -414.950439453125,
      "eval_loss": 0.6679643988609314,
      "eval_rewards/accuracies": 0.7936508059501648,
      "eval_rewards/chosen": -0.09709871560335159,
      "eval_rewards/margins": 0.04927373677492142,
      "eval_rewards/rejected": -0.146372452378273,
      "eval_runtime": 91.0693,
      "eval_samples_per_second": 10.981,
      "eval_steps_per_second": 0.692,
      "step": 200
    },
    {
      "epoch": 0.37333333333333335,
      "grad_norm": 0.3933409306767686,
      "learning_rate": 3.9505261464222127e-07,
      "logits/chosen": 14.01300048828125,
      "logits/rejected": 12.405731201171875,
      "logps/chosen": -414.12432861328125,
      "logps/rejected": -435.1546936035156,
      "loss": 0.6667,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.10342928022146225,
      "rewards/margins": 0.06037931516766548,
      "rewards/rejected": -0.16380859911441803,
      "step": 210
    },
    {
      "epoch": 0.39111111111111113,
      "grad_norm": 0.48372655964524586,
      "learning_rate": 3.821132513220511e-07,
      "logits/chosen": 13.346511840820312,
      "logits/rejected": 11.28764820098877,
      "logps/chosen": -409.07830810546875,
      "logps/rejected": -409.73162841796875,
      "loss": 0.6614,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.11821464449167252,
      "rewards/margins": 0.068251833319664,
      "rewards/rejected": -0.18646648526191711,
      "step": 220
    },
    {
      "epoch": 0.4088888888888889,
      "grad_norm": 0.4254357046966758,
      "learning_rate": 3.6866276749778575e-07,
      "logits/chosen": 14.086942672729492,
      "logits/rejected": 12.073091506958008,
      "logps/chosen": -421.7301330566406,
      "logps/rejected": -425.4147033691406,
      "loss": 0.6613,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.1326219141483307,
      "rewards/margins": 0.06661403924226761,
      "rewards/rejected": -0.1992359459400177,
      "step": 230
    },
    {
      "epoch": 0.4266666666666667,
      "grad_norm": 0.5227754305907224,
      "learning_rate": 3.5475320047835385e-07,
      "logits/chosen": 13.673505783081055,
      "logits/rejected": 11.437660217285156,
      "logps/chosen": -427.7577209472656,
      "logps/rejected": -431.9249572753906,
      "loss": 0.6557,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.14575575292110443,
      "rewards/margins": 0.08081929385662079,
      "rewards/rejected": -0.22657504677772522,
      "step": 240
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.5124383191463783,
      "learning_rate": 3.4043836367638084e-07,
      "logits/chosen": 14.250768661499023,
      "logits/rejected": 12.134879112243652,
      "logps/chosen": -437.49945068359375,
      "logps/rejected": -460.59295654296875,
      "loss": 0.656,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.15989777445793152,
      "rewards/margins": 0.09294405579566956,
      "rewards/rejected": -0.25284186005592346,
      "step": 250
    },
    {
      "epoch": 0.4622222222222222,
      "grad_norm": 0.46067856126558626,
      "learning_rate": 3.257736384145506e-07,
      "logits/chosen": 14.0197172164917,
      "logits/rejected": 12.032342910766602,
      "logps/chosen": -459.6131286621094,
      "logps/rejected": -442.3273010253906,
      "loss": 0.6506,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.17320798337459564,
      "rewards/margins": 0.0869670957326889,
      "rewards/rejected": -0.26017507910728455,
      "step": 260
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.5260206224619711,
      "learning_rate": 3.1081575966602624e-07,
      "logits/chosen": 12.930171012878418,
      "logits/rejected": 11.695451736450195,
      "logps/chosen": -399.03790283203125,
      "logps/rejected": -436.02532958984375,
      "loss": 0.6438,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.21191565692424774,
      "rewards/margins": 0.0779983252286911,
      "rewards/rejected": -0.28991398215293884,
      "step": 270
    },
    {
      "epoch": 0.49777777777777776,
      "grad_norm": 0.49006540494490103,
      "learning_rate": 2.9562259655786065e-07,
      "logits/chosen": 14.197479248046875,
      "logits/rejected": 12.239548683166504,
      "logps/chosen": -450.38623046875,
      "logps/rejected": -440.9632873535156,
      "loss": 0.6414,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.21933193504810333,
      "rewards/margins": 0.11443768441677094,
      "rewards/rejected": -0.33376961946487427,
      "step": 280
    },
    {
      "epoch": 0.5155555555555555,
      "grad_norm": 0.43019209624061255,
      "learning_rate": 2.8025292848658625e-07,
      "logits/chosen": 13.873621940612793,
      "logits/rejected": 11.836385726928711,
      "logps/chosen": -467.55108642578125,
      "logps/rejected": -473.07818603515625,
      "loss": 0.6342,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.23439328372478485,
      "rewards/margins": 0.13239440321922302,
      "rewards/rejected": -0.3667876720428467,
      "step": 290
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.46075707476643984,
      "learning_rate": 2.647662177121486e-07,
      "logits/chosen": 13.895523071289062,
      "logits/rejected": 11.491305351257324,
      "logps/chosen": -416.88592529296875,
      "logps/rejected": -417.43243408203125,
      "loss": 0.6339,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.2427409142255783,
      "rewards/margins": 0.10592355579137802,
      "rewards/rejected": -0.3486644923686981,
      "step": 300
    },
    {
      "epoch": 0.5333333333333333,
      "eval_logits/chosen": 14.174385070800781,
      "eval_logits/rejected": 12.855634689331055,
      "eval_logps/chosen": -432.1300354003906,
      "eval_logps/rejected": -437.43072509765625,
      "eval_loss": 0.6389195322990417,
      "eval_rewards/accuracies": 0.7539682388305664,
      "eval_rewards/chosen": -0.2592509388923645,
      "eval_rewards/margins": 0.11192431300878525,
      "eval_rewards/rejected": -0.37117522954940796,
      "eval_runtime": 91.0722,
      "eval_samples_per_second": 10.98,
      "eval_steps_per_second": 0.692,
      "step": 300
    },
    {
      "epoch": 0.5511111111111111,
      "grad_norm": 0.5603386362578662,
      "learning_rate": 2.492223793099743e-07,
      "logits/chosen": 13.52906608581543,
      "logits/rejected": 12.464258193969727,
      "logps/chosen": -425.34600830078125,
      "logps/rejected": -457.48968505859375,
      "loss": 0.6345,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.26422396302223206,
      "rewards/margins": 0.11599861085414886,
      "rewards/rejected": -0.3802226185798645,
      "step": 310
    },
    {
      "epoch": 0.5688888888888889,
      "grad_norm": 0.5899560581428063,
      "learning_rate": 2.3368154937118352e-07,
      "logits/chosen": 14.568766593933105,
      "logits/rejected": 12.381284713745117,
      "logps/chosen": -482.33648681640625,
      "logps/rejected": -460.39959716796875,
      "loss": 0.6352,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.2616764307022095,
      "rewards/margins": 0.11895668506622314,
      "rewards/rejected": -0.3806331157684326,
      "step": 320
    },
    {
      "epoch": 0.5866666666666667,
      "grad_norm": 0.5671140786141569,
      "learning_rate": 2.1820385234773604e-07,
      "logits/chosen": 13.732978820800781,
      "logits/rejected": 11.89165210723877,
      "logps/chosen": -432.77435302734375,
      "logps/rejected": -449.07147216796875,
      "loss": 0.6348,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.28699907660484314,
      "rewards/margins": 0.1393791139125824,
      "rewards/rejected": -0.42637819051742554,
      "step": 330
    },
    {
      "epoch": 0.6044444444444445,
      "grad_norm": 0.494895870546235,
      "learning_rate": 2.0284916844260697e-07,
      "logits/chosen": 14.013145446777344,
      "logits/rejected": 12.176668167114258,
      "logps/chosen": -466.3651428222656,
      "logps/rejected": -457.82196044921875,
      "loss": 0.6271,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.2999596893787384,
      "rewards/margins": 0.150094673037529,
      "rewards/rejected": -0.4500543177127838,
      "step": 340
    },
    {
      "epoch": 0.6222222222222222,
      "grad_norm": 0.44235392764613574,
      "learning_rate": 1.876769019449141e-07,
      "logits/chosen": 13.546051025390625,
      "logits/rejected": 11.55055046081543,
      "logps/chosen": -455.70892333984375,
      "logps/rejected": -447.29156494140625,
      "loss": 0.6205,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.32695135474205017,
      "rewards/margins": 0.1475459635257721,
      "rewards/rejected": -0.47449731826782227,
      "step": 350
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.4906124921285579,
      "learning_rate": 1.7274575140626315e-07,
      "logits/chosen": 13.1173095703125,
      "logits/rejected": 10.774048805236816,
      "logps/chosen": -454.48797607421875,
      "logps/rejected": -431.7547912597656,
      "loss": 0.6226,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.30864274501800537,
      "rewards/margins": 0.155232235789299,
      "rewards/rejected": -0.4638749659061432,
      "step": 360
    },
    {
      "epoch": 0.6577777777777778,
      "grad_norm": 0.632117867399185,
      "learning_rate": 1.5811348254745572e-07,
      "logits/chosen": 13.715913772583008,
      "logits/rejected": 12.367976188659668,
      "logps/chosen": -395.98883056640625,
      "logps/rejected": -431.3076171875,
      "loss": 0.6198,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.3397838771343231,
      "rewards/margins": 0.15032809972763062,
      "rewards/rejected": -0.49011197686195374,
      "step": 370
    },
    {
      "epoch": 0.6755555555555556,
      "grad_norm": 0.49281947117085684,
      "learning_rate": 1.4383670477413674e-07,
      "logits/chosen": 13.90184211730957,
      "logits/rejected": 11.279874801635742,
      "logps/chosen": -461.05078125,
      "logps/rejected": -442.17449951171875,
      "loss": 0.6203,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -0.3398580551147461,
      "rewards/margins": 0.16354627907276154,
      "rewards/rejected": -0.5034043192863464,
      "step": 380
    },
    {
      "epoch": 0.6933333333333334,
      "grad_norm": 0.42880409636253375,
      "learning_rate": 1.2997065216600178e-07,
      "logits/chosen": 13.401517868041992,
      "logits/rejected": 12.245413780212402,
      "logps/chosen": -430.324462890625,
      "logps/rejected": -473.84893798828125,
      "loss": 0.6211,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.3511757552623749,
      "rewards/margins": 0.1507997363805771,
      "rewards/rejected": -0.5019755363464355,
      "step": 390
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.40477681812860294,
      "learning_rate": 1.1656896978687259e-07,
      "logits/chosen": 13.779144287109375,
      "logits/rejected": 11.85938835144043,
      "logps/chosen": -440.189208984375,
      "logps/rejected": -442.59576416015625,
      "loss": 0.6203,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.3478553891181946,
      "rewards/margins": 0.1676493138074875,
      "rewards/rejected": -0.5155047178268433,
      "step": 400
    },
    {
      "epoch": 0.7111111111111111,
      "eval_logits/chosen": 13.944354057312012,
      "eval_logits/rejected": 12.625584602355957,
      "eval_logps/chosen": -443.5887451171875,
      "eval_logps/rejected": -453.4456787109375,
      "eval_loss": 0.6203290224075317,
      "eval_rewards/accuracies": 0.7539682388305664,
      "eval_rewards/chosen": -0.37383800745010376,
      "eval_rewards/margins": 0.15748701989650726,
      "eval_rewards/rejected": -0.531325101852417,
      "eval_runtime": 91.0982,
      "eval_samples_per_second": 10.977,
      "eval_steps_per_second": 0.692,
      "step": 400
    },
    {
      "epoch": 0.7288888888888889,
      "grad_norm": 0.4976401369848891,
      "learning_rate": 1.0368350614236685e-07,
      "logits/chosen": 14.432429313659668,
      "logits/rejected": 12.734024047851562,
      "logps/chosen": -437.3631286621094,
      "logps/rejected": -450.3123474121094,
      "loss": 0.6156,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.37959882616996765,
      "rewards/margins": 0.17338675260543823,
      "rewards/rejected": -0.5529855489730835,
      "step": 410
    },
    {
      "epoch": 0.7466666666666667,
      "grad_norm": 0.49003467823924163,
      "learning_rate": 9.136411258810229e-08,
      "logits/chosen": 14.124117851257324,
      "logits/rejected": 12.797445297241211,
      "logps/chosen": -429.11767578125,
      "logps/rejected": -457.7822265625,
      "loss": 0.6164,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.3667467534542084,
      "rewards/margins": 0.17160849273204803,
      "rewards/rejected": -0.5383552312850952,
      "step": 420
    },
    {
      "epoch": 0.7644444444444445,
      "grad_norm": 0.5544257285079301,
      "learning_rate": 7.965845046448657e-08,
      "logits/chosen": 14.171014785766602,
      "logits/rejected": 11.860380172729492,
      "logps/chosen": -460.3724670410156,
      "logps/rejected": -464.814208984375,
      "loss": 0.6111,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.3682584762573242,
      "rewards/margins": 0.21057240664958954,
      "rewards/rejected": -0.578830897808075,
      "step": 430
    },
    {
      "epoch": 0.7822222222222223,
      "grad_norm": 0.4893812986921009,
      "learning_rate": 6.861180670424982e-08,
      "logits/chosen": 14.053049087524414,
      "logits/rejected": 12.361749649047852,
      "logps/chosen": -458.22113037109375,
      "logps/rejected": -473.56927490234375,
      "loss": 0.6182,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.38496845960617065,
      "rewards/margins": 0.1849747598171234,
      "rewards/rejected": -0.5699432492256165,
      "step": 440
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.451783793255229,
      "learning_rate": 5.826691862609986e-08,
      "logits/chosen": 14.11127758026123,
      "logits/rejected": 12.265645027160645,
      "logps/chosen": -446.2030334472656,
      "logps/rejected": -448.130859375,
      "loss": 0.6154,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.3934079110622406,
      "rewards/margins": 0.20773550868034363,
      "rewards/rejected": -0.601143479347229,
      "step": 450
    },
    {
      "epoch": 0.8177777777777778,
      "grad_norm": 0.5757576180607112,
      "learning_rate": 4.86638085923389e-08,
      "logits/chosen": 13.425259590148926,
      "logits/rejected": 11.878642082214355,
      "logps/chosen": -452.25042724609375,
      "logps/rejected": -480.77154541015625,
      "loss": 0.6026,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.42305225133895874,
      "rewards/margins": 0.19301000237464905,
      "rewards/rejected": -0.6160622835159302,
      "step": 460
    },
    {
      "epoch": 0.8355555555555556,
      "grad_norm": 0.6028404780293103,
      "learning_rate": 3.983962917011829e-08,
      "logits/chosen": 13.737588882446289,
      "logits/rejected": 11.253530502319336,
      "logps/chosen": -453.21466064453125,
      "logps/rejected": -455.3412170410156,
      "loss": 0.6099,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.40479034185409546,
      "rewards/margins": 0.23396852612495422,
      "rewards/rejected": -0.6387587785720825,
      "step": 470
    },
    {
      "epoch": 0.8533333333333334,
      "grad_norm": 0.4822754579818044,
      "learning_rate": 3.182851939537409e-08,
      "logits/chosen": 13.7351655960083,
      "logits/rejected": 12.563407897949219,
      "logps/chosen": -420.5502014160156,
      "logps/rejected": -467.66143798828125,
      "loss": 0.6121,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.4305228590965271,
      "rewards/margins": 0.1863301545381546,
      "rewards/rejected": -0.6168529987335205,
      "step": 480
    },
    {
      "epoch": 0.8711111111111111,
      "grad_norm": 0.49761272281985847,
      "learning_rate": 2.466147269552893e-08,
      "logits/chosen": 13.694448471069336,
      "logits/rejected": 12.38302230834961,
      "logps/chosen": -429.01910400390625,
      "logps/rejected": -461.55718994140625,
      "loss": 0.6128,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.4084044396877289,
      "rewards/margins": 0.16671887040138245,
      "rewards/rejected": -0.5751233100891113,
      "step": 490
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.6004438030491424,
      "learning_rate": 1.8366216981942628e-08,
      "logits/chosen": 13.4122896194458,
      "logits/rejected": 11.679363250732422,
      "logps/chosen": -436.2166442871094,
      "logps/rejected": -459.05841064453125,
      "loss": 0.6102,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.4055519104003906,
      "rewards/margins": 0.21610493957996368,
      "rewards/rejected": -0.6216568946838379,
      "step": 500
    },
    {
      "epoch": 0.8888888888888888,
      "eval_logits/chosen": 13.842653274536133,
      "eval_logits/rejected": 12.531366348266602,
      "eval_logps/chosen": -447.70013427734375,
      "eval_logps/rejected": -459.2376403808594,
      "eval_loss": 0.6131365299224854,
      "eval_rewards/accuracies": 0.7539682388305664,
      "eval_rewards/chosen": -0.4149521589279175,
      "eval_rewards/margins": 0.17429186403751373,
      "eval_rewards/rejected": -0.5892440676689148,
      "eval_runtime": 91.0747,
      "eval_samples_per_second": 10.98,
      "eval_steps_per_second": 0.692,
      "step": 500
    },
    {
      "epoch": 0.9066666666666666,
      "grad_norm": 0.512528339625896,
      "learning_rate": 1.296710737600934e-08,
      "logits/chosen": 14.248278617858887,
      "logits/rejected": 11.994184494018555,
      "logps/chosen": -501.92333984375,
      "logps/rejected": -475.35723876953125,
      "loss": 0.6136,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.40441107749938965,
      "rewards/margins": 0.22114601731300354,
      "rewards/rejected": -0.6255571246147156,
      "step": 510
    },
    {
      "epoch": 0.9244444444444444,
      "grad_norm": 0.46717306963218175,
      "learning_rate": 8.485031983924557e-09,
      "logits/chosen": 13.698689460754395,
      "logits/rejected": 11.797239303588867,
      "logps/chosen": -473.1220703125,
      "logps/rejected": -475.6698303222656,
      "loss": 0.609,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.40674418210983276,
      "rewards/margins": 0.2259480506181717,
      "rewards/rejected": -0.6326922178268433,
      "step": 520
    },
    {
      "epoch": 0.9422222222222222,
      "grad_norm": 0.46210848243091895,
      "learning_rate": 4.937331084660129e-09,
      "logits/chosen": 13.86840534210205,
      "logits/rejected": 11.828570365905762,
      "logps/chosen": -465.6477966308594,
      "logps/rejected": -473.4181213378906,
      "loss": 0.607,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.4186909794807434,
      "rewards/margins": 0.19602572917938232,
      "rewards/rejected": -0.614716649055481,
      "step": 530
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.48983149292621647,
      "learning_rate": 2.337730043793423e-09,
      "logits/chosen": 13.412510871887207,
      "logits/rejected": 11.925616264343262,
      "logps/chosen": -441.61871337890625,
      "logps/rejected": -453.08489990234375,
      "loss": 0.6106,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.42602628469467163,
      "rewards/margins": 0.19294434785842896,
      "rewards/rejected": -0.6189705729484558,
      "step": 540
    },
    {
      "epoch": 0.9777777777777777,
      "grad_norm": 0.46702206339372576,
      "learning_rate": 6.962862127343205e-10,
      "logits/chosen": 14.47253131866455,
      "logits/rejected": 12.454832077026367,
      "logps/chosen": -464.0704650878906,
      "logps/rejected": -453.48187255859375,
      "loss": 0.6073,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.4236368238925934,
      "rewards/margins": 0.20897309482097626,
      "rewards/rejected": -0.6326099038124084,
      "step": 550
    },
    {
      "epoch": 0.9955555555555555,
      "grad_norm": 0.620631975977973,
      "learning_rate": 1.9350018786556956e-11,
      "logits/chosen": 13.917093276977539,
      "logits/rejected": 12.686841011047363,
      "logps/chosen": -464.35980224609375,
      "logps/rejected": -498.465576171875,
      "loss": 0.6102,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.4351399838924408,
      "rewards/margins": 0.1616152673959732,
      "rewards/rejected": -0.5967552661895752,
      "step": 560
    },
    {
      "epoch": 0.9991111111111111,
      "step": 562,
      "total_flos": 0.0,
      "train_loss": 0.6475430355801701,
      "train_runtime": 7558.4306,
      "train_samples_per_second": 4.763,
      "train_steps_per_second": 0.074
    }
  ],
  "logging_steps": 10,
  "max_steps": 562,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}