{
"best_metric": 0.9340658097220965,
"best_model_checkpoint": "batch-size-16_FFPP-c23_1FPS_faces-expand-0-aligned_unaugmentation\\checkpoint-1381",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1381,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 3.2925620079040527,
"learning_rate": 3.5971223021582732e-06,
"loss": 0.628,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 2.3982741832733154,
"learning_rate": 7.1942446043165465e-06,
"loss": 0.5814,
"step": 20
},
{
"epoch": 0.02,
"grad_norm": 2.776655673980713,
"learning_rate": 1.0791366906474821e-05,
"loss": 0.5151,
"step": 30
},
{
"epoch": 0.03,
"grad_norm": 2.050455331802368,
"learning_rate": 1.4388489208633093e-05,
"loss": 0.5308,
"step": 40
},
{
"epoch": 0.04,
"grad_norm": 2.005922794342041,
"learning_rate": 1.7985611510791367e-05,
"loss": 0.5363,
"step": 50
},
{
"epoch": 0.04,
"grad_norm": 1.7546412944793701,
"learning_rate": 2.1582733812949642e-05,
"loss": 0.4718,
"step": 60
},
{
"epoch": 0.05,
"grad_norm": 3.74489426612854,
"learning_rate": 2.5179856115107914e-05,
"loss": 0.5125,
"step": 70
},
{
"epoch": 0.06,
"grad_norm": 2.5463011264801025,
"learning_rate": 2.8776978417266186e-05,
"loss": 0.505,
"step": 80
},
{
"epoch": 0.07,
"grad_norm": 5.876346588134766,
"learning_rate": 3.237410071942446e-05,
"loss": 0.5294,
"step": 90
},
{
"epoch": 0.07,
"grad_norm": 3.9089746475219727,
"learning_rate": 3.597122302158273e-05,
"loss": 0.5465,
"step": 100
},
{
"epoch": 0.08,
"grad_norm": 5.623407363891602,
"learning_rate": 3.956834532374101e-05,
"loss": 0.5042,
"step": 110
},
{
"epoch": 0.09,
"grad_norm": 4.3956780433654785,
"learning_rate": 4.3165467625899284e-05,
"loss": 0.5046,
"step": 120
},
{
"epoch": 0.09,
"grad_norm": 4.22213077545166,
"learning_rate": 4.676258992805755e-05,
"loss": 0.4896,
"step": 130
},
{
"epoch": 0.1,
"grad_norm": 5.061640739440918,
"learning_rate": 4.99597423510467e-05,
"loss": 0.4615,
"step": 140
},
{
"epoch": 0.11,
"grad_norm": 3.777334213256836,
"learning_rate": 4.9557165861513685e-05,
"loss": 0.4725,
"step": 150
},
{
"epoch": 0.12,
"grad_norm": 8.781298637390137,
"learning_rate": 4.915458937198068e-05,
"loss": 0.4155,
"step": 160
},
{
"epoch": 0.12,
"grad_norm": 6.117956638336182,
"learning_rate": 4.875201288244767e-05,
"loss": 0.4209,
"step": 170
},
{
"epoch": 0.13,
"grad_norm": 3.1794047355651855,
"learning_rate": 4.834943639291466e-05,
"loss": 0.4149,
"step": 180
},
{
"epoch": 0.14,
"grad_norm": 4.002980709075928,
"learning_rate": 4.7946859903381646e-05,
"loss": 0.5147,
"step": 190
},
{
"epoch": 0.14,
"grad_norm": 2.6848464012145996,
"learning_rate": 4.7544283413848634e-05,
"loss": 0.4649,
"step": 200
},
{
"epoch": 0.15,
"grad_norm": 5.1571807861328125,
"learning_rate": 4.714170692431562e-05,
"loss": 0.4018,
"step": 210
},
{
"epoch": 0.16,
"grad_norm": 7.3869309425354,
"learning_rate": 4.673913043478261e-05,
"loss": 0.3477,
"step": 220
},
{
"epoch": 0.17,
"grad_norm": 5.493707656860352,
"learning_rate": 4.63365539452496e-05,
"loss": 0.3752,
"step": 230
},
{
"epoch": 0.17,
"grad_norm": 6.3577070236206055,
"learning_rate": 4.593397745571659e-05,
"loss": 0.3779,
"step": 240
},
{
"epoch": 0.18,
"grad_norm": 6.673830986022949,
"learning_rate": 4.553140096618358e-05,
"loss": 0.4141,
"step": 250
},
{
"epoch": 0.19,
"grad_norm": 3.704331636428833,
"learning_rate": 4.5128824476650565e-05,
"loss": 0.3926,
"step": 260
},
{
"epoch": 0.2,
"grad_norm": 5.406624794006348,
"learning_rate": 4.4726247987117554e-05,
"loss": 0.3742,
"step": 270
},
{
"epoch": 0.2,
"grad_norm": 5.6158528327941895,
"learning_rate": 4.432367149758454e-05,
"loss": 0.3715,
"step": 280
},
{
"epoch": 0.21,
"grad_norm": 5.550079345703125,
"learning_rate": 4.392109500805153e-05,
"loss": 0.3882,
"step": 290
},
{
"epoch": 0.22,
"grad_norm": 5.96751594543457,
"learning_rate": 4.351851851851852e-05,
"loss": 0.3615,
"step": 300
},
{
"epoch": 0.22,
"grad_norm": 5.48460054397583,
"learning_rate": 4.3115942028985515e-05,
"loss": 0.3233,
"step": 310
},
{
"epoch": 0.23,
"grad_norm": 5.41163969039917,
"learning_rate": 4.2713365539452496e-05,
"loss": 0.3723,
"step": 320
},
{
"epoch": 0.24,
"grad_norm": 6.076368808746338,
"learning_rate": 4.2310789049919485e-05,
"loss": 0.3684,
"step": 330
},
{
"epoch": 0.25,
"grad_norm": 9.160358428955078,
"learning_rate": 4.1908212560386474e-05,
"loss": 0.3381,
"step": 340
},
{
"epoch": 0.25,
"grad_norm": 5.847888469696045,
"learning_rate": 4.150563607085346e-05,
"loss": 0.3669,
"step": 350
},
{
"epoch": 0.26,
"grad_norm": 9.13357925415039,
"learning_rate": 4.110305958132045e-05,
"loss": 0.3717,
"step": 360
},
{
"epoch": 0.27,
"grad_norm": 6.672016143798828,
"learning_rate": 4.070048309178744e-05,
"loss": 0.3121,
"step": 370
},
{
"epoch": 0.28,
"grad_norm": 5.612246513366699,
"learning_rate": 4.0297906602254434e-05,
"loss": 0.3004,
"step": 380
},
{
"epoch": 0.28,
"grad_norm": 4.514873027801514,
"learning_rate": 3.9895330112721416e-05,
"loss": 0.3153,
"step": 390
},
{
"epoch": 0.29,
"grad_norm": 4.649542331695557,
"learning_rate": 3.9492753623188405e-05,
"loss": 0.3332,
"step": 400
},
{
"epoch": 0.3,
"grad_norm": 8.960000038146973,
"learning_rate": 3.90901771336554e-05,
"loss": 0.3117,
"step": 410
},
{
"epoch": 0.3,
"grad_norm": 4.450685977935791,
"learning_rate": 3.868760064412238e-05,
"loss": 0.2995,
"step": 420
},
{
"epoch": 0.31,
"grad_norm": 7.965936183929443,
"learning_rate": 3.828502415458937e-05,
"loss": 0.3106,
"step": 430
},
{
"epoch": 0.32,
"grad_norm": 5.865048885345459,
"learning_rate": 3.7882447665056365e-05,
"loss": 0.3165,
"step": 440
},
{
"epoch": 0.33,
"grad_norm": 6.963225841522217,
"learning_rate": 3.7479871175523354e-05,
"loss": 0.318,
"step": 450
},
{
"epoch": 0.33,
"grad_norm": 6.650681018829346,
"learning_rate": 3.7077294685990336e-05,
"loss": 0.3081,
"step": 460
},
{
"epoch": 0.34,
"grad_norm": 4.959587574005127,
"learning_rate": 3.667471819645733e-05,
"loss": 0.3109,
"step": 470
},
{
"epoch": 0.35,
"grad_norm": 5.5469183921813965,
"learning_rate": 3.627214170692432e-05,
"loss": 0.2793,
"step": 480
},
{
"epoch": 0.35,
"grad_norm": 10.897783279418945,
"learning_rate": 3.58695652173913e-05,
"loss": 0.2984,
"step": 490
},
{
"epoch": 0.36,
"grad_norm": 5.365045070648193,
"learning_rate": 3.5466988727858296e-05,
"loss": 0.2797,
"step": 500
},
{
"epoch": 0.37,
"grad_norm": 7.85481071472168,
"learning_rate": 3.5064412238325285e-05,
"loss": 0.3062,
"step": 510
},
{
"epoch": 0.38,
"grad_norm": 5.305897235870361,
"learning_rate": 3.4661835748792274e-05,
"loss": 0.3354,
"step": 520
},
{
"epoch": 0.38,
"grad_norm": 4.42397403717041,
"learning_rate": 3.425925925925926e-05,
"loss": 0.2454,
"step": 530
},
{
"epoch": 0.39,
"grad_norm": 3.9212090969085693,
"learning_rate": 3.385668276972625e-05,
"loss": 0.2564,
"step": 540
},
{
"epoch": 0.4,
"grad_norm": 7.084193229675293,
"learning_rate": 3.345410628019324e-05,
"loss": 0.2709,
"step": 550
},
{
"epoch": 0.41,
"grad_norm": 6.757343292236328,
"learning_rate": 3.305152979066023e-05,
"loss": 0.3131,
"step": 560
},
{
"epoch": 0.41,
"grad_norm": 8.7233304977417,
"learning_rate": 3.2648953301127216e-05,
"loss": 0.2729,
"step": 570
},
{
"epoch": 0.42,
"grad_norm": 6.0495100021362305,
"learning_rate": 3.2246376811594205e-05,
"loss": 0.2555,
"step": 580
},
{
"epoch": 0.43,
"grad_norm": 9.789109230041504,
"learning_rate": 3.184380032206119e-05,
"loss": 0.2636,
"step": 590
},
{
"epoch": 0.43,
"grad_norm": 8.58854866027832,
"learning_rate": 3.144122383252818e-05,
"loss": 0.2687,
"step": 600
},
{
"epoch": 0.44,
"grad_norm": 9.97248363494873,
"learning_rate": 3.103864734299517e-05,
"loss": 0.3013,
"step": 610
},
{
"epoch": 0.45,
"grad_norm": 25.611948013305664,
"learning_rate": 3.063607085346216e-05,
"loss": 0.3343,
"step": 620
},
{
"epoch": 0.46,
"grad_norm": 5.729062080383301,
"learning_rate": 3.023349436392915e-05,
"loss": 0.3457,
"step": 630
},
{
"epoch": 0.46,
"grad_norm": 7.5272216796875,
"learning_rate": 2.9830917874396136e-05,
"loss": 0.2456,
"step": 640
},
{
"epoch": 0.47,
"grad_norm": 7.43132209777832,
"learning_rate": 2.9428341384863124e-05,
"loss": 0.3469,
"step": 650
},
{
"epoch": 0.48,
"grad_norm": 7.458743572235107,
"learning_rate": 2.9025764895330116e-05,
"loss": 0.2884,
"step": 660
},
{
"epoch": 0.49,
"grad_norm": 6.541900634765625,
"learning_rate": 2.86231884057971e-05,
"loss": 0.2466,
"step": 670
},
{
"epoch": 0.49,
"grad_norm": 7.272885322570801,
"learning_rate": 2.822061191626409e-05,
"loss": 0.2345,
"step": 680
},
{
"epoch": 0.5,
"grad_norm": 10.58638858795166,
"learning_rate": 2.781803542673108e-05,
"loss": 0.2519,
"step": 690
},
{
"epoch": 0.51,
"grad_norm": 5.776723384857178,
"learning_rate": 2.741545893719807e-05,
"loss": 0.3001,
"step": 700
},
{
"epoch": 0.51,
"grad_norm": 5.556161880493164,
"learning_rate": 2.7012882447665055e-05,
"loss": 0.2799,
"step": 710
},
{
"epoch": 0.52,
"grad_norm": 5.732090950012207,
"learning_rate": 2.6610305958132047e-05,
"loss": 0.2197,
"step": 720
},
{
"epoch": 0.53,
"grad_norm": 17.775867462158203,
"learning_rate": 2.6207729468599036e-05,
"loss": 0.2938,
"step": 730
},
{
"epoch": 0.54,
"grad_norm": 19.362329483032227,
"learning_rate": 2.580515297906602e-05,
"loss": 0.2327,
"step": 740
},
{
"epoch": 0.54,
"grad_norm": 8.213146209716797,
"learning_rate": 2.5402576489533013e-05,
"loss": 0.2142,
"step": 750
},
{
"epoch": 0.55,
"grad_norm": 9.953818321228027,
"learning_rate": 2.5e-05,
"loss": 0.2403,
"step": 760
},
{
"epoch": 0.56,
"grad_norm": 7.865139007568359,
"learning_rate": 2.459742351046699e-05,
"loss": 0.2481,
"step": 770
},
{
"epoch": 0.56,
"grad_norm": 6.347203254699707,
"learning_rate": 2.4194847020933978e-05,
"loss": 0.2487,
"step": 780
},
{
"epoch": 0.57,
"grad_norm": 7.396281719207764,
"learning_rate": 2.3792270531400967e-05,
"loss": 0.2752,
"step": 790
},
{
"epoch": 0.58,
"grad_norm": 6.594508647918701,
"learning_rate": 2.338969404186796e-05,
"loss": 0.2389,
"step": 800
},
{
"epoch": 0.59,
"grad_norm": 7.60775089263916,
"learning_rate": 2.2987117552334944e-05,
"loss": 0.2621,
"step": 810
},
{
"epoch": 0.59,
"grad_norm": 13.000992774963379,
"learning_rate": 2.2584541062801932e-05,
"loss": 0.2691,
"step": 820
},
{
"epoch": 0.6,
"grad_norm": 11.92383098602295,
"learning_rate": 2.2181964573268924e-05,
"loss": 0.2484,
"step": 830
},
{
"epoch": 0.61,
"grad_norm": 10.424347877502441,
"learning_rate": 2.177938808373591e-05,
"loss": 0.2363,
"step": 840
},
{
"epoch": 0.62,
"grad_norm": 6.517064094543457,
"learning_rate": 2.13768115942029e-05,
"loss": 0.2489,
"step": 850
},
{
"epoch": 0.62,
"grad_norm": 6.191581726074219,
"learning_rate": 2.0974235104669886e-05,
"loss": 0.2632,
"step": 860
},
{
"epoch": 0.63,
"grad_norm": 7.870666027069092,
"learning_rate": 2.0571658615136878e-05,
"loss": 0.2527,
"step": 870
},
{
"epoch": 0.64,
"grad_norm": 7.662616729736328,
"learning_rate": 2.0169082125603867e-05,
"loss": 0.2355,
"step": 880
},
{
"epoch": 0.64,
"grad_norm": 11.486822128295898,
"learning_rate": 1.9766505636070852e-05,
"loss": 0.2169,
"step": 890
},
{
"epoch": 0.65,
"grad_norm": 12.382376670837402,
"learning_rate": 1.9363929146537844e-05,
"loss": 0.2483,
"step": 900
},
{
"epoch": 0.66,
"grad_norm": 10.059280395507812,
"learning_rate": 1.8961352657004832e-05,
"loss": 0.2159,
"step": 910
},
{
"epoch": 0.67,
"grad_norm": 18.096155166625977,
"learning_rate": 1.855877616747182e-05,
"loss": 0.2221,
"step": 920
},
{
"epoch": 0.67,
"grad_norm": 14.859966278076172,
"learning_rate": 1.815619967793881e-05,
"loss": 0.2633,
"step": 930
},
{
"epoch": 0.68,
"grad_norm": 10.2135648727417,
"learning_rate": 1.7753623188405798e-05,
"loss": 0.2817,
"step": 940
},
{
"epoch": 0.69,
"grad_norm": 15.141898155212402,
"learning_rate": 1.7351046698872786e-05,
"loss": 0.2481,
"step": 950
},
{
"epoch": 0.7,
"grad_norm": 11.335433959960938,
"learning_rate": 1.6948470209339775e-05,
"loss": 0.2236,
"step": 960
},
{
"epoch": 0.7,
"grad_norm": 10.088510513305664,
"learning_rate": 1.6545893719806767e-05,
"loss": 0.2564,
"step": 970
},
{
"epoch": 0.71,
"grad_norm": 11.138778686523438,
"learning_rate": 1.6143317230273752e-05,
"loss": 0.2301,
"step": 980
},
{
"epoch": 0.72,
"grad_norm": 9.613357543945312,
"learning_rate": 1.574074074074074e-05,
"loss": 0.2344,
"step": 990
},
{
"epoch": 0.72,
"grad_norm": 7.300370216369629,
"learning_rate": 1.533816425120773e-05,
"loss": 0.198,
"step": 1000
},
{
"epoch": 0.73,
"grad_norm": 8.088990211486816,
"learning_rate": 1.4935587761674719e-05,
"loss": 0.2218,
"step": 1010
},
{
"epoch": 0.74,
"grad_norm": 10.558765411376953,
"learning_rate": 1.4533011272141708e-05,
"loss": 0.2151,
"step": 1020
},
{
"epoch": 0.75,
"grad_norm": 13.40772819519043,
"learning_rate": 1.4130434782608694e-05,
"loss": 0.2308,
"step": 1030
},
{
"epoch": 0.75,
"grad_norm": 10.2852783203125,
"learning_rate": 1.3727858293075685e-05,
"loss": 0.195,
"step": 1040
},
{
"epoch": 0.76,
"grad_norm": 10.811461448669434,
"learning_rate": 1.3325281803542675e-05,
"loss": 0.1728,
"step": 1050
},
{
"epoch": 0.77,
"grad_norm": 6.249987602233887,
"learning_rate": 1.2922705314009662e-05,
"loss": 0.2358,
"step": 1060
},
{
"epoch": 0.77,
"grad_norm": 8.414009094238281,
"learning_rate": 1.2520128824476652e-05,
"loss": 0.2149,
"step": 1070
},
{
"epoch": 0.78,
"grad_norm": 7.3787689208984375,
"learning_rate": 1.211755233494364e-05,
"loss": 0.1711,
"step": 1080
},
{
"epoch": 0.79,
"grad_norm": 8.621789932250977,
"learning_rate": 1.1714975845410629e-05,
"loss": 0.1723,
"step": 1090
},
{
"epoch": 0.8,
"grad_norm": 7.638077735900879,
"learning_rate": 1.1312399355877617e-05,
"loss": 0.2104,
"step": 1100
},
{
"epoch": 0.8,
"grad_norm": 13.404261589050293,
"learning_rate": 1.0909822866344606e-05,
"loss": 0.2187,
"step": 1110
},
{
"epoch": 0.81,
"grad_norm": 15.54051399230957,
"learning_rate": 1.0507246376811594e-05,
"loss": 0.2342,
"step": 1120
},
{
"epoch": 0.82,
"grad_norm": 9.454499244689941,
"learning_rate": 1.0104669887278585e-05,
"loss": 0.2579,
"step": 1130
},
{
"epoch": 0.83,
"grad_norm": 8.479141235351562,
"learning_rate": 9.702093397745571e-06,
"loss": 0.1972,
"step": 1140
},
{
"epoch": 0.83,
"grad_norm": 9.336498260498047,
"learning_rate": 9.29951690821256e-06,
"loss": 0.1805,
"step": 1150
},
{
"epoch": 0.84,
"grad_norm": 7.811723709106445,
"learning_rate": 8.89694041867955e-06,
"loss": 0.2286,
"step": 1160
},
{
"epoch": 0.85,
"grad_norm": 6.146000862121582,
"learning_rate": 8.494363929146539e-06,
"loss": 0.2133,
"step": 1170
},
{
"epoch": 0.85,
"grad_norm": 10.112232208251953,
"learning_rate": 8.091787439613527e-06,
"loss": 0.2021,
"step": 1180
},
{
"epoch": 0.86,
"grad_norm": 12.056436538696289,
"learning_rate": 7.689210950080516e-06,
"loss": 0.1791,
"step": 1190
},
{
"epoch": 0.87,
"grad_norm": 9.647012710571289,
"learning_rate": 7.286634460547505e-06,
"loss": 0.1922,
"step": 1200
},
{
"epoch": 0.88,
"grad_norm": 5.874572277069092,
"learning_rate": 6.884057971014493e-06,
"loss": 0.205,
"step": 1210
},
{
"epoch": 0.88,
"grad_norm": 5.28275203704834,
"learning_rate": 6.481481481481481e-06,
"loss": 0.1951,
"step": 1220
},
{
"epoch": 0.89,
"grad_norm": 13.48544979095459,
"learning_rate": 6.078904991948471e-06,
"loss": 0.1871,
"step": 1230
},
{
"epoch": 0.9,
"grad_norm": 9.40962028503418,
"learning_rate": 5.676328502415459e-06,
"loss": 0.2078,
"step": 1240
},
{
"epoch": 0.91,
"grad_norm": 5.173947811126709,
"learning_rate": 5.273752012882448e-06,
"loss": 0.1712,
"step": 1250
},
{
"epoch": 0.91,
"grad_norm": 9.35919189453125,
"learning_rate": 4.871175523349437e-06,
"loss": 0.259,
"step": 1260
},
{
"epoch": 0.92,
"grad_norm": 9.794632911682129,
"learning_rate": 4.468599033816425e-06,
"loss": 0.2173,
"step": 1270
},
{
"epoch": 0.93,
"grad_norm": 8.138204574584961,
"learning_rate": 4.066022544283414e-06,
"loss": 0.1661,
"step": 1280
},
{
"epoch": 0.93,
"grad_norm": 7.02229118347168,
"learning_rate": 3.663446054750403e-06,
"loss": 0.1961,
"step": 1290
},
{
"epoch": 0.94,
"grad_norm": 8.913147926330566,
"learning_rate": 3.2608695652173914e-06,
"loss": 0.1956,
"step": 1300
},
{
"epoch": 0.95,
"grad_norm": 7.670225620269775,
"learning_rate": 2.85829307568438e-06,
"loss": 0.1706,
"step": 1310
},
{
"epoch": 0.96,
"grad_norm": 10.90648365020752,
"learning_rate": 2.455716586151369e-06,
"loss": 0.2055,
"step": 1320
},
{
"epoch": 0.96,
"grad_norm": 8.81640911102295,
"learning_rate": 2.053140096618358e-06,
"loss": 0.176,
"step": 1330
},
{
"epoch": 0.97,
"grad_norm": 7.678645610809326,
"learning_rate": 1.6505636070853463e-06,
"loss": 0.1897,
"step": 1340
},
{
"epoch": 0.98,
"grad_norm": 9.090548515319824,
"learning_rate": 1.247987117552335e-06,
"loss": 0.2035,
"step": 1350
},
{
"epoch": 0.98,
"grad_norm": 5.917918682098389,
"learning_rate": 8.454106280193237e-07,
"loss": 0.2102,
"step": 1360
},
{
"epoch": 0.99,
"grad_norm": 8.879599571228027,
"learning_rate": 4.428341384863124e-07,
"loss": 0.2003,
"step": 1370
},
{
"epoch": 1.0,
"grad_norm": 9.322664260864258,
"learning_rate": 4.025764895330113e-08,
"loss": 0.193,
"step": 1380
},
{
"epoch": 1.0,
"eval_accuracy": 0.9340658097220965,
"eval_f1": 0.9585606087544003,
"eval_loss": 0.16026902198791504,
"eval_precision": 0.9434178845400079,
"eval_recall": 0.9741973720348661,
"eval_roc_auc": 0.9777643781483449,
"eval_runtime": 159.6047,
"eval_samples_per_second": 553.718,
"eval_steps_per_second": 34.611,
"step": 1381
},
{
"epoch": 1.0,
"step": 1381,
"total_flos": 2.1966728826140099e+18,
"train_loss": 0.2970259432383502,
"train_runtime": 756.4932,
"train_samples_per_second": 116.823,
"train_steps_per_second": 1.826
}
],
"logging_steps": 10,
"max_steps": 1381,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 2.1966728826140099e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}