{
  "best_metric": 0.8260869565217391,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-ve-U11-b-40\\checkpoint-195",
  "epoch": 36.92307692307692,
  "eval_steps": 500,
  "global_step": 240,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.92,
      "eval_accuracy": 0.4782608695652174,
      "eval_loss": 1.5799462795257568,
      "eval_runtime": 0.5821,
      "eval_samples_per_second": 79.027,
      "eval_steps_per_second": 3.436,
      "step": 6
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 2.1773,
      "step": 10
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.34782608695652173,
      "eval_loss": 1.5648330450057983,
      "eval_runtime": 0.6134,
      "eval_samples_per_second": 74.998,
      "eval_steps_per_second": 3.261,
      "step": 13
    },
    {
      "epoch": 2.92,
      "eval_accuracy": 0.32608695652173914,
      "eval_loss": 1.5181727409362793,
      "eval_runtime": 0.6128,
      "eval_samples_per_second": 75.067,
      "eval_steps_per_second": 3.264,
      "step": 19
    },
    {
      "epoch": 3.08,
      "learning_rate": 4.166666666666667e-05,
      "loss": 2.1773,
      "step": 20
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.32608695652173914,
      "eval_loss": 1.423189401626587,
      "eval_runtime": 0.6036,
      "eval_samples_per_second": 76.205,
      "eval_steps_per_second": 3.313,
      "step": 26
    },
    {
      "epoch": 4.62,
      "learning_rate": 4.8611111111111115e-05,
      "loss": 1.8993,
      "step": 30
    },
    {
      "epoch": 4.92,
      "eval_accuracy": 0.391304347826087,
      "eval_loss": 1.350540280342102,
      "eval_runtime": 0.5704,
      "eval_samples_per_second": 80.642,
      "eval_steps_per_second": 3.506,
      "step": 32
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.3695652173913043,
      "eval_loss": 1.2747217416763306,
      "eval_runtime": 0.6013,
      "eval_samples_per_second": 76.504,
      "eval_steps_per_second": 3.326,
      "step": 39
    },
    {
      "epoch": 6.15,
      "learning_rate": 4.62962962962963e-05,
      "loss": 1.5045,
      "step": 40
    },
    {
      "epoch": 6.92,
      "eval_accuracy": 0.3695652173913043,
      "eval_loss": 1.2452056407928467,
      "eval_runtime": 0.6025,
      "eval_samples_per_second": 76.343,
      "eval_steps_per_second": 3.319,
      "step": 45
    },
    {
      "epoch": 7.69,
      "learning_rate": 4.3981481481481486e-05,
      "loss": 1.2431,
      "step": 50
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.2826086956521739,
      "eval_loss": 1.1981565952301025,
      "eval_runtime": 0.6075,
      "eval_samples_per_second": 75.724,
      "eval_steps_per_second": 3.292,
      "step": 52
    },
    {
      "epoch": 8.92,
      "eval_accuracy": 0.30434782608695654,
      "eval_loss": 1.211159110069275,
      "eval_runtime": 0.5789,
      "eval_samples_per_second": 79.463,
      "eval_steps_per_second": 3.455,
      "step": 58
    },
    {
      "epoch": 9.23,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.1225,
      "step": 60
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.5,
      "eval_loss": 1.0159635543823242,
      "eval_runtime": 0.5933,
      "eval_samples_per_second": 77.528,
      "eval_steps_per_second": 3.371,
      "step": 65
    },
    {
      "epoch": 10.77,
      "learning_rate": 3.935185185185186e-05,
      "loss": 0.9942,
      "step": 70
    },
    {
      "epoch": 10.92,
      "eval_accuracy": 0.4782608695652174,
      "eval_loss": 1.0138229131698608,
      "eval_runtime": 0.5808,
      "eval_samples_per_second": 79.195,
      "eval_steps_per_second": 3.443,
      "step": 71
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.5652173913043478,
      "eval_loss": 0.9093583822250366,
      "eval_runtime": 0.5854,
      "eval_samples_per_second": 78.579,
      "eval_steps_per_second": 3.416,
      "step": 78
    },
    {
      "epoch": 12.31,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.9212,
      "step": 80
    },
    {
      "epoch": 12.92,
      "eval_accuracy": 0.5217391304347826,
      "eval_loss": 0.8859941959381104,
      "eval_runtime": 0.6003,
      "eval_samples_per_second": 76.627,
      "eval_steps_per_second": 3.332,
      "step": 84
    },
    {
      "epoch": 13.85,
      "learning_rate": 3.472222222222222e-05,
      "loss": 0.816,
      "step": 90
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 0.76930171251297,
      "eval_runtime": 0.5807,
      "eval_samples_per_second": 79.208,
      "eval_steps_per_second": 3.444,
      "step": 91
    },
    {
      "epoch": 14.92,
      "eval_accuracy": 0.6304347826086957,
      "eval_loss": 0.8289525508880615,
      "eval_runtime": 0.6004,
      "eval_samples_per_second": 76.619,
      "eval_steps_per_second": 3.331,
      "step": 97
    },
    {
      "epoch": 15.38,
      "learning_rate": 3.240740740740741e-05,
      "loss": 0.741,
      "step": 100
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.6739130434782609,
      "eval_loss": 0.7809844017028809,
      "eval_runtime": 0.5868,
      "eval_samples_per_second": 78.396,
      "eval_steps_per_second": 3.409,
      "step": 104
    },
    {
      "epoch": 16.92,
      "learning_rate": 3.0092592592592593e-05,
      "loss": 0.631,
      "step": 110
    },
    {
      "epoch": 16.92,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.6341724991798401,
      "eval_runtime": 0.6046,
      "eval_samples_per_second": 76.081,
      "eval_steps_per_second": 3.308,
      "step": 110
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.6956521739130435,
      "eval_loss": 0.7676540613174438,
      "eval_runtime": 0.5863,
      "eval_samples_per_second": 78.453,
      "eval_steps_per_second": 3.411,
      "step": 117
    },
    {
      "epoch": 18.46,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.6402,
      "step": 120
    },
    {
      "epoch": 18.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.6283207535743713,
      "eval_runtime": 0.5721,
      "eval_samples_per_second": 80.402,
      "eval_steps_per_second": 3.496,
      "step": 123
    },
    {
      "epoch": 20.0,
      "learning_rate": 2.5462962962962965e-05,
      "loss": 0.5477,
      "step": 130
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.717391304347826,
      "eval_loss": 0.6686931848526001,
      "eval_runtime": 0.6092,
      "eval_samples_per_second": 75.512,
      "eval_steps_per_second": 3.283,
      "step": 130
    },
    {
      "epoch": 20.92,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.6369442939758301,
      "eval_runtime": 0.6232,
      "eval_samples_per_second": 73.807,
      "eval_steps_per_second": 3.209,
      "step": 136
    },
    {
      "epoch": 21.54,
      "learning_rate": 2.314814814814815e-05,
      "loss": 0.5023,
      "step": 140
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.6333862543106079,
      "eval_runtime": 0.5864,
      "eval_samples_per_second": 78.448,
      "eval_steps_per_second": 3.411,
      "step": 143
    },
    {
      "epoch": 22.92,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6355035305023193,
      "eval_runtime": 0.592,
      "eval_samples_per_second": 77.698,
      "eval_steps_per_second": 3.378,
      "step": 149
    },
    {
      "epoch": 23.08,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.4802,
      "step": 150
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.5975824594497681,
      "eval_runtime": 0.5851,
      "eval_samples_per_second": 78.623,
      "eval_steps_per_second": 3.418,
      "step": 156
    },
    {
      "epoch": 24.62,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.4336,
      "step": 160
    },
    {
      "epoch": 24.92,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.6111952066421509,
      "eval_runtime": 0.5995,
      "eval_samples_per_second": 76.726,
      "eval_steps_per_second": 3.336,
      "step": 162
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6147894859313965,
      "eval_runtime": 0.5855,
      "eval_samples_per_second": 78.56,
      "eval_steps_per_second": 3.416,
      "step": 169
    },
    {
      "epoch": 26.15,
      "learning_rate": 1.6203703703703704e-05,
      "loss": 0.4203,
      "step": 170
    },
    {
      "epoch": 26.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.637995719909668,
      "eval_runtime": 0.6015,
      "eval_samples_per_second": 76.478,
      "eval_steps_per_second": 3.325,
      "step": 175
    },
    {
      "epoch": 27.69,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.429,
      "step": 180
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6031981706619263,
      "eval_runtime": 0.5868,
      "eval_samples_per_second": 78.388,
      "eval_steps_per_second": 3.408,
      "step": 182
    },
    {
      "epoch": 28.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.6347993612289429,
      "eval_runtime": 0.5954,
      "eval_samples_per_second": 77.256,
      "eval_steps_per_second": 3.359,
      "step": 188
    },
    {
      "epoch": 29.23,
      "learning_rate": 1.1574074074074075e-05,
      "loss": 0.4013,
      "step": 190
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.8260869565217391,
      "eval_loss": 0.6120511293411255,
      "eval_runtime": 0.5862,
      "eval_samples_per_second": 78.467,
      "eval_steps_per_second": 3.412,
      "step": 195
    },
    {
      "epoch": 30.77,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.3747,
      "step": 200
    },
    {
      "epoch": 30.92,
      "eval_accuracy": 0.7391304347826086,
      "eval_loss": 0.6520815491676331,
      "eval_runtime": 0.584,
      "eval_samples_per_second": 78.773,
      "eval_steps_per_second": 3.425,
      "step": 201
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.7608695652173914,
      "eval_loss": 0.642352283000946,
      "eval_runtime": 0.5991,
      "eval_samples_per_second": 76.785,
      "eval_steps_per_second": 3.338,
      "step": 208
    },
    {
      "epoch": 32.31,
      "learning_rate": 6.944444444444445e-06,
      "loss": 0.3668,
      "step": 210
    },
    {
      "epoch": 32.92,
      "eval_accuracy": 0.8260869565217391,
      "eval_loss": 0.6149412989616394,
      "eval_runtime": 0.5861,
      "eval_samples_per_second": 78.481,
      "eval_steps_per_second": 3.412,
      "step": 214
    },
    {
      "epoch": 33.85,
      "learning_rate": 4.6296296296296296e-06,
      "loss": 0.3287,
      "step": 220
    },
    {
      "epoch": 34.0,
      "eval_accuracy": 0.782608695652174,
      "eval_loss": 0.6426060795783997,
      "eval_runtime": 0.586,
      "eval_samples_per_second": 78.496,
      "eval_steps_per_second": 3.413,
      "step": 221
    },
    {
      "epoch": 34.92,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6378837823867798,
      "eval_runtime": 0.6002,
      "eval_samples_per_second": 76.643,
      "eval_steps_per_second": 3.332,
      "step": 227
    },
    {
      "epoch": 35.38,
      "learning_rate": 2.3148148148148148e-06,
      "loss": 0.372,
      "step": 230
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6434963941574097,
      "eval_runtime": 0.6209,
      "eval_samples_per_second": 74.08,
      "eval_steps_per_second": 3.221,
      "step": 234
    },
    {
      "epoch": 36.92,
      "learning_rate": 0.0,
      "loss": 0.3236,
      "step": 240
    },
    {
      "epoch": 36.92,
      "eval_accuracy": 0.8043478260869565,
      "eval_loss": 0.6449573636054993,
      "eval_runtime": 0.5774,
      "eval_samples_per_second": 79.664,
      "eval_steps_per_second": 3.464,
      "step": 240
    },
    {
      "epoch": 36.92,
      "step": 240,
      "total_flos": 7.519854010178765e+17,
      "train_loss": 0.8269948333501815,
      "train_runtime": 291.9837,
      "train_samples_per_second": 112.198,
      "train_steps_per_second": 0.822
    }
  ],
  "logging_steps": 10,
  "max_steps": 240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "total_flos": 7.519854010178765e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}