{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.3235294117647059,
"eval_steps": 500,
"global_step": 22000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.9632352941176476e-05,
"loss": 6.4268,
"step": 500
},
{
"epoch": 0.01,
"learning_rate": 4.9264705882352944e-05,
"loss": 5.6382,
"step": 1000
},
{
"epoch": 0.02,
"learning_rate": 4.889705882352941e-05,
"loss": 5.3162,
"step": 1500
},
{
"epoch": 0.03,
"learning_rate": 4.8529411764705885e-05,
"loss": 4.9804,
"step": 2000
},
{
"epoch": 0.04,
"learning_rate": 4.816176470588236e-05,
"loss": 4.6877,
"step": 2500
},
{
"epoch": 0.04,
"learning_rate": 4.7794117647058826e-05,
"loss": 4.4392,
"step": 3000
},
{
"epoch": 0.05,
"learning_rate": 4.742647058823529e-05,
"loss": 4.3057,
"step": 3500
},
{
"epoch": 0.06,
"learning_rate": 4.705882352941177e-05,
"loss": 4.1658,
"step": 4000
},
{
"epoch": 0.07,
"learning_rate": 4.669117647058824e-05,
"loss": 4.0342,
"step": 4500
},
{
"epoch": 0.07,
"learning_rate": 4.632352941176471e-05,
"loss": 3.9385,
"step": 5000
},
{
"epoch": 0.08,
"learning_rate": 4.5955882352941176e-05,
"loss": 3.801,
"step": 5500
},
{
"epoch": 0.09,
"learning_rate": 4.558823529411765e-05,
"loss": 3.7073,
"step": 6000
},
{
"epoch": 0.1,
"learning_rate": 4.522058823529412e-05,
"loss": 3.6018,
"step": 6500
},
{
"epoch": 0.1,
"learning_rate": 4.485294117647059e-05,
"loss": 3.564,
"step": 7000
},
{
"epoch": 0.11,
"learning_rate": 4.448529411764706e-05,
"loss": 3.4792,
"step": 7500
},
{
"epoch": 0.12,
"learning_rate": 4.411764705882353e-05,
"loss": 3.4302,
"step": 8000
},
{
"epoch": 0.12,
"learning_rate": 4.375e-05,
"loss": 3.3876,
"step": 8500
},
{
"epoch": 0.13,
"learning_rate": 4.3382352941176474e-05,
"loss": 3.3018,
"step": 9000
},
{
"epoch": 0.14,
"learning_rate": 4.301470588235295e-05,
"loss": 3.3003,
"step": 9500
},
{
"epoch": 0.15,
"learning_rate": 4.2647058823529415e-05,
"loss": 3.2908,
"step": 10000
},
{
"epoch": 0.15,
"learning_rate": 4.227941176470588e-05,
"loss": 3.4036,
"step": 10500
},
{
"epoch": 0.16,
"learning_rate": 4.1911764705882356e-05,
"loss": 3.3214,
"step": 11000
},
{
"epoch": 0.17,
"learning_rate": 4.154411764705883e-05,
"loss": 3.2409,
"step": 11500
},
{
"epoch": 0.18,
"learning_rate": 4.11764705882353e-05,
"loss": 3.2363,
"step": 12000
},
{
"epoch": 0.18,
"learning_rate": 4.0808823529411765e-05,
"loss": 3.1487,
"step": 12500
},
{
"epoch": 0.19,
"learning_rate": 4.044117647058824e-05,
"loss": 3.1593,
"step": 13000
},
{
"epoch": 0.2,
"learning_rate": 4.007352941176471e-05,
"loss": 3.1009,
"step": 13500
},
{
"epoch": 0.21,
"learning_rate": 3.970588235294117e-05,
"loss": 3.085,
"step": 14000
},
{
"epoch": 0.21,
"learning_rate": 3.933823529411765e-05,
"loss": 3.0667,
"step": 14500
},
{
"epoch": 0.22,
"learning_rate": 3.897058823529412e-05,
"loss": 3.0767,
"step": 15000
},
{
"epoch": 0.23,
"learning_rate": 3.8602941176470595e-05,
"loss": 3.0124,
"step": 15500
},
{
"epoch": 0.24,
"learning_rate": 3.8235294117647055e-05,
"loss": 3.0449,
"step": 16000
},
{
"epoch": 0.24,
"learning_rate": 3.786764705882353e-05,
"loss": 2.9992,
"step": 16500
},
{
"epoch": 0.25,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.9758,
"step": 17000
},
{
"epoch": 0.26,
"learning_rate": 3.713235294117647e-05,
"loss": 2.9588,
"step": 17500
},
{
"epoch": 0.26,
"learning_rate": 3.6764705882352945e-05,
"loss": 2.9493,
"step": 18000
},
{
"epoch": 0.27,
"learning_rate": 3.639705882352941e-05,
"loss": 2.932,
"step": 18500
},
{
"epoch": 0.28,
"learning_rate": 3.6029411764705886e-05,
"loss": 2.8935,
"step": 19000
},
{
"epoch": 0.29,
"learning_rate": 3.566176470588235e-05,
"loss": 2.9175,
"step": 19500
},
{
"epoch": 0.29,
"learning_rate": 3.529411764705883e-05,
"loss": 2.868,
"step": 20000
},
{
"epoch": 0.3,
"learning_rate": 3.4926470588235294e-05,
"loss": 2.9202,
"step": 20500
},
{
"epoch": 0.31,
"learning_rate": 3.455882352941177e-05,
"loss": 2.8633,
"step": 21000
},
{
"epoch": 0.32,
"learning_rate": 3.4191176470588236e-05,
"loss": 2.8141,
"step": 21500
},
{
"epoch": 0.32,
"learning_rate": 3.382352941176471e-05,
"loss": 2.84,
"step": 22000
}
],
"logging_steps": 500,
"max_steps": 68000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 2000,
"total_flos": 1.5955896827904e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}