MEDVQACpaligemma-adapter / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.3710729104919976,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07903576368306658,
      "grad_norm": 2.3245134353637695,
      "learning_rate": 1.948325863432639e-05,
      "loss": 2.9096,
      "step": 100
    },
    {
      "epoch": 0.15807152736613317,
      "grad_norm": 2.990708112716675,
      "learning_rate": 1.8955971526496178e-05,
      "loss": 0.7375,
      "step": 200
    },
    {
      "epoch": 0.23710729104919975,
      "grad_norm": 2.713043451309204,
      "learning_rate": 1.8428684418665965e-05,
      "loss": 0.4412,
      "step": 300
    },
    {
      "epoch": 0.31614305473226634,
      "grad_norm": 2.839672088623047,
      "learning_rate": 1.790139731083575e-05,
      "loss": 0.3861,
      "step": 400
    },
    {
      "epoch": 0.3951788184153329,
      "grad_norm": 2.5622401237487793,
      "learning_rate": 1.7374110203005538e-05,
      "loss": 0.3694,
      "step": 500
    },
    {
      "epoch": 0.4742145820983995,
      "grad_norm": 2.527787923812866,
      "learning_rate": 1.6846823095175325e-05,
      "loss": 0.3538,
      "step": 600
    },
    {
      "epoch": 0.5532503457814661,
      "grad_norm": 2.7099175453186035,
      "learning_rate": 1.631953598734511e-05,
      "loss": 0.3478,
      "step": 700
    },
    {
      "epoch": 0.6322861094645327,
      "grad_norm": 2.159538745880127,
      "learning_rate": 1.5792248879514898e-05,
      "loss": 0.338,
      "step": 800
    },
    {
      "epoch": 0.7113218731475993,
      "grad_norm": 2.3641576766967773,
      "learning_rate": 1.5264961771684685e-05,
      "loss": 0.3423,
      "step": 900
    },
    {
      "epoch": 0.7903576368306658,
      "grad_norm": 2.5248115062713623,
      "learning_rate": 1.4737674663854471e-05,
      "loss": 0.3244,
      "step": 1000
    },
    {
      "epoch": 0.8693934005137325,
      "grad_norm": 2.2045676708221436,
      "learning_rate": 1.4210387556024256e-05,
      "loss": 0.3215,
      "step": 1100
    },
    {
      "epoch": 0.948429164196799,
      "grad_norm": 2.5387887954711914,
      "learning_rate": 1.3683100448194043e-05,
      "loss": 0.3239,
      "step": 1200
    },
    {
      "epoch": 1.0274649278798655,
      "grad_norm": 2.1010782718658447,
      "learning_rate": 1.315581334036383e-05,
      "loss": 0.316,
      "step": 1300
    },
    {
      "epoch": 1.1065006915629323,
      "grad_norm": 2.8399364948272705,
      "learning_rate": 1.2628526232533616e-05,
      "loss": 0.3143,
      "step": 1400
    },
    {
      "epoch": 1.1855364552459988,
      "grad_norm": 2.4526548385620117,
      "learning_rate": 1.2101239124703401e-05,
      "loss": 0.3076,
      "step": 1500
    },
    {
      "epoch": 1.2645722189290654,
      "grad_norm": 2.1812686920166016,
      "learning_rate": 1.157395201687319e-05,
      "loss": 0.3074,
      "step": 1600
    },
    {
      "epoch": 1.343607982612132,
      "grad_norm": 2.12322735786438,
      "learning_rate": 1.1046664909042974e-05,
      "loss": 0.3019,
      "step": 1700
    },
    {
      "epoch": 1.4226437462951986,
      "grad_norm": 2.5394980907440186,
      "learning_rate": 1.0519377801212763e-05,
      "loss": 0.3114,
      "step": 1800
    },
    {
      "epoch": 1.5016795099782652,
      "grad_norm": 2.698906660079956,
      "learning_rate": 9.992090693382548e-06,
      "loss": 0.302,
      "step": 1900
    },
    {
      "epoch": 1.580715273661332,
      "grad_norm": 2.7786617279052734,
      "learning_rate": 9.464803585552334e-06,
      "loss": 0.2995,
      "step": 2000
    },
    {
      "epoch": 1.6597510373443982,
      "grad_norm": 2.2823212146759033,
      "learning_rate": 8.937516477722121e-06,
      "loss": 0.3117,
      "step": 2100
    },
    {
      "epoch": 1.738786801027465,
      "grad_norm": 2.129678726196289,
      "learning_rate": 8.410229369891908e-06,
      "loss": 0.3045,
      "step": 2200
    },
    {
      "epoch": 1.8178225647105315,
      "grad_norm": 1.627656102180481,
      "learning_rate": 7.882942262061694e-06,
      "loss": 0.297,
      "step": 2300
    },
    {
      "epoch": 1.896858328393598,
      "grad_norm": 1.6226264238357544,
      "learning_rate": 7.355655154231479e-06,
      "loss": 0.2987,
      "step": 2400
    },
    {
      "epoch": 1.9758940920766648,
      "grad_norm": 1.6640758514404297,
      "learning_rate": 6.828368046401266e-06,
      "loss": 0.3022,
      "step": 2500
    },
    {
      "epoch": 2.054929855759731,
      "grad_norm": 2.3779549598693848,
      "learning_rate": 6.3010809385710525e-06,
      "loss": 0.2985,
      "step": 2600
    },
    {
      "epoch": 2.133965619442798,
      "grad_norm": 2.0160937309265137,
      "learning_rate": 5.773793830740839e-06,
      "loss": 0.2986,
      "step": 2700
    },
    {
      "epoch": 2.2130013831258646,
      "grad_norm": 2.8362996578216553,
      "learning_rate": 5.246506722910625e-06,
      "loss": 0.2997,
      "step": 2800
    },
    {
      "epoch": 2.292037146808931,
      "grad_norm": 1.8160929679870605,
      "learning_rate": 4.7192196150804116e-06,
      "loss": 0.2847,
      "step": 2900
    },
    {
      "epoch": 2.3710729104919976,
      "grad_norm": 1.660947322845459,
      "learning_rate": 4.191932507250198e-06,
      "loss": 0.2913,
      "step": 3000
    }
  ],
  "logging_steps": 100,
  "max_steps": 3795,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9485385996265165e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
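
For reference, a minimal sketch (not part of the repository) of how the log_history above can be inspected. The local file path and the matplotlib dependency are assumptions for illustration; only entries that carry a "loss" key are plotted.

import json

import matplotlib.pyplot as plt  # assumed available; any plotting tool works

with open("trainer_state.json") as f:  # hypothetical local path to this file
    state = json.load(f)

# Training loss is logged every logging_steps=100 global steps.
logged = [entry for entry in state["log_history"] if "loss" in entry]
steps = [entry["step"] for entry in logged]
losses = [entry["loss"] for entry in logged]

plt.plot(steps, losses, marker="o")
plt.xlabel("global step")
plt.ylabel("training loss")
plt.show()

As recorded above, the logged loss falls from 2.9096 at step 100 to roughly 0.29 by step 3000, covering about 2.37 of the 3 planned epochs (3000 of 3795 steps).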