{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.911147011308563,
"eval_steps": 500,
"global_step": 95,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 6.666666666666667e-05,
"loss": 4.1306,
"step": 1
},
{
"epoch": 0.1,
"learning_rate": 0.00013333333333333334,
"loss": 4.2147,
"step": 2
},
{
"epoch": 0.16,
"learning_rate": 0.0002,
"loss": 4.5581,
"step": 3
},
{
"epoch": 0.21,
"learning_rate": 0.0001999417022366174,
"loss": 3.3437,
"step": 4
},
{
"epoch": 0.26,
"learning_rate": 0.00019976687691905393,
"loss": 2.2555,
"step": 5
},
{
"epoch": 0.31,
"learning_rate": 0.00019947572788580947,
"loss": 2.3575,
"step": 6
},
{
"epoch": 0.36,
"learning_rate": 0.00019906859460363307,
"loss": 2.0886,
"step": 7
},
{
"epoch": 0.41,
"learning_rate": 0.00019854595177171968,
"loss": 2.0926,
"step": 8
},
{
"epoch": 0.47,
"learning_rate": 0.00019790840876823232,
"loss": 1.774,
"step": 9
},
{
"epoch": 0.52,
"learning_rate": 0.00019715670893979414,
"loss": 1.7544,
"step": 10
},
{
"epoch": 0.57,
"learning_rate": 0.00019629172873477995,
"loss": 2.0039,
"step": 11
},
{
"epoch": 0.62,
"learning_rate": 0.00019531447668141608,
"loss": 1.6882,
"step": 12
},
{
"epoch": 0.67,
"learning_rate": 0.00019422609221188207,
"loss": 1.7233,
"step": 13
},
{
"epoch": 0.72,
"learning_rate": 0.0001930278443337833,
"loss": 1.6797,
"step": 14
},
{
"epoch": 0.78,
"learning_rate": 0.00019172113015054532,
"loss": 1.6446,
"step": 15
},
{
"epoch": 0.83,
"learning_rate": 0.00019030747323245327,
"loss": 1.8725,
"step": 16
},
{
"epoch": 0.88,
"learning_rate": 0.0001887885218402375,
"loss": 1.8063,
"step": 17
},
{
"epoch": 0.93,
"learning_rate": 0.00018716604700327514,
"loss": 1.8655,
"step": 18
},
{
"epoch": 0.98,
"learning_rate": 0.00018544194045464886,
"loss": 1.7658,
"step": 19
},
{
"epoch": 1.03,
"learning_rate": 0.0001836182124254711,
"loss": 1.3927,
"step": 20
},
{
"epoch": 1.09,
"learning_rate": 0.0001816969893010442,
"loss": 1.4465,
"step": 21
},
{
"epoch": 1.14,
"learning_rate": 0.00017968051114159047,
"loss": 1.2664,
"step": 22
},
{
"epoch": 1.19,
"learning_rate": 0.000177571129070442,
"loss": 1.5249,
"step": 23
},
{
"epoch": 1.24,
"learning_rate": 0.00017537130253273613,
"loss": 1.2534,
"step": 24
},
{
"epoch": 1.29,
"learning_rate": 0.00017308359642781242,
"loss": 1.2423,
"step": 25
},
{
"epoch": 1.34,
"learning_rate": 0.00017071067811865476,
"loss": 1.087,
"step": 26
},
{
"epoch": 1.4,
"learning_rate": 0.00016825531432186543,
"loss": 1.3184,
"step": 27
},
{
"epoch": 1.45,
"learning_rate": 0.00016572036788179727,
"loss": 1.211,
"step": 28
},
{
"epoch": 1.5,
"learning_rate": 0.00016310879443260528,
"loss": 1.2384,
"step": 29
},
{
"epoch": 1.55,
"learning_rate": 0.00016042363895210946,
"loss": 1.2669,
"step": 30
},
{
"epoch": 1.6,
"learning_rate": 0.00015766803221148673,
"loss": 1.1047,
"step": 31
},
{
"epoch": 1.65,
"learning_rate": 0.00015484518712493187,
"loss": 0.9884,
"step": 32
},
{
"epoch": 1.71,
"learning_rate": 0.00015195839500354335,
"loss": 1.1934,
"step": 33
},
{
"epoch": 1.76,
"learning_rate": 0.00014901102171780174,
"loss": 1.1506,
"step": 34
},
{
"epoch": 1.81,
"learning_rate": 0.00014600650377311522,
"loss": 1.1682,
"step": 35
},
{
"epoch": 1.86,
"learning_rate": 0.0001429483443030082,
"loss": 1.3578,
"step": 36
},
{
"epoch": 1.91,
"learning_rate": 0.00013984010898462416,
"loss": 1.1693,
"step": 37
},
{
"epoch": 1.96,
"learning_rate": 0.00013668542188130566,
"loss": 1.0341,
"step": 38
},
{
"epoch": 2.02,
"learning_rate": 0.00013348796121709862,
"loss": 1.1559,
"step": 39
},
{
"epoch": 2.07,
"learning_rate": 0.0001302514550881076,
"loss": 0.7115,
"step": 40
},
{
"epoch": 2.12,
"learning_rate": 0.00012697967711570242,
"loss": 0.694,
"step": 41
},
{
"epoch": 2.17,
"learning_rate": 0.00012367644204664468,
"loss": 0.5978,
"step": 42
},
{
"epoch": 2.22,
"learning_rate": 0.0001203456013052634,
"loss": 0.6932,
"step": 43
},
{
"epoch": 2.27,
"learning_rate": 0.00011699103850286669,
"loss": 0.6343,
"step": 44
},
{
"epoch": 2.33,
"learning_rate": 0.00011361666490962468,
"loss": 0.5644,
"step": 45
},
{
"epoch": 2.38,
"learning_rate": 0.00011022641489420342,
"loss": 0.5203,
"step": 46
},
{
"epoch": 2.43,
"learning_rate": 0.0001068242413364671,
"loss": 0.7057,
"step": 47
},
{
"epoch": 2.48,
"learning_rate": 0.00010341411101859679,
"loss": 0.4707,
"step": 48
},
{
"epoch": 2.53,
"learning_rate": 0.0001,
"loss": 0.6605,
"step": 49
},
{
"epoch": 2.58,
"learning_rate": 9.658588898140322e-05,
"loss": 0.6644,
"step": 50
},
{
"epoch": 2.64,
"learning_rate": 9.317575866353292e-05,
"loss": 0.6467,
"step": 51
},
{
"epoch": 2.69,
"learning_rate": 8.977358510579657e-05,
"loss": 0.6245,
"step": 52
},
{
"epoch": 2.74,
"learning_rate": 8.638333509037536e-05,
"loss": 0.6898,
"step": 53
},
{
"epoch": 2.79,
"learning_rate": 8.300896149713334e-05,
"loss": 0.5144,
"step": 54
},
{
"epoch": 2.84,
"learning_rate": 7.965439869473664e-05,
"loss": 0.7721,
"step": 55
},
{
"epoch": 2.89,
"learning_rate": 7.632355795335533e-05,
"loss": 0.721,
"step": 56
},
{
"epoch": 2.95,
"learning_rate": 7.302032288429756e-05,
"loss": 0.731,
"step": 57
},
{
"epoch": 3.0,
"learning_rate": 6.974854491189243e-05,
"loss": 0.652,
"step": 58
},
{
"epoch": 3.05,
"learning_rate": 6.651203878290139e-05,
"loss": 0.4655,
"step": 59
},
{
"epoch": 3.1,
"learning_rate": 6.331457811869437e-05,
"loss": 0.4752,
"step": 60
},
{
"epoch": 3.15,
"learning_rate": 6.015989101537586e-05,
"loss": 0.4497,
"step": 61
},
{
"epoch": 3.21,
"learning_rate": 5.7051655696991826e-05,
"loss": 0.2536,
"step": 62
},
{
"epoch": 3.26,
"learning_rate": 5.399349622688479e-05,
"loss": 0.3301,
"step": 63
},
{
"epoch": 3.31,
"learning_rate": 5.0988978282198305e-05,
"loss": 0.3794,
"step": 64
},
{
"epoch": 3.36,
"learning_rate": 4.804160499645667e-05,
"loss": 0.346,
"step": 65
},
{
"epoch": 3.41,
"learning_rate": 4.515481287506811e-05,
"loss": 0.3531,
"step": 66
},
{
"epoch": 3.46,
"learning_rate": 4.2331967788513295e-05,
"loss": 0.3091,
"step": 67
},
{
"epoch": 3.52,
"learning_rate": 3.9576361047890554e-05,
"loss": 0.2856,
"step": 68
},
{
"epoch": 3.57,
"learning_rate": 3.689120556739475e-05,
"loss": 0.2759,
"step": 69
},
{
"epoch": 3.62,
"learning_rate": 3.427963211820274e-05,
"loss": 0.3152,
"step": 70
},
{
"epoch": 3.67,
"learning_rate": 3.174468567813461e-05,
"loss": 0.2649,
"step": 71
},
{
"epoch": 3.72,
"learning_rate": 2.9289321881345254e-05,
"loss": 0.2846,
"step": 72
},
{
"epoch": 3.77,
"learning_rate": 2.691640357218759e-05,
"loss": 0.2286,
"step": 73
},
{
"epoch": 3.83,
"learning_rate": 2.4628697467263918e-05,
"loss": 0.2379,
"step": 74
},
{
"epoch": 3.88,
"learning_rate": 2.242887092955801e-05,
"loss": 0.278,
"step": 75
},
{
"epoch": 3.93,
"learning_rate": 2.0319488858409553e-05,
"loss": 0.3274,
"step": 76
},
{
"epoch": 3.98,
"learning_rate": 1.8303010698955804e-05,
"loss": 0.2248,
"step": 77
},
{
"epoch": 4.03,
"learning_rate": 1.638178757452894e-05,
"loss": 0.3272,
"step": 78
},
{
"epoch": 4.08,
"learning_rate": 1.4558059545351143e-05,
"loss": 0.1396,
"step": 79
},
{
"epoch": 4.14,
"learning_rate": 1.2833952996724863e-05,
"loss": 0.1572,
"step": 80
},
{
"epoch": 4.19,
"learning_rate": 1.1211478159762478e-05,
"loss": 0.1203,
"step": 81
},
{
"epoch": 4.24,
"learning_rate": 9.692526767546729e-06,
"loss": 0.1403,
"step": 82
},
{
"epoch": 4.29,
"learning_rate": 8.278869849454718e-06,
"loss": 0.1242,
"step": 83
},
{
"epoch": 4.34,
"learning_rate": 6.972155666216684e-06,
"loss": 0.1309,
"step": 84
},
{
"epoch": 4.39,
"learning_rate": 5.77390778811796e-06,
"loss": 0.135,
"step": 85
},
{
"epoch": 4.45,
"learning_rate": 4.685523318583918e-06,
"loss": 0.1654,
"step": 86
},
{
"epoch": 4.5,
"learning_rate": 3.7082712652200867e-06,
"loss": 0.1342,
"step": 87
},
{
"epoch": 4.55,
"learning_rate": 2.843291060205855e-06,
"loss": 0.2433,
"step": 88
},
{
"epoch": 4.6,
"learning_rate": 2.091591231767709e-06,
"loss": 0.1835,
"step": 89
},
{
"epoch": 4.65,
"learning_rate": 1.4540482282803137e-06,
"loss": 0.1553,
"step": 90
},
{
"epoch": 4.7,
"learning_rate": 9.314053963669245e-07,
"loss": 0.1333,
"step": 91
},
{
"epoch": 4.76,
"learning_rate": 5.24272114190516e-07,
"loss": 0.132,
"step": 92
},
{
"epoch": 4.81,
"learning_rate": 2.3312308094607382e-07,
"loss": 0.1226,
"step": 93
},
{
"epoch": 4.86,
"learning_rate": 5.8297763382597626e-08,
"loss": 0.147,
"step": 94
},
{
"epoch": 4.91,
"learning_rate": 0.0,
"loss": 0.1178,
"step": 95
},
{
"epoch": 4.91,
"step": 95,
"total_flos": 5041018556416.0,
"train_loss": 0.9510736925037284,
"train_runtime": 3904.6534,
"train_samples_per_second": 0.793,
"train_steps_per_second": 0.024
}
],
"logging_steps": 1.0,
"max_steps": 95,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 50000,
"total_flos": 5041018556416.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}