{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.2747603833865815,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06389776357827476,
      "grad_norm": 34.803749574601504,
      "learning_rate": 1.8750000000000003e-06,
      "loss": 3.2079,
      "step": 10
    },
    {
      "epoch": 0.12779552715654952,
      "grad_norm": 8.393596619540387,
      "learning_rate": 3.958333333333333e-06,
      "loss": 2.3077,
      "step": 20
    },
    {
      "epoch": 0.19169329073482427,
      "grad_norm": 4.54582398918815,
      "learning_rate": 6.041666666666667e-06,
      "loss": 1.6795,
      "step": 30
    },
    {
      "epoch": 0.25559105431309903,
      "grad_norm": 2.88137111898239,
      "learning_rate": 8.125000000000001e-06,
      "loss": 1.3798,
      "step": 40
    },
    {
      "epoch": 0.3194888178913738,
      "grad_norm": 3.470416724559835,
      "learning_rate": 9.999862102299874e-06,
      "loss": 1.2363,
      "step": 50
    },
    {
      "epoch": 0.38338658146964855,
      "grad_norm": 3.111739768988857,
      "learning_rate": 9.983323579940351e-06,
      "loss": 1.1359,
      "step": 60
    },
    {
      "epoch": 0.4472843450479233,
      "grad_norm": 2.63691114437442,
      "learning_rate": 9.939310009499348e-06,
      "loss": 1.0504,
      "step": 70
    },
    {
      "epoch": 0.5111821086261981,
      "grad_norm": 2.3175377091892346,
      "learning_rate": 9.868064055324204e-06,
      "loss": 1.0116,
      "step": 80
    },
    {
      "epoch": 0.5750798722044729,
      "grad_norm": 2.30928876775904,
      "learning_rate": 9.76997852474223e-06,
      "loss": 0.9679,
      "step": 90
    },
    {
      "epoch": 0.6389776357827476,
      "grad_norm": 2.2162135977370196,
      "learning_rate": 9.645594202357438e-06,
      "loss": 0.944,
      "step": 100
    },
    {
      "epoch": 0.7028753993610224,
      "grad_norm": 1.9916201881672246,
      "learning_rate": 9.495596868489588e-06,
      "loss": 0.921,
      "step": 110
    },
    {
      "epoch": 0.7667731629392971,
      "grad_norm": 2.2617620915408914,
      "learning_rate": 9.320813518194084e-06,
      "loss": 0.9022,
      "step": 120
    },
    {
      "epoch": 0.8306709265175719,
      "grad_norm": 2.4967363173333292,
      "learning_rate": 9.122207801708802e-06,
      "loss": 0.895,
      "step": 130
    },
    {
      "epoch": 0.8945686900958466,
      "grad_norm": 2.2712304513946906,
      "learning_rate": 8.900874711466436e-06,
      "loss": 0.864,
      "step": 140
    },
    {
      "epoch": 0.9584664536741214,
      "grad_norm": 1.9051776400686733,
      "learning_rate": 8.658034544965003e-06,
      "loss": 0.8608,
      "step": 150
    },
    {
      "epoch": 1.0191693290734825,
      "grad_norm": 1.9166530255225462,
      "learning_rate": 8.395026176781627e-06,
      "loss": 0.8301,
      "step": 160
    },
    {
      "epoch": 1.0830670926517572,
      "grad_norm": 2.064754006824319,
      "learning_rate": 8.113299676823614e-06,
      "loss": 0.7483,
      "step": 170
    },
    {
      "epoch": 1.1469648562300319,
      "grad_norm": 2.082411427574368,
      "learning_rate": 7.814408315515419e-06,
      "loss": 0.7494,
      "step": 180
    },
    {
      "epoch": 1.2108626198083068,
      "grad_norm": 2.2742401088702873,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.7281,
      "step": 190
    },
    {
      "epoch": 1.2747603833865815,
      "grad_norm": 2.177821331073625,
      "learning_rate": 7.1718081885702905e-06,
      "loss": 0.7254,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 471,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 15503832121344.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}