{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2955818294959552,
  "eval_steps": 55,
  "global_step": 95,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0031113876789047915,
      "eval_loss": NaN,
      "eval_runtime": 12.6621,
      "eval_samples_per_second": 13.426,
      "eval_steps_per_second": 6.713,
      "step": 1
    },
    {
      "epoch": 0.009334163036714374,
      "grad_norm": NaN,
      "learning_rate": 6e-06,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.018668326073428748,
      "grad_norm": NaN,
      "learning_rate": 1.2e-05,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.028002489110143122,
      "grad_norm": NaN,
      "learning_rate": 1.8e-05,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.037336652146857496,
      "grad_norm": NaN,
      "learning_rate": 2.4e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.04667081518357187,
      "grad_norm": NaN,
      "learning_rate": 3e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.056004978220286245,
      "grad_norm": NaN,
      "learning_rate": 3.6e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.06533914125700062,
      "grad_norm": NaN,
      "learning_rate": 4.2e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 0.07467330429371499,
      "grad_norm": NaN,
      "learning_rate": 4.8e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.08400746733042937,
      "grad_norm": NaN,
      "learning_rate": 5.4000000000000005e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 0.09334163036714374,
      "grad_norm": NaN,
      "learning_rate": 6e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.10267579340385811,
      "grad_norm": NaN,
      "learning_rate": 6.6e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 0.11200995644057249,
      "grad_norm": NaN,
      "learning_rate": 7.2e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.12134411947728686,
      "grad_norm": NaN,
      "learning_rate": 7.800000000000001e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.13067828251400124,
      "grad_norm": NaN,
      "learning_rate": 8.4e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.14001244555071563,
      "grad_norm": NaN,
      "learning_rate": 9e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.14934660858742999,
      "grad_norm": NaN,
      "learning_rate": 9.6e-05,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 0.15868077162414437,
      "grad_norm": NaN,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.0,
      "step": 51
    },
    {
      "epoch": 0.16801493466085873,
      "grad_norm": NaN,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.0,
      "step": 54
    },
    {
      "epoch": 0.17112632233976355,
      "eval_loss": NaN,
      "eval_runtime": 12.9311,
      "eval_samples_per_second": 13.147,
      "eval_steps_per_second": 6.573,
      "step": 55
    },
    {
      "epoch": 0.17734909769757312,
      "grad_norm": NaN,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.0,
      "step": 57
    },
    {
      "epoch": 0.18668326073428748,
      "grad_norm": NaN,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.19601742377100187,
      "grad_norm": NaN,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 0.20535158680771623,
      "grad_norm": NaN,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.0,
      "step": 66
    },
    {
      "epoch": 0.21468574984443062,
      "grad_norm": NaN,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.0,
      "step": 69
    },
    {
      "epoch": 0.22401991288114498,
      "grad_norm": NaN,
      "learning_rate": 5.174497483512506e-05,
      "loss": 0.0,
      "step": 72
    },
    {
      "epoch": 0.23335407591785937,
      "grad_norm": NaN,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0,
      "step": 75
    },
    {
      "epoch": 0.24268823895457373,
      "grad_norm": NaN,
      "learning_rate": 3.12696703292044e-05,
      "loss": 0.0,
      "step": 78
    },
    {
      "epoch": 0.2520224019912881,
      "grad_norm": NaN,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.0,
      "step": 81
    },
    {
      "epoch": 0.2613565650280025,
      "grad_norm": NaN,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 0.0,
      "step": 84
    },
    {
      "epoch": 0.2706907280647169,
      "grad_norm": NaN,
      "learning_rate": 7.597595192178702e-06,
      "loss": 0.0,
      "step": 87
    },
    {
      "epoch": 0.28002489110143125,
      "grad_norm": NaN,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.0,
      "step": 90
    },
    {
      "epoch": 0.2893590541381456,
      "grad_norm": NaN,
      "learning_rate": 4.865965629214819e-07,
      "loss": 0.0,
      "step": 93
    }
  ],
  "logging_steps": 3,
  "max_steps": 95,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 80,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.95052136267776e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}