{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.993325917686318,
  "eval_steps": 500,
  "global_step": 336,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11865035224323323,
      "grad_norm": 0.5938982963562012,
      "learning_rate": 9.723076923076924e-05,
      "loss": 1.2521,
      "step": 20
    },
    {
      "epoch": 0.23730070448646645,
      "grad_norm": 0.19858305156230927,
      "learning_rate": 9.107692307692308e-05,
      "loss": 0.3248,
      "step": 40
    },
    {
      "epoch": 0.3559510567296997,
      "grad_norm": 0.05706700682640076,
      "learning_rate": 8.492307692307693e-05,
      "loss": 0.0197,
      "step": 60
    },
    {
      "epoch": 0.4746014089729329,
      "grad_norm": 0.0070083774626255035,
      "learning_rate": 7.876923076923077e-05,
      "loss": 0.0028,
      "step": 80
    },
    {
      "epoch": 0.5932517612161661,
      "grad_norm": 0.004076245240867138,
      "learning_rate": 7.261538461538462e-05,
      "loss": 0.0007,
      "step": 100
    },
    {
      "epoch": 0.7119021134593994,
      "grad_norm": 0.003080987138673663,
      "learning_rate": 6.646153846153846e-05,
      "loss": 0.0005,
      "step": 120
    },
    {
      "epoch": 0.8305524657026325,
      "grad_norm": 0.0025518136098980904,
      "learning_rate": 6.030769230769231e-05,
      "loss": 0.0004,
      "step": 140
    },
    {
      "epoch": 0.9492028179458658,
      "grad_norm": 0.0022116131149232388,
      "learning_rate": 5.4153846153846156e-05,
      "loss": 0.0004,
      "step": 160
    },
    {
      "epoch": 1.067853170189099,
      "grad_norm": 0.0019687730818986893,
      "learning_rate": 4.8e-05,
      "loss": 0.0003,
      "step": 180
    },
    {
      "epoch": 1.1865035224323321,
      "grad_norm": 0.0017901354003697634,
      "learning_rate": 4.1846153846153846e-05,
      "loss": 0.0003,
      "step": 200
    },
    {
      "epoch": 1.3051538746755655,
      "grad_norm": 0.0016503820661455393,
      "learning_rate": 3.569230769230769e-05,
      "loss": 0.0003,
      "step": 220
    },
    {
      "epoch": 1.4238042269187987,
      "grad_norm": 0.0015508969081565738,
      "learning_rate": 2.9538461538461543e-05,
      "loss": 0.0003,
      "step": 240
    },
    {
      "epoch": 1.5424545791620319,
      "grad_norm": 0.001471668598242104,
      "learning_rate": 2.3384615384615385e-05,
      "loss": 0.0002,
      "step": 260
    },
    {
      "epoch": 1.6611049314052653,
      "grad_norm": 0.0014170887880027294,
      "learning_rate": 1.723076923076923e-05,
      "loss": 0.0002,
      "step": 280
    },
    {
      "epoch": 1.7797552836484982,
      "grad_norm": 0.0013795532286167145,
      "learning_rate": 1.1076923076923077e-05,
      "loss": 0.0002,
      "step": 300
    },
    {
      "epoch": 1.8984056358917316,
      "grad_norm": 0.0013580905506387353,
      "learning_rate": 4.923076923076923e-06,
      "loss": 0.0002,
      "step": 320
    }
  ],
  "logging_steps": 20,
  "max_steps": 336,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.3387692641288192e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}