{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 198,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00506409241968666,
      "grad_norm": 5.678173069867947,
      "learning_rate": 0.0001,
      "loss": 0.3528,
      "mean_token_accuracy": 0.9147689286619425,
      "num_tokens": 586487.0,
      "step": 1
    },
    {
      "epoch": 0.05064092419686659,
      "grad_norm": 0.3134267771799764,
      "learning_rate": 0.0001,
      "loss": 0.2555,
      "mean_token_accuracy": 0.9199666447109647,
      "num_tokens": 5968280.0,
      "step": 10
    },
    {
      "epoch": 0.10128184839373318,
      "grad_norm": 0.14841672886560275,
      "learning_rate": 0.0001,
      "loss": 0.1589,
      "mean_token_accuracy": 0.9397787630558014,
      "num_tokens": 11930513.0,
      "step": 20
    },
    {
      "epoch": 0.15192277259059978,
      "grad_norm": 0.0924952729966239,
      "learning_rate": 0.0001,
      "loss": 0.139,
      "mean_token_accuracy": 0.946698647364974,
      "num_tokens": 17853273.0,
      "step": 30
    },
    {
      "epoch": 0.20256369678746636,
      "grad_norm": 0.08634084330086704,
      "learning_rate": 0.0001,
      "loss": 0.1277,
      "mean_token_accuracy": 0.95089195612818,
      "num_tokens": 23792525.0,
      "step": 40
    },
    {
      "epoch": 0.25320462098433294,
      "grad_norm": 0.07548012506474341,
      "learning_rate": 0.0001,
      "loss": 0.1212,
      "mean_token_accuracy": 0.953272457793355,
      "num_tokens": 29710036.0,
      "step": 50
    },
    {
      "epoch": 0.30384554518119955,
      "grad_norm": 0.07448026254865987,
      "learning_rate": 0.0001,
      "loss": 0.1151,
      "mean_token_accuracy": 0.9557252813130617,
      "num_tokens": 35685902.0,
      "step": 60
    },
    {
      "epoch": 0.35448646937806616,
      "grad_norm": 0.0879124617580733,
      "learning_rate": 0.0001,
      "loss": 0.1112,
      "mean_token_accuracy": 0.957416345924139,
      "num_tokens": 41642058.0,
      "step": 70
    },
    {
      "epoch": 0.4051273935749327,
      "grad_norm": 0.07273411152988447,
      "learning_rate": 0.0001,
      "loss": 0.1098,
      "mean_token_accuracy": 0.9579035054892302,
      "num_tokens": 47564730.0,
      "step": 80
    },
    {
      "epoch": 0.4557683177717993,
      "grad_norm": 0.07959899342781425,
      "learning_rate": 0.0001,
      "loss": 0.1076,
      "mean_token_accuracy": 0.9587001299485565,
      "num_tokens": 53504116.0,
      "step": 90
    },
    {
      "epoch": 0.5064092419686659,
      "grad_norm": 0.09016384504615442,
      "learning_rate": 0.0001,
      "loss": 0.1064,
      "mean_token_accuracy": 0.9593421731144189,
      "num_tokens": 59463168.0,
      "step": 100
    },
    {
      "epoch": 0.5570501661655325,
      "grad_norm": 0.08920769596857461,
      "learning_rate": 0.0001,
      "loss": 0.1061,
      "mean_token_accuracy": 0.9594598092138767,
      "num_tokens": 65384936.0,
      "step": 110
    },
    {
      "epoch": 0.6076910903623991,
      "grad_norm": 0.07392576254888708,
      "learning_rate": 0.0001,
      "loss": 0.1037,
      "mean_token_accuracy": 0.960389680787921,
      "num_tokens": 71307379.0,
      "step": 120
    },
    {
      "epoch": 0.6583320145592657,
      "grad_norm": 0.07677666218910574,
      "learning_rate": 0.0001,
      "loss": 0.1034,
      "mean_token_accuracy": 0.9603631895035505,
      "num_tokens": 77241947.0,
      "step": 130
    },
    {
      "epoch": 0.7089729387561323,
      "grad_norm": 0.08726421315898845,
      "learning_rate": 0.0001,
      "loss": 0.1039,
      "mean_token_accuracy": 0.9603854931890965,
      "num_tokens": 83153311.0,
      "step": 140
    },
    {
      "epoch": 0.7596138629529989,
      "grad_norm": 0.07666737355553582,
      "learning_rate": 0.0001,
      "loss": 0.1025,
      "mean_token_accuracy": 0.9607552452012896,
      "num_tokens": 89070746.0,
      "step": 150
    },
    {
      "epoch": 0.8102547871498654,
      "grad_norm": 0.07414032432163672,
      "learning_rate": 0.0001,
      "loss": 0.1015,
      "mean_token_accuracy": 0.9611390510573983,
      "num_tokens": 95023182.0,
      "step": 160
    },
    {
      "epoch": 0.860895711346732,
      "grad_norm": 0.07359359677738689,
      "learning_rate": 0.0001,
      "loss": 0.101,
      "mean_token_accuracy": 0.9613310528919101,
      "num_tokens": 100991573.0,
      "step": 170
    },
    {
      "epoch": 0.9115366355435987,
      "grad_norm": 0.07081191483858919,
      "learning_rate": 0.0001,
      "loss": 0.101,
      "mean_token_accuracy": 0.9613639689981938,
      "num_tokens": 106911646.0,
      "step": 180
    },
    {
      "epoch": 0.9621775597404653,
      "grad_norm": 0.06862894355772857,
      "learning_rate": 0.0001,
      "loss": 0.1005,
      "mean_token_accuracy": 0.9614966074004769,
      "num_tokens": 112858548.0,
      "step": 190
    }
  ],
  "logging_steps": 10,
  "max_steps": 788,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 308608790560768.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}