{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.0, "eval_steps": 500, "global_step": 268, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.018656716417910446, "grad_norm": 1.8356504566992033, "learning_rate": 1.785714285714286e-05, "loss": 0.8456, "step": 5 }, { "epoch": 0.03731343283582089, "grad_norm": 0.7821599244269375, "learning_rate": 3.571428571428572e-05, "loss": 0.7682, "step": 10 }, { "epoch": 0.055970149253731345, "grad_norm": 0.50179928670827, "learning_rate": 4.999827900623038e-05, "loss": 0.7026, "step": 15 }, { "epoch": 0.07462686567164178, "grad_norm": 0.382416164181803, "learning_rate": 4.993807186343243e-05, "loss": 0.6746, "step": 20 }, { "epoch": 0.09328358208955224, "grad_norm": 0.3566825030851752, "learning_rate": 4.979207812402531e-05, "loss": 0.6436, "step": 25 }, { "epoch": 0.11194029850746269, "grad_norm": 0.2695784425367559, "learning_rate": 4.956085596012407e-05, "loss": 0.6363, "step": 30 }, { "epoch": 0.13059701492537312, "grad_norm": 0.2407490943407067, "learning_rate": 4.924528939432311e-05, "loss": 0.6199, "step": 35 }, { "epoch": 0.14925373134328357, "grad_norm": 0.23317688086377053, "learning_rate": 4.884658491984735e-05, "loss": 0.6106, "step": 40 }, { "epoch": 0.16791044776119404, "grad_norm": 0.20339037620270942, "learning_rate": 4.8366266887814235e-05, "loss": 0.6112, "step": 45 }, { "epoch": 0.1865671641791045, "grad_norm": 0.22054431853246134, "learning_rate": 4.780617167924209e-05, "loss": 0.5938, "step": 50 }, { "epoch": 0.20522388059701493, "grad_norm": 0.2268423795334953, "learning_rate": 4.716844068408693e-05, "loss": 0.5964, "step": 55 }, { "epoch": 0.22388059701492538, "grad_norm": 0.24806960586700805, "learning_rate": 4.6455512114150546e-05, "loss": 0.5917, "step": 60 }, { "epoch": 0.24253731343283583, "grad_norm": 0.43486878673463725, "learning_rate": 4.5670111681161296e-05, "loss": 0.5827, "step": 65 }, { "epoch": 0.26119402985074625, "grad_norm": 0.31606900338954863, "learning_rate": 4.481524217566783e-05, "loss": 0.5798, "step": 70 }, { "epoch": 0.2798507462686567, "grad_norm": 0.46043561151419643, "learning_rate": 4.3894171986588217e-05, "loss": 0.5786, "step": 75 }, { "epoch": 0.29850746268656714, "grad_norm": 0.39095627945199035, "learning_rate": 4.29104226053073e-05, "loss": 0.5773, "step": 80 }, { "epoch": 0.31716417910447764, "grad_norm": 0.31349920294253814, "learning_rate": 4.186775516209732e-05, "loss": 0.5727, "step": 85 }, { "epoch": 0.3358208955223881, "grad_norm": 0.3018041196612178, "learning_rate": 4.077015604633669e-05, "loss": 0.5752, "step": 90 }, { "epoch": 0.35447761194029853, "grad_norm": 0.28844151656468714, "learning_rate": 3.962182166550441e-05, "loss": 0.5736, "step": 95 }, { "epoch": 0.373134328358209, "grad_norm": 0.2556262580728924, "learning_rate": 3.8427142401220634e-05, "loss": 0.5696, "step": 100 }, { "epoch": 0.3917910447761194, "grad_norm": 0.2367875729801897, "learning_rate": 3.71906858236735e-05, "loss": 0.5659, "step": 105 }, { "epoch": 0.41044776119402987, "grad_norm": 0.2527356962681597, "learning_rate": 3.591717922860785e-05, "loss": 0.5732, "step": 110 }, { "epoch": 0.4291044776119403, "grad_norm": 0.22725735986760126, "learning_rate": 3.46114915636416e-05, "loss": 0.564, "step": 115 }, { "epoch": 0.44776119402985076, "grad_norm": 0.21101640414812647, "learning_rate": 3.3278614813010034e-05, "loss": 0.565, "step": 120 }, { "epoch": 0.4664179104477612, "grad_norm": 0.23434890898668598, "learning_rate": 
3.1923644911909e-05, "loss": 0.5618, "step": 125 }, { "epoch": 0.48507462686567165, "grad_norm": 0.21223049539236288, "learning_rate": 3.0551762263406576e-05, "loss": 0.5605, "step": 130 }, { "epoch": 0.503731343283582, "grad_norm": 0.2134933060504168, "learning_rate": 2.9168211932412042e-05, "loss": 0.5577, "step": 135 }, { "epoch": 0.5223880597014925, "grad_norm": 0.1926844455351086, "learning_rate": 2.777828359242567e-05, "loss": 0.5631, "step": 140 }, { "epoch": 0.5410447761194029, "grad_norm": 0.23573127243858358, "learning_rate": 2.6387291301738377e-05, "loss": 0.5557, "step": 145 }, { "epoch": 0.5597014925373134, "grad_norm": 0.1993992857842917, "learning_rate": 2.50005531864019e-05, "loss": 0.5536, "step": 150 }, { "epoch": 0.5783582089552238, "grad_norm": 0.1834644185443385, "learning_rate": 2.362337110764688e-05, "loss": 0.5539, "step": 155 }, { "epoch": 0.5970149253731343, "grad_norm": 0.20003960229058398, "learning_rate": 2.226101039148557e-05, "loss": 0.5521, "step": 160 }, { "epoch": 0.6156716417910447, "grad_norm": 0.2089691748732325, "learning_rate": 2.0918679697998252e-05, "loss": 0.551, "step": 165 }, { "epoch": 0.6343283582089553, "grad_norm": 0.19473282012508036, "learning_rate": 1.9601511107268255e-05, "loss": 0.5514, "step": 170 }, { "epoch": 0.6529850746268657, "grad_norm": 0.18752031656330145, "learning_rate": 1.8314540498102216e-05, "loss": 0.5511, "step": 175 }, { "epoch": 0.6716417910447762, "grad_norm": 0.1658985447570826, "learning_rate": 1.7062688294552992e-05, "loss": 0.5431, "step": 180 }, { "epoch": 0.6902985074626866, "grad_norm": 0.1812387011047143, "learning_rate": 1.5850740653856096e-05, "loss": 0.5465, "step": 185 }, { "epoch": 0.7089552238805971, "grad_norm": 0.1635020835503139, "learning_rate": 1.4683331167703218e-05, "loss": 0.5501, "step": 190 }, { "epoch": 0.7276119402985075, "grad_norm": 0.16593032564759602, "learning_rate": 1.356492314681356e-05, "loss": 0.5529, "step": 195 }, { "epoch": 0.746268656716418, "grad_norm": 0.1535638619239139, "learning_rate": 1.2499792556533716e-05, "loss": 0.5473, "step": 200 }, { "epoch": 0.7649253731343284, "grad_norm": 0.16902270855953427, "learning_rate": 1.1492011668707753e-05, "loss": 0.5448, "step": 205 }, { "epoch": 0.7835820895522388, "grad_norm": 0.16270665257345254, "learning_rate": 1.0545433492320603e-05, "loss": 0.5499, "step": 210 }, { "epoch": 0.8022388059701493, "grad_norm": 0.14976763276244748, "learning_rate": 9.663677042440537e-06, "loss": 0.5443, "step": 215 }, { "epoch": 0.8208955223880597, "grad_norm": 0.15815132358893125, "learning_rate": 8.850113503781367e-06, "loss": 0.5441, "step": 220 }, { "epoch": 0.8395522388059702, "grad_norm": 0.14067957321186905, "learning_rate": 8.107853341784671e-06, "loss": 0.5505, "step": 225 }, { "epoch": 0.8582089552238806, "grad_norm": 0.15572802809623834, "learning_rate": 7.439734410499752e-06, "loss": 0.5469, "step": 230 }, { "epoch": 0.8768656716417911, "grad_norm": 0.1705246886285706, "learning_rate": 6.848311102728011e-06, "loss": 0.547, "step": 235 }, { "epoch": 0.8955223880597015, "grad_norm": 0.16495921290169394, "learning_rate": 6.335844583913515e-06, "loss": 0.5432, "step": 240 }, { "epoch": 0.914179104477612, "grad_norm": 0.13735669951445256, "learning_rate": 5.904294147118193e-06, "loss": 0.5469, "step": 245 }, { "epoch": 0.9328358208955224, "grad_norm": 0.14217963361050576, "learning_rate": 5.555309722133842e-06, "loss": 0.5434, "step": 250 }, { "epoch": 0.9514925373134329, "grad_norm": 0.13779272640691465, "learning_rate": 5.290225567370509e-06, 
"loss": 0.5395, "step": 255 }, { "epoch": 0.9701492537313433, "grad_norm": 0.1420003921635414, "learning_rate": 5.110055168638854e-06, "loss": 0.5431, "step": 260 }, { "epoch": 0.9888059701492538, "grad_norm": 0.13791108083773904, "learning_rate": 5.0154873643297575e-06, "loss": 0.5469, "step": 265 }, { "epoch": 1.0, "step": 268, "total_flos": 488621249396736.0, "train_loss": 0.0, "train_runtime": 2.3155, "train_samples_per_second": 14813.438, "train_steps_per_second": 115.74 } ], "logging_steps": 5, "max_steps": 268, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 488621249396736.0, "train_batch_size": 16, "trial_name": null, "trial_params": null }