{ "best_global_step": null, "best_metric": null, "best_model_checkpoint": null, "epoch": 3.0, "eval_steps": 500, "global_step": 471, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.06389776357827476, "grad_norm": 34.803749574601504, "learning_rate": 1.8750000000000003e-06, "loss": 3.2079, "step": 10 }, { "epoch": 0.12779552715654952, "grad_norm": 8.393596619540387, "learning_rate": 3.958333333333333e-06, "loss": 2.3077, "step": 20 }, { "epoch": 0.19169329073482427, "grad_norm": 4.54582398918815, "learning_rate": 6.041666666666667e-06, "loss": 1.6795, "step": 30 }, { "epoch": 0.25559105431309903, "grad_norm": 2.88137111898239, "learning_rate": 8.125000000000001e-06, "loss": 1.3798, "step": 40 }, { "epoch": 0.3194888178913738, "grad_norm": 3.470416724559835, "learning_rate": 9.999862102299874e-06, "loss": 1.2363, "step": 50 }, { "epoch": 0.38338658146964855, "grad_norm": 3.111739768988857, "learning_rate": 9.983323579940351e-06, "loss": 1.1359, "step": 60 }, { "epoch": 0.4472843450479233, "grad_norm": 2.63691114437442, "learning_rate": 9.939310009499348e-06, "loss": 1.0504, "step": 70 }, { "epoch": 0.5111821086261981, "grad_norm": 2.3175377091892346, "learning_rate": 9.868064055324204e-06, "loss": 1.0116, "step": 80 }, { "epoch": 0.5750798722044729, "grad_norm": 2.30928876775904, "learning_rate": 9.76997852474223e-06, "loss": 0.9679, "step": 90 }, { "epoch": 0.6389776357827476, "grad_norm": 2.2162135977370196, "learning_rate": 9.645594202357438e-06, "loss": 0.944, "step": 100 }, { "epoch": 0.7028753993610224, "grad_norm": 1.9916201881672246, "learning_rate": 9.495596868489588e-06, "loss": 0.921, "step": 110 }, { "epoch": 0.7667731629392971, "grad_norm": 2.2617620915408914, "learning_rate": 9.320813518194084e-06, "loss": 0.9022, "step": 120 }, { "epoch": 0.8306709265175719, "grad_norm": 2.4967363173333292, "learning_rate": 9.122207801708802e-06, "loss": 0.895, "step": 130 }, { "epoch": 0.8945686900958466, "grad_norm": 2.2712304513946906, "learning_rate": 8.900874711466436e-06, "loss": 0.864, "step": 140 }, { "epoch": 0.9584664536741214, "grad_norm": 1.9051776400686733, "learning_rate": 8.658034544965003e-06, "loss": 0.8608, "step": 150 }, { "epoch": 1.0191693290734825, "grad_norm": 1.9166530255225462, "learning_rate": 8.395026176781627e-06, "loss": 0.8301, "step": 160 }, { "epoch": 1.0830670926517572, "grad_norm": 2.064754006824319, "learning_rate": 8.113299676823614e-06, "loss": 0.7483, "step": 170 }, { "epoch": 1.1469648562300319, "grad_norm": 2.082411427574368, "learning_rate": 7.814408315515419e-06, "loss": 0.7494, "step": 180 }, { "epoch": 1.2108626198083068, "grad_norm": 2.2742401088702873, "learning_rate": 7.500000000000001e-06, "loss": 0.7281, "step": 190 }, { "epoch": 1.2747603833865815, "grad_norm": 2.177821331073625, "learning_rate": 7.1718081885702905e-06, "loss": 0.7254, "step": 200 }, { "epoch": 1.3386581469648562, "grad_norm": 2.270677294458059, "learning_rate": 6.831642333423068e-06, "loss": 0.7254, "step": 210 }, { "epoch": 1.4025559105431311, "grad_norm": 1.876950473520462, "learning_rate": 6.481377904428171e-06, "loss": 0.7098, "step": 220 }, { "epoch": 1.4664536741214058, "grad_norm": 1.9386787003754147, "learning_rate": 6.122946048915991e-06, "loss": 0.7025, "step": 230 }, { "epoch": 1.5303514376996805, "grad_norm": 2.098949126790221, "learning_rate": 5.75832294449293e-06, "loss": 0.7031, "step": 240 }, { "epoch": 1.5942492012779552, "grad_norm": 1.9482130248993954, "learning_rate": 
5.389518903587016e-06, "loss": 0.699, "step": 250 }, { "epoch": 1.65814696485623, "grad_norm": 2.2416291315780845, "learning_rate": 5.0185672897946515e-06, "loss": 0.679, "step": 260 }, { "epoch": 1.7220447284345048, "grad_norm": 2.0218332843155133, "learning_rate": 4.647513307137076e-06, "loss": 0.6905, "step": 270 }, { "epoch": 1.7859424920127795, "grad_norm": 1.8220659272587023, "learning_rate": 4.278402724035868e-06, "loss": 0.6763, "step": 280 }, { "epoch": 1.8498402555910545, "grad_norm": 1.9040499495108065, "learning_rate": 3.913270594176665e-06, "loss": 0.6666, "step": 290 }, { "epoch": 1.9137380191693292, "grad_norm": 1.9584612801162502, "learning_rate": 3.5541300364475067e-06, "loss": 0.652, "step": 300 }, { "epoch": 1.9776357827476039, "grad_norm": 2.0067481902852218, "learning_rate": 3.202961135812437e-06, "loss": 0.6407, "step": 310 }, { "epoch": 2.038338658146965, "grad_norm": 2.2045322624750185, "learning_rate": 2.861700026314308e-06, "loss": 0.5873, "step": 320 }, { "epoch": 2.1022364217252396, "grad_norm": 1.9777465745041298, "learning_rate": 2.5322282163965096e-06, "loss": 0.5501, "step": 330 }, { "epoch": 2.1661341853035143, "grad_norm": 2.188183911470147, "learning_rate": 2.216362215397393e-06, "loss": 0.5408, "step": 340 }, { "epoch": 2.230031948881789, "grad_norm": 1.994203838618425, "learning_rate": 1.91584351841065e-06, "loss": 0.5307, "step": 350 }, { "epoch": 2.2939297124600637, "grad_norm": 2.1333509330982396, "learning_rate": 1.6323290047291196e-06, "loss": 0.5185, "step": 360 }, { "epoch": 2.357827476038339, "grad_norm": 2.0993869032798114, "learning_rate": 1.367381802809185e-06, "loss": 0.5134, "step": 370 }, { "epoch": 2.4217252396166136, "grad_norm": 1.954631998509726, "learning_rate": 1.1224626721209141e-06, "loss": 0.5196, "step": 380 }, { "epoch": 2.4856230031948883, "grad_norm": 1.9740302894409982, "learning_rate": 8.989219493991791e-07, "loss": 0.5114, "step": 390 }, { "epoch": 2.549520766773163, "grad_norm": 2.089443520663485, "learning_rate": 6.979921036993042e-07, "loss": 0.5143, "step": 400 }, { "epoch": 2.6134185303514377, "grad_norm": 2.1355946780064587, "learning_rate": 5.207809413041914e-07, "loss": 0.506, "step": 410 }, { "epoch": 2.6773162939297124, "grad_norm": 2.1209465409863024, "learning_rate": 3.6826549794698074e-07, "loss": 0.5048, "step": 420 }, { "epoch": 2.741214057507987, "grad_norm": 1.9717542566555746, "learning_rate": 2.4128665202382327e-07, "loss": 0.5104, "step": 430 }, { "epoch": 2.8051118210862622, "grad_norm": 1.9192059742029721, "learning_rate": 1.4054448849631087e-07, "loss": 0.5039, "step": 440 }, { "epoch": 2.8690095846645365, "grad_norm": 2.0469451263493657, "learning_rate": 6.659443904419638e-08, "loss": 0.5093, "step": 450 }, { "epoch": 2.9329073482428116, "grad_norm": 2.0780858253955454, "learning_rate": 1.984421974927375e-08, "loss": 0.4971, "step": 460 }, { "epoch": 2.9968051118210863, "grad_norm": 2.2435204264420503, "learning_rate": 5.515831941993455e-10, "loss": 0.5139, "step": 470 }, { "epoch": 3.0, "step": 471, "total_flos": 36429583613952.0, "train_loss": 0.8295695250201377, "train_runtime": 3334.6516, "train_samples_per_second": 9.004, "train_steps_per_second": 0.141 } ], "logging_steps": 10, "max_steps": 471, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 
36429583613952.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }