{ "best_global_step": null, "best_metric": null, "best_model_checkpoint": null, "epoch": 2.549520766773163, "eval_steps": 500, "global_step": 400, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.06389776357827476, "grad_norm": 27.007149720149624, "learning_rate": 1.8750000000000003e-06, "loss": 3.0251, "step": 10 }, { "epoch": 0.12779552715654952, "grad_norm": 7.200384353707933, "learning_rate": 3.958333333333333e-06, "loss": 2.2511, "step": 20 }, { "epoch": 0.19169329073482427, "grad_norm": 3.556500087950233, "learning_rate": 6.041666666666667e-06, "loss": 1.6305, "step": 30 }, { "epoch": 0.25559105431309903, "grad_norm": 2.6746825713695674, "learning_rate": 8.125000000000001e-06, "loss": 1.2942, "step": 40 }, { "epoch": 0.3194888178913738, "grad_norm": 3.016385157178359, "learning_rate": 9.999862102299874e-06, "loss": 1.1516, "step": 50 }, { "epoch": 0.38338658146964855, "grad_norm": 2.6763627648882995, "learning_rate": 9.983323579940351e-06, "loss": 1.0652, "step": 60 }, { "epoch": 0.4472843450479233, "grad_norm": 2.4049859456438094, "learning_rate": 9.939310009499348e-06, "loss": 0.9943, "step": 70 }, { "epoch": 0.5111821086261981, "grad_norm": 2.105383899953641, "learning_rate": 9.868064055324204e-06, "loss": 0.9601, "step": 80 }, { "epoch": 0.5750798722044729, "grad_norm": 2.1151352790518603, "learning_rate": 9.76997852474223e-06, "loss": 0.9231, "step": 90 }, { "epoch": 0.6389776357827476, "grad_norm": 1.792736838810565, "learning_rate": 9.645594202357438e-06, "loss": 0.9041, "step": 100 }, { "epoch": 0.7028753993610224, "grad_norm": 1.8385962515662637, "learning_rate": 9.495596868489588e-06, "loss": 0.8842, "step": 110 }, { "epoch": 0.7667731629392971, "grad_norm": 2.05797408169935, "learning_rate": 9.320813518194084e-06, "loss": 0.8689, "step": 120 }, { "epoch": 0.8306709265175719, "grad_norm": 2.0006315711572107, "learning_rate": 9.122207801708802e-06, "loss": 0.8651, "step": 130 }, { "epoch": 0.8945686900958466, "grad_norm": 2.0622583744546685, "learning_rate": 8.900874711466436e-06, "loss": 0.8368, "step": 140 }, { "epoch": 0.9584664536741214, "grad_norm": 1.9281533048022628, "learning_rate": 8.658034544965003e-06, "loss": 0.8362, "step": 150 }, { "epoch": 1.0191693290734825, "grad_norm": 1.7856911939628375, "learning_rate": 8.395026176781627e-06, "loss": 0.8083, "step": 160 }, { "epoch": 1.0830670926517572, "grad_norm": 1.857546453101415, "learning_rate": 8.113299676823614e-06, "loss": 0.7305, "step": 170 }, { "epoch": 1.1469648562300319, "grad_norm": 1.870887786495633, "learning_rate": 7.814408315515419e-06, "loss": 0.7347, "step": 180 }, { "epoch": 1.2108626198083068, "grad_norm": 1.936193043129797, "learning_rate": 7.500000000000001e-06, "loss": 0.7142, "step": 190 }, { "epoch": 1.2747603833865815, "grad_norm": 2.083355561746048, "learning_rate": 7.1718081885702905e-06, "loss": 0.7132, "step": 200 }, { "epoch": 1.3386581469648562, "grad_norm": 2.2188701115912477, "learning_rate": 6.831642333423068e-06, "loss": 0.7146, "step": 210 }, { "epoch": 1.4025559105431311, "grad_norm": 1.7246379127732157, "learning_rate": 6.481377904428171e-06, "loss": 0.6986, "step": 220 }, { "epoch": 1.4664536741214058, "grad_norm": 1.7746101845242512, "learning_rate": 6.122946048915991e-06, "loss": 0.692, "step": 230 }, { "epoch": 1.5303514376996805, "grad_norm": 1.9854646827619695, "learning_rate": 5.75832294449293e-06, "loss": 0.6934, "step": 240 }, { "epoch": 1.5942492012779552, "grad_norm": 2.804872064894909, 
"learning_rate": 5.389518903587016e-06, "loss": 0.6904, "step": 250 }, { "epoch": 1.65814696485623, "grad_norm": 2.1688342723582665, "learning_rate": 5.0185672897946515e-06, "loss": 0.6717, "step": 260 }, { "epoch": 1.7220447284345048, "grad_norm": 1.8837334998414557, "learning_rate": 4.647513307137076e-06, "loss": 0.6835, "step": 270 }, { "epoch": 1.7859424920127795, "grad_norm": 1.7206260961087232, "learning_rate": 4.278402724035868e-06, "loss": 0.6692, "step": 280 }, { "epoch": 1.8498402555910545, "grad_norm": 1.7927077279083763, "learning_rate": 3.913270594176665e-06, "loss": 0.6597, "step": 290 }, { "epoch": 1.9137380191693292, "grad_norm": 1.8582153739647842, "learning_rate": 3.5541300364475067e-06, "loss": 0.6467, "step": 300 }, { "epoch": 1.9776357827476039, "grad_norm": 1.9389771348545655, "learning_rate": 3.202961135812437e-06, "loss": 0.6351, "step": 310 }, { "epoch": 2.038338658146965, "grad_norm": 2.186799488826696, "learning_rate": 2.861700026314308e-06, "loss": 0.5844, "step": 320 }, { "epoch": 2.1022364217252396, "grad_norm": 1.862856927563484, "learning_rate": 2.5322282163965096e-06, "loss": 0.5498, "step": 330 }, { "epoch": 2.1661341853035143, "grad_norm": 2.076202990686732, "learning_rate": 2.216362215397393e-06, "loss": 0.5414, "step": 340 }, { "epoch": 2.230031948881789, "grad_norm": 1.9052335758846242, "learning_rate": 1.91584351841065e-06, "loss": 0.531, "step": 350 }, { "epoch": 2.2939297124600637, "grad_norm": 2.0645734722050144, "learning_rate": 1.6323290047291196e-06, "loss": 0.5197, "step": 360 }, { "epoch": 2.357827476038339, "grad_norm": 2.026798582289315, "learning_rate": 1.367381802809185e-06, "loss": 0.5145, "step": 370 }, { "epoch": 2.4217252396166136, "grad_norm": 1.9047823063501508, "learning_rate": 1.1224626721209141e-06, "loss": 0.52, "step": 380 }, { "epoch": 2.4856230031948883, "grad_norm": 1.9193918008729423, "learning_rate": 8.989219493991791e-07, "loss": 0.5122, "step": 390 }, { "epoch": 2.549520766773163, "grad_norm": 2.0562904526376458, "learning_rate": 6.979921036993042e-07, "loss": 0.5144, "step": 400 } ], "logging_steps": 10, "max_steps": 471, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 200, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 31210476404736.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }