{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.059923098690014484,
"eval_steps": 1000,
"global_step": 7200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 8.322652595835345e-06,
"grad_norm": 14.9375,
"learning_rate": 2e-06,
"loss": 0.6484,
"step": 1
},
{
"epoch": 0.0008322652595835345,
"grad_norm": 0.3671875,
"learning_rate": 0.0002,
"loss": 0.4345,
"step": 100
},
{
"epoch": 0.001664530519167069,
"grad_norm": 0.1689453125,
"learning_rate": 0.0004,
"loss": 0.2537,
"step": 200
},
{
"epoch": 0.0024967957787506035,
"grad_norm": 0.55859375,
"learning_rate": 0.0006,
"loss": 0.2344,
"step": 300
},
{
"epoch": 0.003329061038334138,
"grad_norm": 0.154296875,
"learning_rate": 0.0008,
"loss": 0.222,
"step": 400
},
{
"epoch": 0.004161326297917672,
"grad_norm": 0.126953125,
"learning_rate": 0.001,
"loss": 0.2142,
"step": 500
},
{
"epoch": 0.004993591557501207,
"grad_norm": 0.12353515625,
"learning_rate": 0.0012,
"loss": 0.2011,
"step": 600
},
{
"epoch": 0.005825856817084741,
"grad_norm": 0.10693359375,
"learning_rate": 0.0014,
"loss": 0.1897,
"step": 700
},
{
"epoch": 0.006658122076668276,
"grad_norm": 0.10595703125,
"learning_rate": 0.0016,
"loss": 0.1775,
"step": 800
},
{
"epoch": 0.0074903873362518105,
"grad_norm": 0.0859375,
"learning_rate": 0.0018000000000000002,
"loss": 0.1615,
"step": 900
},
{
"epoch": 0.008322652595835344,
"grad_norm": 0.083984375,
"learning_rate": 0.002,
"loss": 0.1444,
"step": 1000
},
{
"epoch": 0.008322652595835344,
"eval_peoplespeech-clean-transcription_loss": 2.2110702991485596,
"eval_peoplespeech-clean-transcription_model_preparation_time": 0.0065,
"eval_peoplespeech-clean-transcription_runtime": 9.5405,
"eval_peoplespeech-clean-transcription_samples_per_second": 6.708,
"eval_peoplespeech-clean-transcription_steps_per_second": 0.839,
"step": 1000
},
{
"epoch": 0.009154917855418878,
"grad_norm": 0.0771484375,
"learning_rate": 0.001999725185109816,
"loss": 0.1323,
"step": 1100
},
{
"epoch": 0.009987183115002414,
"grad_norm": 0.083984375,
"learning_rate": 0.0019989008914857113,
"loss": 0.1242,
"step": 1200
},
{
"epoch": 0.010819448374585948,
"grad_norm": 0.07861328125,
"learning_rate": 0.00199752757218401,
"loss": 0.1162,
"step": 1300
},
{
"epoch": 0.011651713634169482,
"grad_norm": 0.07275390625,
"learning_rate": 0.001995605982021898,
"loss": 0.1128,
"step": 1400
},
{
"epoch": 0.012483978893753018,
"grad_norm": 0.07666015625,
"learning_rate": 0.0019931371771625545,
"loss": 0.1094,
"step": 1500
},
{
"epoch": 0.013316244153336551,
"grad_norm": 0.06201171875,
"learning_rate": 0.001990122514534651,
"loss": 0.1052,
"step": 1600
},
{
"epoch": 0.014148509412920085,
"grad_norm": 0.058837890625,
"learning_rate": 0.0019865636510865464,
"loss": 0.1022,
"step": 1700
},
{
"epoch": 0.014980774672503621,
"grad_norm": 0.0625,
"learning_rate": 0.001982462542875576,
"loss": 0.1011,
"step": 1800
},
{
"epoch": 0.015813039932087155,
"grad_norm": 0.06787109375,
"learning_rate": 0.001977821443992945,
"loss": 0.0983,
"step": 1900
},
{
"epoch": 0.01664530519167069,
"grad_norm": 0.0537109375,
"learning_rate": 0.001972642905324813,
"loss": 0.0975,
"step": 2000
},
{
"epoch": 0.01664530519167069,
"eval_peoplespeech-clean-transcription_loss": 1.6914767026901245,
"eval_peoplespeech-clean-transcription_model_preparation_time": 0.0065,
"eval_peoplespeech-clean-transcription_runtime": 9.5301,
"eval_peoplespeech-clean-transcription_samples_per_second": 6.716,
"eval_peoplespeech-clean-transcription_steps_per_second": 0.839,
"step": 2000
},
{
"epoch": 0.017477570451254223,
"grad_norm": 0.05859375,
"learning_rate": 0.0019669297731502505,
"loss": 0.0947,
"step": 2100
},
{
"epoch": 0.018309835710837757,
"grad_norm": 0.062255859375,
"learning_rate": 0.00196068518757684,
"loss": 0.0935,
"step": 2200
},
{
"epoch": 0.019142100970421294,
"grad_norm": 0.059326171875,
"learning_rate": 0.001953912580814779,
"loss": 0.0911,
"step": 2300
},
{
"epoch": 0.019974366230004828,
"grad_norm": 0.060302734375,
"learning_rate": 0.0019466156752904343,
"loss": 0.0904,
"step": 2400
},
{
"epoch": 0.020806631489588362,
"grad_norm": 0.0615234375,
"learning_rate": 0.0019387984816003866,
"loss": 0.0882,
"step": 2500
},
{
"epoch": 0.021638896749171896,
"grad_norm": 0.056884765625,
"learning_rate": 0.0019304652963070869,
"loss": 0.0874,
"step": 2600
},
{
"epoch": 0.02247116200875543,
"grad_norm": 0.049072265625,
"learning_rate": 0.0019216206995773372,
"loss": 0.0871,
"step": 2700
},
{
"epoch": 0.023303427268338964,
"grad_norm": 0.053955078125,
"learning_rate": 0.0019122695526648968,
"loss": 0.0856,
"step": 2800
},
{
"epoch": 0.0241356925279225,
"grad_norm": 0.05517578125,
"learning_rate": 0.0019024169952385887,
"loss": 0.0845,
"step": 2900
},
{
"epoch": 0.024967957787506035,
"grad_norm": 0.0546875,
"learning_rate": 0.0018920684425573864,
"loss": 0.0852,
"step": 3000
},
{
"epoch": 0.024967957787506035,
"eval_peoplespeech-clean-transcription_loss": 1.6414048671722412,
"eval_peoplespeech-clean-transcription_model_preparation_time": 0.0065,
"eval_peoplespeech-clean-transcription_runtime": 9.8151,
"eval_peoplespeech-clean-transcription_samples_per_second": 6.521,
"eval_peoplespeech-clean-transcription_steps_per_second": 0.815,
"step": 3000
},
{
"epoch": 0.02580022304708957,
"grad_norm": 0.052978515625,
"learning_rate": 0.0018812295824940284,
"loss": 0.0836,
"step": 3100
},
{
"epoch": 0.026632488306673103,
"grad_norm": 0.05126953125,
"learning_rate": 0.0018699063724087904,
"loss": 0.0821,
"step": 3200
},
{
"epoch": 0.027464753566256637,
"grad_norm": 0.056884765625,
"learning_rate": 0.0018581050358751443,
"loss": 0.0816,
"step": 3300
},
{
"epoch": 0.02829701882584017,
"grad_norm": 0.045654296875,
"learning_rate": 0.0018458320592590974,
"loss": 0.0814,
"step": 3400
},
{
"epoch": 0.029129284085423705,
"grad_norm": 0.04638671875,
"learning_rate": 0.0018330941881540914,
"loss": 0.0791,
"step": 3500
},
{
"epoch": 0.029961549345007242,
"grad_norm": 0.043212890625,
"learning_rate": 0.0018198984236734246,
"loss": 0.0804,
"step": 3600
},
{
"epoch": 0.030793814604590776,
"grad_norm": 0.054931640625,
"learning_rate": 0.0018062520186022297,
"loss": 0.0802,
"step": 3700
},
{
"epoch": 0.03162607986417431,
"grad_norm": 0.054931640625,
"learning_rate": 0.0017921624734111292,
"loss": 0.0805,
"step": 3800
},
{
"epoch": 0.032458345123757844,
"grad_norm": 0.048095703125,
"learning_rate": 0.001777637532133752,
"loss": 0.079,
"step": 3900
},
{
"epoch": 0.03329061038334138,
"grad_norm": 0.0458984375,
"learning_rate": 0.0017626851781103819,
"loss": 0.0759,
"step": 4000
},
{
"epoch": 0.03329061038334138,
"eval_peoplespeech-clean-transcription_loss": 1.5952188968658447,
"eval_peoplespeech-clean-transcription_model_preparation_time": 0.0065,
"eval_peoplespeech-clean-transcription_runtime": 9.4647,
"eval_peoplespeech-clean-transcription_samples_per_second": 6.762,
"eval_peoplespeech-clean-transcription_steps_per_second": 0.845,
"step": 4000
},
{
"epoch": 0.03412287564292491,
"grad_norm": 0.06494140625,
"learning_rate": 0.001747313629600077,
"loss": 0.0774,
"step": 4100
},
{
"epoch": 0.034955140902508446,
"grad_norm": 0.043701171875,
"learning_rate": 0.001731531335263669,
"loss": 0.0774,
"step": 4200
},
{
"epoch": 0.03578740616209198,
"grad_norm": 0.048095703125,
"learning_rate": 0.0017153469695201276,
"loss": 0.0769,
"step": 4300
},
{
"epoch": 0.03661967142167551,
"grad_norm": 0.051513671875,
"learning_rate": 0.0016987694277788418,
"loss": 0.0763,
"step": 4400
},
{
"epoch": 0.037451936681259054,
"grad_norm": 0.047607421875,
"learning_rate": 0.001681807821550438,
"loss": 0.0774,
"step": 4500
},
{
"epoch": 0.03828420194084259,
"grad_norm": 0.040283203125,
"learning_rate": 0.0016644714734388218,
"loss": 0.0748,
"step": 4600
},
{
"epoch": 0.03911646720042612,
"grad_norm": 0.05419921875,
"learning_rate": 0.0016467699120171987,
"loss": 0.0755,
"step": 4700
},
{
"epoch": 0.039948732460009656,
"grad_norm": 0.048828125,
"learning_rate": 0.001628712866590885,
"loss": 0.0732,
"step": 4800
},
{
"epoch": 0.04078099771959319,
"grad_norm": 0.046875,
"learning_rate": 0.0016103102618497923,
"loss": 0.0757,
"step": 4900
},
{
"epoch": 0.041613262979176724,
"grad_norm": 0.042236328125,
"learning_rate": 0.0015915722124135226,
"loss": 0.0738,
"step": 5000
},
{
"epoch": 0.041613262979176724,
"eval_peoplespeech-clean-transcription_loss": 1.5546748638153076,
"eval_peoplespeech-clean-transcription_model_preparation_time": 0.0065,
"eval_peoplespeech-clean-transcription_runtime": 9.345,
"eval_peoplespeech-clean-transcription_samples_per_second": 6.849,
"eval_peoplespeech-clean-transcription_steps_per_second": 0.856,
"step": 5000
},
{
"epoch": 0.04244552823876026,
"grad_norm": 0.045166015625,
"learning_rate": 0.001572509017272072,
"loss": 0.0742,
"step": 5100
},
{
"epoch": 0.04327779349834379,
"grad_norm": 0.0556640625,
"learning_rate": 0.0015531311541251993,
"loss": 0.0746,
"step": 5200
},
{
"epoch": 0.044110058757927326,
"grad_norm": 0.043212890625,
"learning_rate": 0.0015334492736235703,
"loss": 0.073,
"step": 5300
},
{
"epoch": 0.04494232401751086,
"grad_norm": 0.044921875,
"learning_rate": 0.0015134741935148419,
"loss": 0.0727,
"step": 5400
},
{
"epoch": 0.045774589277094394,
"grad_norm": 0.05029296875,
"learning_rate": 0.0014932168926979072,
"loss": 0.0726,
"step": 5500
},
{
"epoch": 0.04660685453667793,
"grad_norm": 0.048095703125,
"learning_rate": 0.0014726885051885652,
"loss": 0.0718,
"step": 5600
},
{
"epoch": 0.04743911979626146,
"grad_norm": 0.046875,
"learning_rate": 0.0014519003139999338,
"loss": 0.0736,
"step": 5700
},
{
"epoch": 0.048271385055845,
"grad_norm": 0.051513671875,
"learning_rate": 0.0014308637449409706,
"loss": 0.0724,
"step": 5800
},
{
"epoch": 0.049103650315428536,
"grad_norm": 0.053955078125,
"learning_rate": 0.0014095903603365066,
"loss": 0.0724,
"step": 5900
},
{
"epoch": 0.04993591557501207,
"grad_norm": 0.037841796875,
"learning_rate": 0.0013880918526722496,
"loss": 0.071,
"step": 6000
},
{
"epoch": 0.04993591557501207,
"eval_peoplespeech-clean-transcription_loss": 1.5297455787658691,
"eval_peoplespeech-clean-transcription_model_preparation_time": 0.0065,
"eval_peoplespeech-clean-transcription_runtime": 9.3364,
"eval_peoplespeech-clean-transcription_samples_per_second": 6.855,
"eval_peoplespeech-clean-transcription_steps_per_second": 0.857,
"step": 6000
},
{
"epoch": 0.050768180834595604,
"grad_norm": 0.05029296875,
"learning_rate": 0.0013663800381682463,
"loss": 0.0713,
"step": 6100
},
{
"epoch": 0.05160044609417914,
"grad_norm": 0.037109375,
"learning_rate": 0.0013444668502843329,
"loss": 0.0701,
"step": 6200
},
{
"epoch": 0.05243271135376267,
"grad_norm": 0.03564453125,
"learning_rate": 0.0013223643331611537,
"loss": 0.0708,
"step": 6300
},
{
"epoch": 0.053264976613346206,
"grad_norm": 0.04443359375,
"learning_rate": 0.001300084635000341,
"loss": 0.0707,
"step": 6400
},
{
"epoch": 0.05409724187292974,
"grad_norm": 0.033935546875,
"learning_rate": 0.0012776400013875004,
"loss": 0.0704,
"step": 6500
},
{
"epoch": 0.054929507132513274,
"grad_norm": 0.046142578125,
"learning_rate": 0.0012550427685616766,
"loss": 0.0707,
"step": 6600
},
{
"epoch": 0.05576177239209681,
"grad_norm": 0.04150390625,
"learning_rate": 0.0012323053566349834,
"loss": 0.0696,
"step": 6700
},
{
"epoch": 0.05659403765168034,
"grad_norm": 0.042236328125,
"learning_rate": 0.0012094402627661448,
"loss": 0.0705,
"step": 6800
},
{
"epoch": 0.057426302911263875,
"grad_norm": 0.035888671875,
"learning_rate": 0.0011864600542916813,
"loss": 0.0686,
"step": 6900
},
{
"epoch": 0.05825856817084741,
"grad_norm": 0.0498046875,
"learning_rate": 0.0011633773618185302,
"loss": 0.0687,
"step": 7000
},
{
"epoch": 0.05825856817084741,
"eval_peoplespeech-clean-transcription_loss": 1.518404483795166,
"eval_peoplespeech-clean-transcription_model_preparation_time": 0.0065,
"eval_peoplespeech-clean-transcription_runtime": 9.4703,
"eval_peoplespeech-clean-transcription_samples_per_second": 6.758,
"eval_peoplespeech-clean-transcription_steps_per_second": 0.845,
"step": 7000
},
{
"epoch": 0.05909083343043095,
"grad_norm": 0.034423828125,
"learning_rate": 0.0011402048722818862,
"loss": 0.0693,
"step": 7100
},
{
"epoch": 0.059923098690014484,
"grad_norm": 0.047119140625,
"learning_rate": 0.0011169553219720827,
"loss": 0.0697,
"step": 7200
}
],
"logging_steps": 100,
"max_steps": 14400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3600,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.626487387599667e+17,
"train_batch_size": 24,
"trial_name": null,
"trial_params": null
}