diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..91fb70c754547bd527655efb9bed47320149b3f2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,71 @@
+---
+library_name: transformers
+license: apache-2.0
+base_model: answerdotai/ModernBERT-large
+tags:
+- generated_from_trainer
+metrics:
+- accuracy
+- precision
+- recall
+- f1
+model-index:
+- name: answerdotai-ModernBERT-large-finetuned
+  results: []
+---
+
+
+# answerdotai-ModernBERT-large-finetuned
+
+This model is a fine-tuned version of [answerdotai/ModernBERT-large](https://huggingface.co/answerdotai/ModernBERT-large) on an unspecified dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.0153
+- Accuracy: 0.9980
+- Precision: 0.9980
+- Recall: 0.9980
+- F1: 0.9980
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 4.1905207188250686e-05
+- train_batch_size: 16
+- eval_batch_size: 16
+- seed: 42
+- optimizer: AdamW (torch) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+- lr_scheduler_type: linear
+- num_epochs: 5
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 |
+|:-------------:|:-----:|:-----:|:---------------:|:--------:|:---------:|:------:|:------:|
+| 0.0046 | 1.0 | 3011 | 0.0257 | 0.9962 | 0.9962 | 0.9962 | 0.9962 |
+| 0.021 | 2.0 | 6022 | 0.0234 | 0.9959 | 0.9960 | 0.9959 | 0.9960 |
+| 0.0001 | 3.0 | 9033 | 0.0194 | 0.9979 | 0.9978 | 0.9979 | 0.9978 |
+| 0.0002 | 4.0 | 12044 | 0.0181 | 0.9979 | 0.9978 | 0.9979 | 0.9978 |
+| 0.0 | 5.0 | 15055 | 0.0177 | 0.9980 | 0.9980 | 0.9980 | 0.9980 |
+
+
+### Framework versions
+
+- Transformers 4.48.0.dev0
+- Pytorch 2.5.1+cu124
+- Datasets 3.2.0
+- Tokenizers 0.21.0
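+
+## How to use
+
+The snippet below is a minimal sketch of how this checkpoint could be loaded for inference. The local path, the tokenizer source, and the label names are assumptions: the card does not document the dataset, the class labels, or whether tokenizer files ship alongside the fine-tuned weights.
+
+```python
+# Minimal usage sketch. Assumptions: the checkpoint sits in the current directory,
+# no tokenizer files are included (so the base-model tokenizer is used), and
+# id2label is not set (so outputs use generic LABEL_0/LABEL_1-style names).
+# ModernBERT support requires transformers >= 4.48.
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
+
+model_path = "./"  # placeholder: local path or hub id of this fine-tuned checkpoint
+tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-large")
+model = AutoModelForSequenceClassification.from_pretrained(model_path)
+
+classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
+print(classifier("Example input text to classify."))
+```
diff --git a/config.json b/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ b/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": "answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + "intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0",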
+ "vocab_size": 50368 +} diff --git a/model.safetensors b/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9154a9665f6f206c800a69b84596c64df299ac7f --- /dev/null +++ b/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e73302dfe575e2fae3ec24034f72088b2adb6e6f5c4e4a35e949d0d8c4b78dbb +size 1583351632 diff --git a/training_args.bin b/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..c66e9895cfe5342eab7e17db702ecef2673ee4bc --- /dev/null +++ b/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d77dcef85d4aa9fa4696da036cb727ea773883db77c9535ae75875fc4a5a5c11 +size 5432 diff --git a/trial-0/checkpoint-3011/config.json b/trial-0/checkpoint-3011/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ b/trial-0/checkpoint-3011/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": "answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + "intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "vocab_size": 50368 +} diff --git a/trial-0/checkpoint-3011/model.safetensors b/trial-0/checkpoint-3011/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b80dfaef66ac4ed437619bc49970cd919d1af69c --- /dev/null +++ b/trial-0/checkpoint-3011/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e79c24376368ad25cd3c338648c1e61fcbf3c81c96c917991d54a7bad08de0de +size 1583351632 diff --git a/trial-0/checkpoint-3011/optimizer.pt b/trial-0/checkpoint-3011/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..fdb7ce0918fd39c0fb34326293ec64e0cbb3e09e --- /dev/null +++ b/trial-0/checkpoint-3011/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:022244260a3503756427f87fda63355cfa09334e6a2699f150afdd10b1cc0cb3 +size 3166813178 diff --git a/trial-0/checkpoint-3011/rng_state.pth b/trial-0/checkpoint-3011/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..cf3d91c5392ca6b7d7e0880933b7830a896d7c9e --- /dev/null +++ b/trial-0/checkpoint-3011/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:568428d80a25211a390c359ca51b0b20b38ca0607fbc196f106c9841c02d3e59 +size 14244 diff --git a/trial-0/checkpoint-3011/scheduler.pt b/trial-0/checkpoint-3011/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..ccac887beaf23f5e36384f4844059435a7ab5aea --- /dev/null +++ b/trial-0/checkpoint-3011/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f8a6aef7194053a2f13849a184537c69f114fdea257f4aa4eef32c5d0218928 +size 1064 diff --git a/trial-0/checkpoint-3011/trainer_state.json b/trial-0/checkpoint-3011/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..b1de20eea1f5d58c35d349465f6e44383dfd2091 --- /dev/null +++ b/trial-0/checkpoint-3011/trainer_state.json @@ -0,0 +1,465 @@ +{ + "best_metric": 0.02747241035103798, + "best_model_checkpoint": "./results/answerdotai/ModernBERT-large/trial-0/checkpoint-3011", + "epoch": 1.0, + "eval_steps": 500, + "global_step": 3011, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.016605778811026237, + "grad_norm": 8.580768585205078, + "learning_rate": 5.396357633246935e-06, + "loss": 0.4089, + "step": 50 + }, + { + "epoch": 0.033211557622052475, + "grad_norm": 4.984262466430664, + "learning_rate": 5.385132993856705e-06, + "loss": 0.1911, + "step": 100 + }, + { + "epoch": 0.04981733643307871, + "grad_norm": 145.00692749023438, + "learning_rate": 5.373908354466475e-06, + "loss": 0.1545, + "step": 150 + }, + { + "epoch": 0.06642311524410495, + "grad_norm": 2.051618814468384, + "learning_rate": 5.3626837150762455e-06, + "loss": 0.0941, + "step": 200 + }, + { + "epoch": 0.08302889405513118, + "grad_norm": 0.19901786744594574, + "learning_rate": 5.351459075686016e-06, + "loss": 0.0327, + "step": 250 + }, + { + "epoch": 0.09963467286615742, + "grad_norm": 0.003997017629444599, + "learning_rate": 5.340234436295786e-06, + "loss": 0.0585, + "step": 300 + }, + { + "epoch": 0.11624045167718366, + "grad_norm": 0.016547370702028275, + "learning_rate": 5.329009796905555e-06, + "loss": 0.0461, + "step": 350 + }, + { + "epoch": 0.1328462304882099, + "grad_norm": 5.060605049133301, + "learning_rate": 5.317785157515326e-06, + "loss": 0.0634, + "step": 400 + }, + { + "epoch": 0.14945200929923613, + "grad_norm": 0.013804232701659203, + "learning_rate": 5.306560518125096e-06, + "loss": 0.0538, + "step": 450 + }, + { + "epoch": 0.16605778811026237, + "grad_norm": 0.056022025644779205, + "learning_rate": 5.295335878734865e-06, + "loss": 0.0283, + "step": 500 + }, + { + "epoch": 0.1826635669212886, + "grad_norm": 0.0030180325265973806, + "learning_rate": 5.284111239344636e-06, + "loss": 0.0278, + "step": 550 + }, + { + "epoch": 0.19926934573231483, + "grad_norm": 0.14190103113651276, + "learning_rate": 5.272886599954406e-06, + "loss": 0.0486, + "step": 600 + }, + { + "epoch": 0.2158751245433411, + "grad_norm": 23.922273635864258, + "learning_rate": 5.261661960564176e-06, + "loss": 0.0757, + "step": 650 + }, + { + "epoch": 0.23248090335436733, + "grad_norm": 0.017755718901753426, + "learning_rate": 5.250437321173947e-06, + "loss": 0.0503, + "step": 700 + }, + { + "epoch": 0.24908668216539356, + "grad_norm": 0.1306287795305252, + "learning_rate": 5.239212681783716e-06, + "loss": 0.0369, + "step": 750 + }, + { + "epoch": 0.2656924609764198, + "grad_norm": 0.0014153249794617295, + "learning_rate": 5.227988042393486e-06, + "loss": 0.0284, + "step": 800 + }, + { + "epoch": 0.282298239787446, + "grad_norm": 
13.222740173339844, + "learning_rate": 5.216763403003257e-06, + "loss": 0.0685, + "step": 850 + }, + { + "epoch": 0.29890401859847227, + "grad_norm": 0.01663159392774105, + "learning_rate": 5.205538763613026e-06, + "loss": 0.0377, + "step": 900 + }, + { + "epoch": 0.3155097974094985, + "grad_norm": 9.587738037109375, + "learning_rate": 5.194314124222796e-06, + "loss": 0.0284, + "step": 950 + }, + { + "epoch": 0.33211557622052473, + "grad_norm": 0.0031755813397467136, + "learning_rate": 5.183089484832566e-06, + "loss": 0.0287, + "step": 1000 + }, + { + "epoch": 0.348721355031551, + "grad_norm": 0.002594751538708806, + "learning_rate": 5.1718648454423365e-06, + "loss": 0.0239, + "step": 1050 + }, + { + "epoch": 0.3653271338425772, + "grad_norm": 0.0008383135427720845, + "learning_rate": 5.160640206052107e-06, + "loss": 0.0163, + "step": 1100 + }, + { + "epoch": 0.38193291265360346, + "grad_norm": 0.0018693436868488789, + "learning_rate": 5.149415566661877e-06, + "loss": 0.0173, + "step": 1150 + }, + { + "epoch": 0.39853869146462967, + "grad_norm": 3.5105800634482875e-05, + "learning_rate": 5.138190927271647e-06, + "loss": 0.0144, + "step": 1200 + }, + { + "epoch": 0.41514447027565593, + "grad_norm": 0.04262986406683922, + "learning_rate": 5.126966287881418e-06, + "loss": 0.0225, + "step": 1250 + }, + { + "epoch": 0.4317502490866822, + "grad_norm": 0.257622092962265, + "learning_rate": 5.115741648491187e-06, + "loss": 0.0458, + "step": 1300 + }, + { + "epoch": 0.4483560278977084, + "grad_norm": 0.057092875242233276, + "learning_rate": 5.104517009100957e-06, + "loss": 0.015, + "step": 1350 + }, + { + "epoch": 0.46496180670873466, + "grad_norm": 0.009775767102837563, + "learning_rate": 5.093292369710727e-06, + "loss": 0.0002, + "step": 1400 + }, + { + "epoch": 0.48156758551976087, + "grad_norm": 0.16755907237529755, + "learning_rate": 5.082067730320497e-06, + "loss": 0.0119, + "step": 1450 + }, + { + "epoch": 0.4981733643307871, + "grad_norm": 0.0004505734541453421, + "learning_rate": 5.0708430909302675e-06, + "loss": 0.0107, + "step": 1500 + }, + { + "epoch": 0.5147791431418134, + "grad_norm": 0.00015653851733077317, + "learning_rate": 5.059618451540038e-06, + "loss": 0.0002, + "step": 1550 + }, + { + "epoch": 0.5313849219528396, + "grad_norm": 0.1482504904270172, + "learning_rate": 5.048393812149808e-06, + "loss": 0.0221, + "step": 1600 + }, + { + "epoch": 0.5479907007638658, + "grad_norm": 0.00016298597620334476, + "learning_rate": 5.037169172759578e-06, + "loss": 0.0281, + "step": 1650 + }, + { + "epoch": 0.564596479574892, + "grad_norm": 0.0018974934937432408, + "learning_rate": 5.025944533369348e-06, + "loss": 0.0152, + "step": 1700 + }, + { + "epoch": 0.5812022583859183, + "grad_norm": 0.020836347714066505, + "learning_rate": 5.014719893979118e-06, + "loss": 0.0298, + "step": 1750 + }, + { + "epoch": 0.5978080371969445, + "grad_norm": 0.0017976267263293266, + "learning_rate": 5.003495254588888e-06, + "loss": 0.0163, + "step": 1800 + }, + { + "epoch": 0.6144138160079707, + "grad_norm": 0.007485950365662575, + "learning_rate": 4.992270615198658e-06, + "loss": 0.0196, + "step": 1850 + }, + { + "epoch": 0.631019594818997, + "grad_norm": 0.00043688452569767833, + "learning_rate": 4.981045975808428e-06, + "loss": 0.0118, + "step": 1900 + }, + { + "epoch": 0.6476253736300233, + "grad_norm": 0.0006113427225500345, + "learning_rate": 4.9698213364181985e-06, + "loss": 0.0084, + "step": 1950 + }, + { + "epoch": 0.6642311524410495, + "grad_norm": 0.001146040391176939, + "learning_rate": 
4.958596697027969e-06, + "loss": 0.0001, + "step": 2000 + }, + { + "epoch": 0.6808369312520757, + "grad_norm": 0.022079484537243843, + "learning_rate": 4.947372057637739e-06, + "loss": 0.0135, + "step": 2050 + }, + { + "epoch": 0.697442710063102, + "grad_norm": 0.000534936785697937, + "learning_rate": 4.936147418247508e-06, + "loss": 0.0013, + "step": 2100 + }, + { + "epoch": 0.7140484888741282, + "grad_norm": 0.19137096405029297, + "learning_rate": 4.924922778857279e-06, + "loss": 0.0242, + "step": 2150 + }, + { + "epoch": 0.7306542676851544, + "grad_norm": 2.9649052521563135e-05, + "learning_rate": 4.913698139467049e-06, + "loss": 0.0048, + "step": 2200 + }, + { + "epoch": 0.7472600464961807, + "grad_norm": 0.0003890593070536852, + "learning_rate": 4.902473500076818e-06, + "loss": 0.0055, + "step": 2250 + }, + { + "epoch": 0.7638658253072069, + "grad_norm": 7.795329293003306e-05, + "learning_rate": 4.891248860686589e-06, + "loss": 0.0062, + "step": 2300 + }, + { + "epoch": 0.7804716041182331, + "grad_norm": 3.511108661768958e-05, + "learning_rate": 4.8800242212963586e-06, + "loss": 0.0032, + "step": 2350 + }, + { + "epoch": 0.7970773829292593, + "grad_norm": 0.0023988874163478613, + "learning_rate": 4.868799581906129e-06, + "loss": 0.0256, + "step": 2400 + }, + { + "epoch": 0.8136831617402857, + "grad_norm": 2.8709350772260223e-06, + "learning_rate": 4.8575749425159e-06, + "loss": 0.0, + "step": 2450 + }, + { + "epoch": 0.8302889405513119, + "grad_norm": 6.131248665042222e-05, + "learning_rate": 4.846350303125669e-06, + "loss": 0.0002, + "step": 2500 + }, + { + "epoch": 0.8468947193623381, + "grad_norm": 0.4060337245464325, + "learning_rate": 4.835125663735439e-06, + "loss": 0.0113, + "step": 2550 + }, + { + "epoch": 0.8635004981733644, + "grad_norm": 0.14530286192893982, + "learning_rate": 4.823901024345209e-06, + "loss": 0.0177, + "step": 2600 + }, + { + "epoch": 0.8801062769843906, + "grad_norm": 0.00015053209790494293, + "learning_rate": 4.812676384954979e-06, + "loss": 0.0083, + "step": 2650 + }, + { + "epoch": 0.8967120557954168, + "grad_norm": 2.9383112632785924e-05, + "learning_rate": 4.801451745564749e-06, + "loss": 0.0, + "step": 2700 + }, + { + "epoch": 0.913317834606443, + "grad_norm": 0.0002987831539940089, + "learning_rate": 4.7902271061745195e-06, + "loss": 0.0013, + "step": 2750 + }, + { + "epoch": 0.9299236134174693, + "grad_norm": 9.85327574198891e-07, + "learning_rate": 4.7790024667842896e-06, + "loss": 0.0009, + "step": 2800 + }, + { + "epoch": 0.9465293922284955, + "grad_norm": 0.0004204445576760918, + "learning_rate": 4.76777782739406e-06, + "loss": 0.0, + "step": 2850 + }, + { + "epoch": 0.9631351710395217, + "grad_norm": 0.02836577780544758, + "learning_rate": 4.75655318800383e-06, + "loss": 0.0017, + "step": 2900 + }, + { + "epoch": 0.9797409498505479, + "grad_norm": 12.173595428466797, + "learning_rate": 4.7453285486136e-06, + "loss": 0.0284, + "step": 2950 + }, + { + "epoch": 0.9963467286615743, + "grad_norm": 0.0006245356635190547, + "learning_rate": 4.73410390922337e-06, + "loss": 0.0003, + "step": 3000 + }, + { + "epoch": 1.0, + "eval_accuracy": 0.9964217557251909, + "eval_f1": 0.9964322542520268, + "eval_loss": 0.02747241035103798, + "eval_precision": 0.9964456382767726, + "eval_recall": 0.9964217557251909, + "eval_runtime": 70.9146, + "eval_samples_per_second": 118.227, + "eval_steps_per_second": 7.389, + "step": 3011 + } + ], + "logging_steps": 50, + "max_steps": 24088, + "num_input_tokens_seen": 0, + "num_train_epochs": 8, + "save_steps": 500, + 
"stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 5.094247305025536e+16, + "train_batch_size": 16, + "trial_name": null, + "trial_params": null +} diff --git a/trial-0/checkpoint-3011/training_args.bin b/trial-0/checkpoint-3011/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..49941315a88326f004a61253e858c9f8c45f253e --- /dev/null +++ b/trial-0/checkpoint-3011/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2d43680079f69ace9b061a9cddc7889b3b2058a373dcb9cabe67e7a0b586646 +size 5368 diff --git a/trial-1/checkpoint-15055/config.json b/trial-1/checkpoint-15055/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ b/trial-1/checkpoint-15055/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": "answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + "intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "vocab_size": 50368 +} diff --git a/trial-1/checkpoint-15055/model.safetensors b/trial-1/checkpoint-15055/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6608c7e8a47eebd39582252638858594a76d8128 --- /dev/null +++ b/trial-1/checkpoint-15055/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce6a1060ef93e1b5dc13c4ed9a3b6c77d2c792f107b20ff17933b9d4c15ecf0a +size 1583351632 diff --git a/trial-1/checkpoint-15055/optimizer.pt b/trial-1/checkpoint-15055/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..c3bd126a614c45bfa549449165df0c4d57205768 --- /dev/null +++ b/trial-1/checkpoint-15055/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92795446fb40fea40977d388996f597e156f884cc50faed369041fd42add878a +size 3166813178 diff --git a/trial-1/checkpoint-15055/rng_state.pth b/trial-1/checkpoint-15055/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..04c9f9e92852535232e85b846de38b177134d2f9 --- /dev/null +++ b/trial-1/checkpoint-15055/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:aeb45f9d32a7f047001ed27e329de64c1bf9ebcb398e7f4734ee49c0dcd24d49 +size 14244 diff --git a/trial-1/checkpoint-15055/scheduler.pt b/trial-1/checkpoint-15055/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..fb26e030b542d8ae820204bb0e4b44dcc4cb5301 --- /dev/null +++ b/trial-1/checkpoint-15055/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f4201dceb29ec1f3f21db3295b8c8fbe60d56c450ea048b5457e0cc4bfa0108 +size 1064 diff --git a/trial-1/checkpoint-15055/trainer_state.json b/trial-1/checkpoint-15055/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..c52d7f32a15354f4a5eee3b0bcd8f74069791c20 --- /dev/null +++ b/trial-1/checkpoint-15055/trainer_state.json @@ -0,0 +1,2200 @@ +{ + "best_metric": 0.026163995265960693, + "best_model_checkpoint": "./results/answerdotai/ModernBERT-large/trial-1/checkpoint-15055", + "epoch": 5.0, + "eval_steps": 500, + "global_step": 15055, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.016605778811026237, + "grad_norm": 97.8153076171875, + "learning_rate": 4.1822378752775576e-05, + "loss": 0.332, + "step": 50 + }, + { + "epoch": 0.033211557622052475, + "grad_norm": 0.13549020886421204, + "learning_rate": 4.1722929523801454e-05, + "loss": 0.1561, + "step": 100 + }, + { + "epoch": 0.04981733643307871, + "grad_norm": 30.323139190673828, + "learning_rate": 4.162348029482734e-05, + "loss": 0.1222, + "step": 150 + }, + { + "epoch": 0.06642311524410495, + "grad_norm": 0.0052522895857691765, + "learning_rate": 4.1524031065853216e-05, + "loss": 0.0938, + "step": 200 + }, + { + "epoch": 0.08302889405513118, + "grad_norm": 8.804322242736816, + "learning_rate": 4.14245818368791e-05, + "loss": 0.0887, + "step": 250 + }, + { + "epoch": 0.09963467286615742, + "grad_norm": 0.09965868294239044, + "learning_rate": 4.1325132607904985e-05, + "loss": 0.0998, + "step": 300 + }, + { + "epoch": 0.11624045167718366, + "grad_norm": 0.03749964013695717, + "learning_rate": 4.122568337893087e-05, + "loss": 0.0552, + "step": 350 + }, + { + "epoch": 0.1328462304882099, + "grad_norm": 0.2192620187997818, + "learning_rate": 4.112623414995675e-05, + "loss": 0.0717, + "step": 400 + }, + { + "epoch": 0.14945200929923613, + "grad_norm": 136.17337036132812, + "learning_rate": 4.102678492098263e-05, + "loss": 0.0343, + "step": 450 + }, + { + "epoch": 0.16605778811026237, + "grad_norm": 0.006200531497597694, + "learning_rate": 4.092733569200851e-05, + "loss": 0.0584, + "step": 500 + }, + { + "epoch": 0.1826635669212886, + "grad_norm": 0.032046183943748474, + "learning_rate": 4.0827886463034394e-05, + "loss": 0.0458, + "step": 550 + }, + { + "epoch": 0.19926934573231483, + "grad_norm": 3.6847121715545654, + "learning_rate": 4.072843723406027e-05, + "loss": 0.0463, + "step": 600 + }, + { + "epoch": 0.2158751245433411, + "grad_norm": 9.91704273223877, + "learning_rate": 4.0628988005086156e-05, + "loss": 0.044, + "step": 650 + }, + { + "epoch": 0.23248090335436733, + "grad_norm": 0.02639286033809185, + "learning_rate": 4.052953877611204e-05, + "loss": 0.0353, + "step": 700 + }, + { + "epoch": 0.24908668216539356, + "grad_norm": 0.007340637035667896, + "learning_rate": 4.0430089547137925e-05, + "loss": 0.0519, + "step": 750 + }, + { + "epoch": 0.2656924609764198, + "grad_norm": 0.003980421461164951, + "learning_rate": 4.033064031816381e-05, + "loss": 0.0588, + "step": 800 + }, + { + "epoch": 0.282298239787446, + 
"grad_norm": 1.6867443323135376, + "learning_rate": 4.023119108918969e-05, + "loss": 0.0766, + "step": 850 + }, + { + "epoch": 0.29890401859847227, + "grad_norm": 0.07216636836528778, + "learning_rate": 4.013174186021557e-05, + "loss": 0.0646, + "step": 900 + }, + { + "epoch": 0.3155097974094985, + "grad_norm": 0.0015164370415732265, + "learning_rate": 4.003229263124145e-05, + "loss": 0.0342, + "step": 950 + }, + { + "epoch": 0.33211557622052473, + "grad_norm": 0.004687592852860689, + "learning_rate": 3.993284340226733e-05, + "loss": 0.0327, + "step": 1000 + }, + { + "epoch": 0.348721355031551, + "grad_norm": 0.0013525976100936532, + "learning_rate": 3.983339417329321e-05, + "loss": 0.0362, + "step": 1050 + }, + { + "epoch": 0.3653271338425772, + "grad_norm": 0.021217679604887962, + "learning_rate": 3.9733944944319096e-05, + "loss": 0.0251, + "step": 1100 + }, + { + "epoch": 0.38193291265360346, + "grad_norm": 0.0011278189485892653, + "learning_rate": 3.963449571534498e-05, + "loss": 0.0136, + "step": 1150 + }, + { + "epoch": 0.39853869146462967, + "grad_norm": 0.0034522530622780323, + "learning_rate": 3.9535046486370864e-05, + "loss": 0.021, + "step": 1200 + }, + { + "epoch": 0.41514447027565593, + "grad_norm": 0.20626066625118256, + "learning_rate": 3.943559725739674e-05, + "loss": 0.0188, + "step": 1250 + }, + { + "epoch": 0.4317502490866822, + "grad_norm": 0.11095874011516571, + "learning_rate": 3.9336148028422627e-05, + "loss": 0.0275, + "step": 1300 + }, + { + "epoch": 0.4483560278977084, + "grad_norm": 0.09171418100595474, + "learning_rate": 3.9236698799448504e-05, + "loss": 0.0386, + "step": 1350 + }, + { + "epoch": 0.46496180670873466, + "grad_norm": 0.004308766219764948, + "learning_rate": 3.913724957047439e-05, + "loss": 0.0199, + "step": 1400 + }, + { + "epoch": 0.48156758551976087, + "grad_norm": 0.099054716527462, + "learning_rate": 3.9037800341500266e-05, + "loss": 0.0349, + "step": 1450 + }, + { + "epoch": 0.4981733643307871, + "grad_norm": 0.000510412035509944, + "learning_rate": 3.893835111252615e-05, + "loss": 0.0405, + "step": 1500 + }, + { + "epoch": 0.5147791431418134, + "grad_norm": 0.00045745610259473324, + "learning_rate": 3.8838901883552035e-05, + "loss": 0.0081, + "step": 1550 + }, + { + "epoch": 0.5313849219528396, + "grad_norm": 2.640720844268799, + "learning_rate": 3.873945265457791e-05, + "loss": 0.0312, + "step": 1600 + }, + { + "epoch": 0.5479907007638658, + "grad_norm": 0.0317668542265892, + "learning_rate": 3.86400034256038e-05, + "loss": 0.0231, + "step": 1650 + }, + { + "epoch": 0.564596479574892, + "grad_norm": 0.0374116413295269, + "learning_rate": 3.854055419662968e-05, + "loss": 0.0062, + "step": 1700 + }, + { + "epoch": 0.5812022583859183, + "grad_norm": 3.7858481407165527, + "learning_rate": 3.8441104967655566e-05, + "loss": 0.0172, + "step": 1750 + }, + { + "epoch": 0.5978080371969445, + "grad_norm": 0.006008578464388847, + "learning_rate": 3.8341655738681444e-05, + "loss": 0.0317, + "step": 1800 + }, + { + "epoch": 0.6144138160079707, + "grad_norm": 0.009135287255048752, + "learning_rate": 3.824220650970733e-05, + "loss": 0.0267, + "step": 1850 + }, + { + "epoch": 0.631019594818997, + "grad_norm": 0.004011407028883696, + "learning_rate": 3.8142757280733206e-05, + "loss": 0.0218, + "step": 1900 + }, + { + "epoch": 0.6476253736300233, + "grad_norm": 1.2146693468093872, + "learning_rate": 3.804330805175909e-05, + "loss": 0.0284, + "step": 1950 + }, + { + "epoch": 0.6642311524410495, + "grad_norm": 0.0033848509192466736, + "learning_rate": 
3.794385882278497e-05, + "loss": 0.0241, + "step": 2000 + }, + { + "epoch": 0.6808369312520757, + "grad_norm": 0.0062151020392775536, + "learning_rate": 3.784440959381085e-05, + "loss": 0.0126, + "step": 2050 + }, + { + "epoch": 0.697442710063102, + "grad_norm": 0.004486024379730225, + "learning_rate": 3.774496036483674e-05, + "loss": 0.01, + "step": 2100 + }, + { + "epoch": 0.7140484888741282, + "grad_norm": 0.2388181835412979, + "learning_rate": 3.764551113586262e-05, + "loss": 0.029, + "step": 2150 + }, + { + "epoch": 0.7306542676851544, + "grad_norm": 0.0008192298118956387, + "learning_rate": 3.75460619068885e-05, + "loss": 0.0042, + "step": 2200 + }, + { + "epoch": 0.7472600464961807, + "grad_norm": 5.3397710871649906e-05, + "learning_rate": 3.7446612677914384e-05, + "loss": 0.003, + "step": 2250 + }, + { + "epoch": 0.7638658253072069, + "grad_norm": 0.020504744723439217, + "learning_rate": 3.734716344894026e-05, + "loss": 0.0262, + "step": 2300 + }, + { + "epoch": 0.7804716041182331, + "grad_norm": 0.0005639814771711826, + "learning_rate": 3.7247714219966146e-05, + "loss": 0.0043, + "step": 2350 + }, + { + "epoch": 0.7970773829292593, + "grad_norm": 0.0031704490538686514, + "learning_rate": 3.714826499099203e-05, + "loss": 0.0284, + "step": 2400 + }, + { + "epoch": 0.8136831617402857, + "grad_norm": 0.001501227729022503, + "learning_rate": 3.704881576201791e-05, + "loss": 0.0139, + "step": 2450 + }, + { + "epoch": 0.8302889405513119, + "grad_norm": 0.0010695902165025473, + "learning_rate": 3.694936653304379e-05, + "loss": 0.0002, + "step": 2500 + }, + { + "epoch": 0.8468947193623381, + "grad_norm": 0.0022703870199620724, + "learning_rate": 3.684991730406968e-05, + "loss": 0.0127, + "step": 2550 + }, + { + "epoch": 0.8635004981733644, + "grad_norm": 0.057379350066185, + "learning_rate": 3.675046807509556e-05, + "loss": 0.0185, + "step": 2600 + }, + { + "epoch": 0.8801062769843906, + "grad_norm": 0.00022332114167511463, + "learning_rate": 3.665101884612144e-05, + "loss": 0.0305, + "step": 2650 + }, + { + "epoch": 0.8967120557954168, + "grad_norm": 0.0001592856424394995, + "learning_rate": 3.6551569617147324e-05, + "loss": 0.0123, + "step": 2700 + }, + { + "epoch": 0.913317834606443, + "grad_norm": 17.401182174682617, + "learning_rate": 3.64521203881732e-05, + "loss": 0.0185, + "step": 2750 + }, + { + "epoch": 0.9299236134174693, + "grad_norm": 6.72700916766189e-05, + "learning_rate": 3.6352671159199086e-05, + "loss": 0.0069, + "step": 2800 + }, + { + "epoch": 0.9465293922284955, + "grad_norm": 0.004428895190358162, + "learning_rate": 3.6253221930224964e-05, + "loss": 0.0007, + "step": 2850 + }, + { + "epoch": 0.9631351710395217, + "grad_norm": 1.0974332094192505, + "learning_rate": 3.615377270125085e-05, + "loss": 0.0404, + "step": 2900 + }, + { + "epoch": 0.9797409498505479, + "grad_norm": 1.9213542938232422, + "learning_rate": 3.605432347227673e-05, + "loss": 0.0347, + "step": 2950 + }, + { + "epoch": 0.9963467286615743, + "grad_norm": 0.00922548957169056, + "learning_rate": 3.595487424330261e-05, + "loss": 0.0012, + "step": 3000 + }, + { + "epoch": 1.0, + "eval_accuracy": 0.9964217557251909, + "eval_f1": 0.9964374533901769, + "eval_loss": 0.0292544886469841, + "eval_precision": 0.9964596067772543, + "eval_recall": 0.9964217557251909, + "eval_runtime": 66.9274, + "eval_samples_per_second": 125.27, + "eval_steps_per_second": 7.829, + "step": 3011 + }, + { + "epoch": 1.0129525074726005, + "grad_norm": 0.0020659081637859344, + "learning_rate": 3.5855425014328495e-05, + "loss": 0.006, + 
"step": 3050 + }, + { + "epoch": 1.0295582862836268, + "grad_norm": 0.0007023397483862936, + "learning_rate": 3.575597578535438e-05, + "loss": 0.0252, + "step": 3100 + }, + { + "epoch": 1.0461640650946529, + "grad_norm": 0.0009625868406146765, + "learning_rate": 3.565652655638026e-05, + "loss": 0.0009, + "step": 3150 + }, + { + "epoch": 1.0627698439056792, + "grad_norm": 0.003358361078426242, + "learning_rate": 3.555707732740614e-05, + "loss": 0.0023, + "step": 3200 + }, + { + "epoch": 1.0793756227167055, + "grad_norm": 0.0069133201614022255, + "learning_rate": 3.5457628098432026e-05, + "loss": 0.0173, + "step": 3250 + }, + { + "epoch": 1.0959814015277316, + "grad_norm": 0.002833959646522999, + "learning_rate": 3.5358178869457903e-05, + "loss": 0.0103, + "step": 3300 + }, + { + "epoch": 1.112587180338758, + "grad_norm": 0.3491974174976349, + "learning_rate": 3.525872964048379e-05, + "loss": 0.0106, + "step": 3350 + }, + { + "epoch": 1.1291929591497842, + "grad_norm": 0.002338677179068327, + "learning_rate": 3.5159280411509666e-05, + "loss": 0.0113, + "step": 3400 + }, + { + "epoch": 1.1457987379608103, + "grad_norm": 0.01391940750181675, + "learning_rate": 3.505983118253555e-05, + "loss": 0.0629, + "step": 3450 + }, + { + "epoch": 1.1624045167718366, + "grad_norm": 0.01091580931097269, + "learning_rate": 3.4960381953561435e-05, + "loss": 0.0124, + "step": 3500 + }, + { + "epoch": 1.1790102955828627, + "grad_norm": 0.08861105889081955, + "learning_rate": 3.486093272458732e-05, + "loss": 0.005, + "step": 3550 + }, + { + "epoch": 1.195616074393889, + "grad_norm": 13.801424980163574, + "learning_rate": 3.47614834956132e-05, + "loss": 0.0198, + "step": 3600 + }, + { + "epoch": 1.2122218532049154, + "grad_norm": 0.0011831964366137981, + "learning_rate": 3.466203426663908e-05, + "loss": 0.0199, + "step": 3650 + }, + { + "epoch": 1.2288276320159415, + "grad_norm": 0.0007713422528468072, + "learning_rate": 3.456258503766496e-05, + "loss": 0.0002, + "step": 3700 + }, + { + "epoch": 1.2454334108269678, + "grad_norm": 0.012040040455758572, + "learning_rate": 3.446313580869084e-05, + "loss": 0.0145, + "step": 3750 + }, + { + "epoch": 1.2620391896379939, + "grad_norm": 0.002991499612107873, + "learning_rate": 3.436368657971672e-05, + "loss": 0.0019, + "step": 3800 + }, + { + "epoch": 1.2786449684490202, + "grad_norm": 0.0011237855069339275, + "learning_rate": 3.4264237350742605e-05, + "loss": 0.0121, + "step": 3850 + }, + { + "epoch": 1.2952507472600465, + "grad_norm": 0.0003940507594961673, + "learning_rate": 3.416478812176849e-05, + "loss": 0.0005, + "step": 3900 + }, + { + "epoch": 1.3118565260710726, + "grad_norm": 0.000680712575558573, + "learning_rate": 3.4065338892794374e-05, + "loss": 0.0064, + "step": 3950 + }, + { + "epoch": 1.328462304882099, + "grad_norm": 0.0005092715146020055, + "learning_rate": 3.396588966382026e-05, + "loss": 0.0015, + "step": 4000 + }, + { + "epoch": 1.3450680836931252, + "grad_norm": 0.05994075909256935, + "learning_rate": 3.3866440434846137e-05, + "loss": 0.012, + "step": 4050 + }, + { + "epoch": 1.3616738625041513, + "grad_norm": 0.00185777444858104, + "learning_rate": 3.376699120587202e-05, + "loss": 0.011, + "step": 4100 + }, + { + "epoch": 1.3782796413151777, + "grad_norm": 0.000904878368601203, + "learning_rate": 3.36675419768979e-05, + "loss": 0.005, + "step": 4150 + }, + { + "epoch": 1.394885420126204, + "grad_norm": 0.0007558612269349396, + "learning_rate": 3.356809274792378e-05, + "loss": 0.0047, + "step": 4200 + }, + { + "epoch": 1.41149119893723, + 
"grad_norm": 0.000539736298378557, + "learning_rate": 3.346864351894966e-05, + "loss": 0.0129, + "step": 4250 + }, + { + "epoch": 1.4280969777482564, + "grad_norm": 0.009128957986831665, + "learning_rate": 3.3369194289975545e-05, + "loss": 0.0243, + "step": 4300 + }, + { + "epoch": 1.4447027565592827, + "grad_norm": 0.0029375357553362846, + "learning_rate": 3.326974506100143e-05, + "loss": 0.0265, + "step": 4350 + }, + { + "epoch": 1.4613085353703088, + "grad_norm": 0.005572469439357519, + "learning_rate": 3.317029583202731e-05, + "loss": 0.0208, + "step": 4400 + }, + { + "epoch": 1.4779143141813351, + "grad_norm": 0.022462476044893265, + "learning_rate": 3.307084660305319e-05, + "loss": 0.028, + "step": 4450 + }, + { + "epoch": 1.4945200929923614, + "grad_norm": 0.03258312866091728, + "learning_rate": 3.2971397374079076e-05, + "loss": 0.0039, + "step": 4500 + }, + { + "epoch": 1.5111258718033875, + "grad_norm": 0.0003238330245949328, + "learning_rate": 3.2871948145104954e-05, + "loss": 0.0004, + "step": 4550 + }, + { + "epoch": 1.5277316506144138, + "grad_norm": 0.0003890040097758174, + "learning_rate": 3.277249891613084e-05, + "loss": 0.0019, + "step": 4600 + }, + { + "epoch": 1.5443374294254402, + "grad_norm": 0.0009322063415311277, + "learning_rate": 3.2673049687156716e-05, + "loss": 0.0179, + "step": 4650 + }, + { + "epoch": 1.5609432082364663, + "grad_norm": 0.01131466869264841, + "learning_rate": 3.25736004581826e-05, + "loss": 0.0104, + "step": 4700 + }, + { + "epoch": 1.5775489870474926, + "grad_norm": 13.522896766662598, + "learning_rate": 3.247415122920848e-05, + "loss": 0.0223, + "step": 4750 + }, + { + "epoch": 1.594154765858519, + "grad_norm": 0.02276255562901497, + "learning_rate": 3.237470200023436e-05, + "loss": 0.0158, + "step": 4800 + }, + { + "epoch": 1.610760544669545, + "grad_norm": 0.002166257705539465, + "learning_rate": 3.227525277126025e-05, + "loss": 0.0078, + "step": 4850 + }, + { + "epoch": 1.627366323480571, + "grad_norm": 1.3065482378005981, + "learning_rate": 3.217580354228613e-05, + "loss": 0.0141, + "step": 4900 + }, + { + "epoch": 1.6439721022915976, + "grad_norm": 0.003547689877450466, + "learning_rate": 3.2076354313312016e-05, + "loss": 0.015, + "step": 4950 + }, + { + "epoch": 1.6605778811026237, + "grad_norm": 0.014956770464777946, + "learning_rate": 3.1976905084337894e-05, + "loss": 0.0152, + "step": 5000 + }, + { + "epoch": 1.6771836599136498, + "grad_norm": 0.04130621254444122, + "learning_rate": 3.187745585536378e-05, + "loss": 0.0125, + "step": 5050 + }, + { + "epoch": 1.6937894387246761, + "grad_norm": 0.0008221206953749061, + "learning_rate": 3.1778006626389656e-05, + "loss": 0.0135, + "step": 5100 + }, + { + "epoch": 1.7103952175357025, + "grad_norm": 12.82378101348877, + "learning_rate": 3.167855739741554e-05, + "loss": 0.0068, + "step": 5150 + }, + { + "epoch": 1.7270009963467285, + "grad_norm": 0.00033835775684565306, + "learning_rate": 3.157910816844142e-05, + "loss": 0.0162, + "step": 5200 + }, + { + "epoch": 1.7436067751577549, + "grad_norm": 0.019336150959134102, + "learning_rate": 3.14796589394673e-05, + "loss": 0.0233, + "step": 5250 + }, + { + "epoch": 1.7602125539687812, + "grad_norm": 0.0025835856795310974, + "learning_rate": 3.138020971049319e-05, + "loss": 0.0049, + "step": 5300 + }, + { + "epoch": 1.7768183327798073, + "grad_norm": 0.001420403248630464, + "learning_rate": 3.128076048151907e-05, + "loss": 0.003, + "step": 5350 + }, + { + "epoch": 1.7934241115908336, + "grad_norm": 0.00038990622851997614, + "learning_rate": 
3.118131125254495e-05, + "loss": 0.0006, + "step": 5400 + }, + { + "epoch": 1.81002989040186, + "grad_norm": 0.008520632050931454, + "learning_rate": 3.1081862023570834e-05, + "loss": 0.0017, + "step": 5450 + }, + { + "epoch": 1.826635669212886, + "grad_norm": 6.68273787596263e-05, + "learning_rate": 3.098241279459671e-05, + "loss": 0.0036, + "step": 5500 + }, + { + "epoch": 1.8432414480239123, + "grad_norm": 0.0003700813394971192, + "learning_rate": 3.0882963565622596e-05, + "loss": 0.0165, + "step": 5550 + }, + { + "epoch": 1.8598472268349386, + "grad_norm": 0.0006010610377416015, + "learning_rate": 3.078351433664848e-05, + "loss": 0.0087, + "step": 5600 + }, + { + "epoch": 1.8764530056459647, + "grad_norm": 0.003066062228754163, + "learning_rate": 3.068406510767436e-05, + "loss": 0.0146, + "step": 5650 + }, + { + "epoch": 1.893058784456991, + "grad_norm": 0.005507585126906633, + "learning_rate": 3.058461587870024e-05, + "loss": 0.0143, + "step": 5700 + }, + { + "epoch": 1.9096645632680174, + "grad_norm": 0.0004035944875795394, + "learning_rate": 3.0485166649726124e-05, + "loss": 0.0005, + "step": 5750 + }, + { + "epoch": 1.9262703420790435, + "grad_norm": 0.001009216532111168, + "learning_rate": 3.0385717420752008e-05, + "loss": 0.0001, + "step": 5800 + }, + { + "epoch": 1.9428761208900698, + "grad_norm": 0.000999059877358377, + "learning_rate": 3.0286268191777886e-05, + "loss": 0.0035, + "step": 5850 + }, + { + "epoch": 1.959481899701096, + "grad_norm": 0.009907165542244911, + "learning_rate": 3.018681896280377e-05, + "loss": 0.0169, + "step": 5900 + }, + { + "epoch": 1.9760876785121222, + "grad_norm": 0.0009444186580367386, + "learning_rate": 3.008736973382965e-05, + "loss": 0.0002, + "step": 5950 + }, + { + "epoch": 1.9926934573231485, + "grad_norm": 0.004821607377380133, + "learning_rate": 2.9987920504855536e-05, + "loss": 0.0279, + "step": 6000 + }, + { + "epoch": 2.0, + "eval_accuracy": 0.9960639312977099, + "eval_f1": 0.9960726064300177, + "eval_loss": 0.026200218126177788, + "eval_precision": 0.9960829017470937, + "eval_recall": 0.9960639312977099, + "eval_runtime": 66.8019, + "eval_samples_per_second": 125.505, + "eval_steps_per_second": 7.844, + "step": 6022 + }, + { + "epoch": 2.009299236134175, + "grad_norm": 0.005393081344664097, + "learning_rate": 2.9888471275881413e-05, + "loss": 0.0005, + "step": 6050 + }, + { + "epoch": 2.025905014945201, + "grad_norm": 0.0005983946030028164, + "learning_rate": 2.9789022046907298e-05, + "loss": 0.007, + "step": 6100 + }, + { + "epoch": 2.042510793756227, + "grad_norm": 0.009896568953990936, + "learning_rate": 2.968957281793318e-05, + "loss": 0.0126, + "step": 6150 + }, + { + "epoch": 2.0591165725672536, + "grad_norm": 0.0005078279646113515, + "learning_rate": 2.9590123588959063e-05, + "loss": 0.0036, + "step": 6200 + }, + { + "epoch": 2.0757223513782797, + "grad_norm": 0.003000972094014287, + "learning_rate": 2.949067435998494e-05, + "loss": 0.0084, + "step": 6250 + }, + { + "epoch": 2.0923281301893057, + "grad_norm": 0.000705482205376029, + "learning_rate": 2.9391225131010826e-05, + "loss": 0.0114, + "step": 6300 + }, + { + "epoch": 2.1089339090003323, + "grad_norm": 0.0010040552588179708, + "learning_rate": 2.929177590203671e-05, + "loss": 0.0001, + "step": 6350 + }, + { + "epoch": 2.1255396878113584, + "grad_norm": 0.00919084157794714, + "learning_rate": 2.919232667306259e-05, + "loss": 0.0132, + "step": 6400 + }, + { + "epoch": 2.1421454666223845, + "grad_norm": 0.0013779608998447657, + "learning_rate": 2.9092877444088476e-05, + 
"loss": 0.0005, + "step": 6450 + }, + { + "epoch": 2.158751245433411, + "grad_norm": 0.0005553108640015125, + "learning_rate": 2.8993428215114353e-05, + "loss": 0.0024, + "step": 6500 + }, + { + "epoch": 2.175357024244437, + "grad_norm": 0.0021922625601291656, + "learning_rate": 2.8893978986140238e-05, + "loss": 0.0001, + "step": 6550 + }, + { + "epoch": 2.191962803055463, + "grad_norm": 0.0002919725957326591, + "learning_rate": 2.879452975716612e-05, + "loss": 0.0016, + "step": 6600 + }, + { + "epoch": 2.2085685818664897, + "grad_norm": 0.0009692521998658776, + "learning_rate": 2.8695080528192003e-05, + "loss": 0.0, + "step": 6650 + }, + { + "epoch": 2.225174360677516, + "grad_norm": 0.0002649214584380388, + "learning_rate": 2.859563129921788e-05, + "loss": 0.0065, + "step": 6700 + }, + { + "epoch": 2.241780139488542, + "grad_norm": 0.00046473185648210347, + "learning_rate": 2.8496182070243765e-05, + "loss": 0.0045, + "step": 6750 + }, + { + "epoch": 2.2583859182995685, + "grad_norm": 0.00027421273989602923, + "learning_rate": 2.8396732841269647e-05, + "loss": 0.0001, + "step": 6800 + }, + { + "epoch": 2.2749916971105946, + "grad_norm": 0.0005349958664737642, + "learning_rate": 2.829728361229553e-05, + "loss": 0.0064, + "step": 6850 + }, + { + "epoch": 2.2915974759216207, + "grad_norm": 0.013357802294194698, + "learning_rate": 2.819783438332141e-05, + "loss": 0.0166, + "step": 6900 + }, + { + "epoch": 2.308203254732647, + "grad_norm": 0.00018077288405038416, + "learning_rate": 2.8098385154347293e-05, + "loss": 0.0003, + "step": 6950 + }, + { + "epoch": 2.3248090335436733, + "grad_norm": 0.00022416921274270862, + "learning_rate": 2.7998935925373174e-05, + "loss": 0.0258, + "step": 7000 + }, + { + "epoch": 2.3414148123546994, + "grad_norm": 0.0035074173938483, + "learning_rate": 2.789948669639906e-05, + "loss": 0.0235, + "step": 7050 + }, + { + "epoch": 2.3580205911657255, + "grad_norm": 0.03189694508910179, + "learning_rate": 2.7800037467424936e-05, + "loss": 0.0092, + "step": 7100 + }, + { + "epoch": 2.374626369976752, + "grad_norm": 0.003983413800597191, + "learning_rate": 2.770058823845082e-05, + "loss": 0.0178, + "step": 7150 + }, + { + "epoch": 2.391232148787778, + "grad_norm": 0.002308150054886937, + "learning_rate": 2.7601139009476705e-05, + "loss": 0.0038, + "step": 7200 + }, + { + "epoch": 2.407837927598804, + "grad_norm": 0.0013349172659218311, + "learning_rate": 2.7501689780502583e-05, + "loss": 0.001, + "step": 7250 + }, + { + "epoch": 2.4244437064098308, + "grad_norm": 0.000329138885717839, + "learning_rate": 2.7402240551528467e-05, + "loss": 0.0, + "step": 7300 + }, + { + "epoch": 2.441049485220857, + "grad_norm": 0.0008932758355513215, + "learning_rate": 2.730279132255435e-05, + "loss": 0.0, + "step": 7350 + }, + { + "epoch": 2.457655264031883, + "grad_norm": 0.0005681042093783617, + "learning_rate": 2.7203342093580233e-05, + "loss": 0.0, + "step": 7400 + }, + { + "epoch": 2.4742610428429095, + "grad_norm": 0.00035528288572095335, + "learning_rate": 2.710389286460611e-05, + "loss": 0.0006, + "step": 7450 + }, + { + "epoch": 2.4908668216539356, + "grad_norm": 0.0064411167986691, + "learning_rate": 2.7004443635631995e-05, + "loss": 0.0057, + "step": 7500 + }, + { + "epoch": 2.5074726004649617, + "grad_norm": 0.004782046191394329, + "learning_rate": 2.6904994406657876e-05, + "loss": 0.0104, + "step": 7550 + }, + { + "epoch": 2.5240783792759878, + "grad_norm": 0.007697090040892363, + "learning_rate": 2.680554517768376e-05, + "loss": 0.0194, + "step": 7600 + }, + { + "epoch": 
2.5406841580870143, + "grad_norm": 0.0027768309228122234, + "learning_rate": 2.670609594870964e-05, + "loss": 0.0003, + "step": 7650 + }, + { + "epoch": 2.5572899368980404, + "grad_norm": 0.004415275063365698, + "learning_rate": 2.6606646719735523e-05, + "loss": 0.0008, + "step": 7700 + }, + { + "epoch": 2.5738957157090665, + "grad_norm": 0.0020732246339321136, + "learning_rate": 2.6507197490761404e-05, + "loss": 0.0099, + "step": 7750 + }, + { + "epoch": 2.590501494520093, + "grad_norm": 0.01230535376816988, + "learning_rate": 2.640774826178729e-05, + "loss": 0.0013, + "step": 7800 + }, + { + "epoch": 2.607107273331119, + "grad_norm": 0.00023796973982825875, + "learning_rate": 2.6308299032813166e-05, + "loss": 0.0001, + "step": 7850 + }, + { + "epoch": 2.6237130521421452, + "grad_norm": 0.0007237173849716783, + "learning_rate": 2.620884980383905e-05, + "loss": 0.0, + "step": 7900 + }, + { + "epoch": 2.6403188309531718, + "grad_norm": 0.012582485564053059, + "learning_rate": 2.6109400574864935e-05, + "loss": 0.0137, + "step": 7950 + }, + { + "epoch": 2.656924609764198, + "grad_norm": 0.004291553515940905, + "learning_rate": 2.6009951345890816e-05, + "loss": 0.0004, + "step": 8000 + }, + { + "epoch": 2.673530388575224, + "grad_norm": 0.001662308000959456, + "learning_rate": 2.59105021169167e-05, + "loss": 0.0001, + "step": 8050 + }, + { + "epoch": 2.6901361673862505, + "grad_norm": 0.0007901078206487, + "learning_rate": 2.5811052887942578e-05, + "loss": 0.0001, + "step": 8100 + }, + { + "epoch": 2.7067419461972766, + "grad_norm": 0.0007069796556606889, + "learning_rate": 2.5711603658968463e-05, + "loss": 0.0001, + "step": 8150 + }, + { + "epoch": 2.7233477250083027, + "grad_norm": 0.0006272114696912467, + "learning_rate": 2.5612154429994344e-05, + "loss": 0.0, + "step": 8200 + }, + { + "epoch": 2.7399535038193292, + "grad_norm": 0.0008849167497828603, + "learning_rate": 2.5512705201020228e-05, + "loss": 0.0108, + "step": 8250 + }, + { + "epoch": 2.7565592826303553, + "grad_norm": 0.0023204211611300707, + "learning_rate": 2.5413255972046106e-05, + "loss": 0.0015, + "step": 8300 + }, + { + "epoch": 2.7731650614413814, + "grad_norm": 0.0006785548175685108, + "learning_rate": 2.531380674307199e-05, + "loss": 0.0001, + "step": 8350 + }, + { + "epoch": 2.789770840252408, + "grad_norm": 0.0012301064562052488, + "learning_rate": 2.521435751409787e-05, + "loss": 0.0001, + "step": 8400 + }, + { + "epoch": 2.806376619063434, + "grad_norm": 0.0012826485326513648, + "learning_rate": 2.5114908285123756e-05, + "loss": 0.0005, + "step": 8450 + }, + { + "epoch": 2.82298239787446, + "grad_norm": 0.00045186831266619265, + "learning_rate": 2.5015459056149634e-05, + "loss": 0.0, + "step": 8500 + }, + { + "epoch": 2.8395881766854867, + "grad_norm": 0.004168716724961996, + "learning_rate": 2.4916009827175518e-05, + "loss": 0.0113, + "step": 8550 + }, + { + "epoch": 2.856193955496513, + "grad_norm": 0.0053441463969647884, + "learning_rate": 2.48165605982014e-05, + "loss": 0.0148, + "step": 8600 + }, + { + "epoch": 2.872799734307539, + "grad_norm": 0.002283324720337987, + "learning_rate": 2.471711136922728e-05, + "loss": 0.0001, + "step": 8650 + }, + { + "epoch": 2.8894055131185654, + "grad_norm": 0.0046396502293646336, + "learning_rate": 2.461766214025316e-05, + "loss": 0.009, + "step": 8700 + }, + { + "epoch": 2.9060112919295915, + "grad_norm": 0.01257924735546112, + "learning_rate": 2.4518212911279046e-05, + "loss": 0.015, + "step": 8750 + }, + { + "epoch": 2.9226170707406176, + "grad_norm": 0.00245088548399508, 
+ "learning_rate": 2.441876368230493e-05, + "loss": 0.0136, + "step": 8800 + }, + { + "epoch": 2.939222849551644, + "grad_norm": 0.005676699336618185, + "learning_rate": 2.4319314453330808e-05, + "loss": 0.005, + "step": 8850 + }, + { + "epoch": 2.9558286283626702, + "grad_norm": 0.0012465333566069603, + "learning_rate": 2.4219865224356692e-05, + "loss": 0.0002, + "step": 8900 + }, + { + "epoch": 2.9724344071736963, + "grad_norm": 0.0007043919176794589, + "learning_rate": 2.4120415995382573e-05, + "loss": 0.0001, + "step": 8950 + }, + { + "epoch": 2.989040185984723, + "grad_norm": 0.007725800387561321, + "learning_rate": 2.4020966766408458e-05, + "loss": 0.0, + "step": 9000 + }, + { + "epoch": 3.0, + "eval_accuracy": 0.9971374045801527, + "eval_f1": 0.9971159280148129, + "eval_loss": 0.030433131381869316, + "eval_precision": 0.9971134119828018, + "eval_recall": 0.9971374045801527, + "eval_runtime": 66.6186, + "eval_samples_per_second": 125.851, + "eval_steps_per_second": 7.866, + "step": 9033 + }, + { + "epoch": 3.005645964795749, + "grad_norm": 0.015563803724944592, + "learning_rate": 2.3921517537434336e-05, + "loss": 0.0121, + "step": 9050 + }, + { + "epoch": 3.022251743606775, + "grad_norm": 0.0009268106077797711, + "learning_rate": 2.382206830846022e-05, + "loss": 0.002, + "step": 9100 + }, + { + "epoch": 3.0388575224178016, + "grad_norm": 0.003995839972048998, + "learning_rate": 2.37226190794861e-05, + "loss": 0.0001, + "step": 9150 + }, + { + "epoch": 3.0554633012288277, + "grad_norm": 0.0004984468687325716, + "learning_rate": 2.3623169850511986e-05, + "loss": 0.0001, + "step": 9200 + }, + { + "epoch": 3.072069080039854, + "grad_norm": 0.0007679828559048474, + "learning_rate": 2.3523720621537863e-05, + "loss": 0.0, + "step": 9250 + }, + { + "epoch": 3.0886748588508803, + "grad_norm": 0.0004945154651068151, + "learning_rate": 2.3424271392563748e-05, + "loss": 0.0, + "step": 9300 + }, + { + "epoch": 3.1052806376619064, + "grad_norm": 0.00045879624667577446, + "learning_rate": 2.332482216358963e-05, + "loss": 0.0, + "step": 9350 + }, + { + "epoch": 3.1218864164729325, + "grad_norm": 0.0004470401909202337, + "learning_rate": 2.3225372934615513e-05, + "loss": 0.0, + "step": 9400 + }, + { + "epoch": 3.1384921952839586, + "grad_norm": 0.00023802775831427425, + "learning_rate": 2.312592370564139e-05, + "loss": 0.0, + "step": 9450 + }, + { + "epoch": 3.155097974094985, + "grad_norm": 0.00043214907054789364, + "learning_rate": 2.3026474476667275e-05, + "loss": 0.0, + "step": 9500 + }, + { + "epoch": 3.1717037529060113, + "grad_norm": 0.00024678235058672726, + "learning_rate": 2.292702524769316e-05, + "loss": 0.0007, + "step": 9550 + }, + { + "epoch": 3.1883095317170373, + "grad_norm": 0.0003429341595619917, + "learning_rate": 2.282757601871904e-05, + "loss": 0.0, + "step": 9600 + }, + { + "epoch": 3.204915310528064, + "grad_norm": 0.00019195489585399628, + "learning_rate": 2.2728126789744925e-05, + "loss": 0.0001, + "step": 9650 + }, + { + "epoch": 3.22152108933909, + "grad_norm": 0.00021821403061039746, + "learning_rate": 2.2628677560770803e-05, + "loss": 0.0, + "step": 9700 + }, + { + "epoch": 3.238126868150116, + "grad_norm": 0.0004384716448839754, + "learning_rate": 2.2529228331796688e-05, + "loss": 0.0, + "step": 9750 + }, + { + "epoch": 3.2547326469611426, + "grad_norm": 0.00017058267258107662, + "learning_rate": 2.242977910282257e-05, + "loss": 0.0, + "step": 9800 + }, + { + "epoch": 3.2713384257721687, + "grad_norm": 0.0007424860959872603, + "learning_rate": 2.2330329873848453e-05, + 
"loss": 0.0162, + "step": 9850 + }, + { + "epoch": 3.287944204583195, + "grad_norm": 0.0007187157752923667, + "learning_rate": 2.223088064487433e-05, + "loss": 0.0114, + "step": 9900 + }, + { + "epoch": 3.3045499833942213, + "grad_norm": 0.0006962522747926414, + "learning_rate": 2.2131431415900215e-05, + "loss": 0.003, + "step": 9950 + }, + { + "epoch": 3.3211557622052474, + "grad_norm": 0.0007819280726835132, + "learning_rate": 2.2031982186926096e-05, + "loss": 0.005, + "step": 10000 + }, + { + "epoch": 3.3377615410162735, + "grad_norm": 0.00020613332162611187, + "learning_rate": 2.1932532957951977e-05, + "loss": 0.0002, + "step": 10050 + }, + { + "epoch": 3.3543673198273, + "grad_norm": 0.000297825870802626, + "learning_rate": 2.183308372897786e-05, + "loss": 0.0, + "step": 10100 + }, + { + "epoch": 3.370973098638326, + "grad_norm": 7.085188553901389e-05, + "learning_rate": 2.1733634500003743e-05, + "loss": 0.0002, + "step": 10150 + }, + { + "epoch": 3.3875788774493523, + "grad_norm": 0.11117817461490631, + "learning_rate": 2.1634185271029624e-05, + "loss": 0.0196, + "step": 10200 + }, + { + "epoch": 3.404184656260379, + "grad_norm": 0.0007934737950563431, + "learning_rate": 2.1534736042055505e-05, + "loss": 0.0063, + "step": 10250 + }, + { + "epoch": 3.420790435071405, + "grad_norm": 0.0008382880478166044, + "learning_rate": 2.1435286813081386e-05, + "loss": 0.0003, + "step": 10300 + }, + { + "epoch": 3.437396213882431, + "grad_norm": 0.00943814031779766, + "learning_rate": 2.133583758410727e-05, + "loss": 0.0229, + "step": 10350 + }, + { + "epoch": 3.454001992693457, + "grad_norm": 0.0025139478966593742, + "learning_rate": 2.1236388355133155e-05, + "loss": 0.0135, + "step": 10400 + }, + { + "epoch": 3.4706077715044836, + "grad_norm": 0.0012301671085879207, + "learning_rate": 2.1136939126159033e-05, + "loss": 0.0112, + "step": 10450 + }, + { + "epoch": 3.4872135503155097, + "grad_norm": 0.0006923259934410453, + "learning_rate": 2.1037489897184917e-05, + "loss": 0.0023, + "step": 10500 + }, + { + "epoch": 3.503819329126536, + "grad_norm": 0.00143510103225708, + "learning_rate": 2.09380406682108e-05, + "loss": 0.0, + "step": 10550 + }, + { + "epoch": 3.5204251079375624, + "grad_norm": 0.0003266233834438026, + "learning_rate": 2.083859143923668e-05, + "loss": 0.0001, + "step": 10600 + }, + { + "epoch": 3.5370308867485885, + "grad_norm": 0.0002165662735933438, + "learning_rate": 2.073914221026256e-05, + "loss": 0.0001, + "step": 10650 + }, + { + "epoch": 3.5536366655596145, + "grad_norm": 0.000500050897244364, + "learning_rate": 2.0639692981288445e-05, + "loss": 0.0053, + "step": 10700 + }, + { + "epoch": 3.570242444370641, + "grad_norm": 0.00033526355400681496, + "learning_rate": 2.0540243752314326e-05, + "loss": 0.0001, + "step": 10750 + }, + { + "epoch": 3.586848223181667, + "grad_norm": 0.000240692708757706, + "learning_rate": 2.044079452334021e-05, + "loss": 0.0, + "step": 10800 + }, + { + "epoch": 3.6034540019926933, + "grad_norm": 0.0011404575780034065, + "learning_rate": 2.034134529436609e-05, + "loss": 0.0065, + "step": 10850 + }, + { + "epoch": 3.62005978080372, + "grad_norm": 0.0008779458003118634, + "learning_rate": 2.0241896065391973e-05, + "loss": 0.0, + "step": 10900 + }, + { + "epoch": 3.636665559614746, + "grad_norm": 0.000214042782317847, + "learning_rate": 2.0142446836417854e-05, + "loss": 0.0001, + "step": 10950 + }, + { + "epoch": 3.653271338425772, + "grad_norm": 0.001132681965827942, + "learning_rate": 2.0042997607443738e-05, + "loss": 0.0001, + "step": 11000 + }, + { 
+ "epoch": 3.6698771172367985, + "grad_norm": 9.505786874797195e-05, + "learning_rate": 1.994354837846962e-05, + "loss": 0.0, + "step": 11050 + }, + { + "epoch": 3.6864828960478246, + "grad_norm": 0.0014238933799788356, + "learning_rate": 1.98440991494955e-05, + "loss": 0.0108, + "step": 11100 + }, + { + "epoch": 3.7030886748588507, + "grad_norm": 0.0021179679315537214, + "learning_rate": 1.974464992052138e-05, + "loss": 0.0004, + "step": 11150 + }, + { + "epoch": 3.7196944536698773, + "grad_norm": 0.0012716053752228618, + "learning_rate": 1.9645200691547266e-05, + "loss": 0.0001, + "step": 11200 + }, + { + "epoch": 3.7363002324809034, + "grad_norm": 0.0003722730907611549, + "learning_rate": 1.9545751462573147e-05, + "loss": 0.0001, + "step": 11250 + }, + { + "epoch": 3.7529060112919295, + "grad_norm": 0.0004279191780369729, + "learning_rate": 1.9446302233599028e-05, + "loss": 0.0, + "step": 11300 + }, + { + "epoch": 3.769511790102956, + "grad_norm": 0.00044420413905754685, + "learning_rate": 1.934685300462491e-05, + "loss": 0.0, + "step": 11350 + }, + { + "epoch": 3.786117568913982, + "grad_norm": 0.0006720417295582592, + "learning_rate": 1.9247403775650794e-05, + "loss": 0.0001, + "step": 11400 + }, + { + "epoch": 3.802723347725008, + "grad_norm": 0.00018277636263519526, + "learning_rate": 1.9147954546676675e-05, + "loss": 0.0, + "step": 11450 + }, + { + "epoch": 3.8193291265360347, + "grad_norm": 0.0033536076080054045, + "learning_rate": 1.904850531770256e-05, + "loss": 0.0, + "step": 11500 + }, + { + "epoch": 3.835934905347061, + "grad_norm": 0.00032338136225007474, + "learning_rate": 1.894905608872844e-05, + "loss": 0.0, + "step": 11550 + }, + { + "epoch": 3.852540684158087, + "grad_norm": 8.492634515278041e-05, + "learning_rate": 1.884960685975432e-05, + "loss": 0.0, + "step": 11600 + }, + { + "epoch": 3.8691464629691135, + "grad_norm": 0.00010078576451633126, + "learning_rate": 1.8750157630780202e-05, + "loss": 0.0, + "step": 11650 + }, + { + "epoch": 3.8857522417801396, + "grad_norm": 0.0001655027299420908, + "learning_rate": 1.8650708401806087e-05, + "loss": 0.0, + "step": 11700 + }, + { + "epoch": 3.9023580205911657, + "grad_norm": 0.00012061335291946307, + "learning_rate": 1.8551259172831968e-05, + "loss": 0.0, + "step": 11750 + }, + { + "epoch": 3.918963799402192, + "grad_norm": 0.00044828641694039106, + "learning_rate": 1.845180994385785e-05, + "loss": 0.0, + "step": 11800 + }, + { + "epoch": 3.9355695782132183, + "grad_norm": 5.701217378373258e-05, + "learning_rate": 1.835236071488373e-05, + "loss": 0.0, + "step": 11850 + }, + { + "epoch": 3.9521753570242444, + "grad_norm": 5.7118391850963235e-05, + "learning_rate": 1.8252911485909615e-05, + "loss": 0.0, + "step": 11900 + }, + { + "epoch": 3.968781135835271, + "grad_norm": 0.0007707001641392708, + "learning_rate": 1.8153462256935496e-05, + "loss": 0.0001, + "step": 11950 + }, + { + "epoch": 3.985386914646297, + "grad_norm": 6.149195542093366e-05, + "learning_rate": 1.8054013027961377e-05, + "loss": 0.0, + "step": 12000 + }, + { + "epoch": 4.0, + "eval_accuracy": 0.9973759541984732, + "eval_f1": 0.9973482118175944, + "eval_loss": 0.03656009957194328, + "eval_precision": 0.9973582002448674, + "eval_recall": 0.9973759541984732, + "eval_runtime": 67.5118, + "eval_samples_per_second": 124.186, + "eval_steps_per_second": 7.762, + "step": 12044 + }, + { + "epoch": 4.001992693457323, + "grad_norm": 5.8200610510539263e-05, + "learning_rate": 1.7954563798987258e-05, + "loss": 0.0, + "step": 12050 + }, + { + "epoch": 4.01859847226835, + 
"grad_norm": 2.329714880033862e-05, + "learning_rate": 1.7855114570013142e-05, + "loss": 0.0, + "step": 12100 + }, + { + "epoch": 4.035204251079375, + "grad_norm": 0.00018428650218993425, + "learning_rate": 1.7755665341039023e-05, + "loss": 0.0, + "step": 12150 + }, + { + "epoch": 4.051810029890402, + "grad_norm": 3.634762470028363e-05, + "learning_rate": 1.7656216112064904e-05, + "loss": 0.0047, + "step": 12200 + }, + { + "epoch": 4.068415808701428, + "grad_norm": 0.016488956287503242, + "learning_rate": 1.7556766883090785e-05, + "loss": 0.0185, + "step": 12250 + }, + { + "epoch": 4.085021587512454, + "grad_norm": 0.0005068861646577716, + "learning_rate": 1.745731765411667e-05, + "loss": 0.0002, + "step": 12300 + }, + { + "epoch": 4.101627366323481, + "grad_norm": 0.0002266648516524583, + "learning_rate": 1.735786842514255e-05, + "loss": 0.0001, + "step": 12350 + }, + { + "epoch": 4.118233145134507, + "grad_norm": 0.000158867915160954, + "learning_rate": 1.7258419196168435e-05, + "loss": 0.0001, + "step": 12400 + }, + { + "epoch": 4.134838923945533, + "grad_norm": 0.00020392602891661227, + "learning_rate": 1.7158969967194317e-05, + "loss": 0.0001, + "step": 12450 + }, + { + "epoch": 4.151444702756559, + "grad_norm": 7.127954449970275e-05, + "learning_rate": 1.7059520738220198e-05, + "loss": 0.0, + "step": 12500 + }, + { + "epoch": 4.168050481567586, + "grad_norm": 0.0027118742000311613, + "learning_rate": 1.696007150924608e-05, + "loss": 0.0005, + "step": 12550 + }, + { + "epoch": 4.1846562603786115, + "grad_norm": 0.00021424326405394822, + "learning_rate": 1.6860622280271963e-05, + "loss": 0.0001, + "step": 12600 + }, + { + "epoch": 4.201262039189638, + "grad_norm": 0.003089814679697156, + "learning_rate": 1.6761173051297844e-05, + "loss": 0.0136, + "step": 12650 + }, + { + "epoch": 4.217867818000665, + "grad_norm": 0.0032255896367132664, + "learning_rate": 1.6661723822323725e-05, + "loss": 0.0001, + "step": 12700 + }, + { + "epoch": 4.23447359681169, + "grad_norm": 0.00042820069938898087, + "learning_rate": 1.6562274593349606e-05, + "loss": 0.0, + "step": 12750 + }, + { + "epoch": 4.251079375622717, + "grad_norm": 0.0002473096828907728, + "learning_rate": 1.646282536437549e-05, + "loss": 0.0001, + "step": 12800 + }, + { + "epoch": 4.267685154433743, + "grad_norm": 0.00025876634754240513, + "learning_rate": 1.6363376135401372e-05, + "loss": 0.0069, + "step": 12850 + }, + { + "epoch": 4.284290933244769, + "grad_norm": 0.00011665420606732368, + "learning_rate": 1.6263926906427253e-05, + "loss": 0.0, + "step": 12900 + }, + { + "epoch": 4.3008967120557955, + "grad_norm": 9.87951279967092e-05, + "learning_rate": 1.6164477677453134e-05, + "loss": 0.0002, + "step": 12950 + }, + { + "epoch": 4.317502490866822, + "grad_norm": 0.0003025582409463823, + "learning_rate": 1.606502844847902e-05, + "loss": 0.0204, + "step": 13000 + }, + { + "epoch": 4.334108269677848, + "grad_norm": 0.002073385054245591, + "learning_rate": 1.59655792195049e-05, + "loss": 0.0209, + "step": 13050 + }, + { + "epoch": 4.350714048488874, + "grad_norm": 0.001544980681501329, + "learning_rate": 1.5866129990530784e-05, + "loss": 0.0014, + "step": 13100 + }, + { + "epoch": 4.367319827299901, + "grad_norm": 0.0011374803725630045, + "learning_rate": 1.5766680761556665e-05, + "loss": 0.0108, + "step": 13150 + }, + { + "epoch": 4.383925606110926, + "grad_norm": 0.0015389297623187304, + "learning_rate": 1.5667231532582546e-05, + "loss": 0.0001, + "step": 13200 + }, + { + "epoch": 4.400531384921953, + "grad_norm": 
0.0011413008905947208, + "learning_rate": 1.5567782303608427e-05, + "loss": 0.0001, + "step": 13250 + }, + { + "epoch": 4.4171371637329795, + "grad_norm": 0.001274819835089147, + "learning_rate": 1.5468333074634312e-05, + "loss": 0.0001, + "step": 13300 + }, + { + "epoch": 4.433742942544005, + "grad_norm": 0.0003733636694960296, + "learning_rate": 1.5368883845660193e-05, + "loss": 0.0001, + "step": 13350 + }, + { + "epoch": 4.450348721355032, + "grad_norm": 0.00023891785531304777, + "learning_rate": 1.5269434616686074e-05, + "loss": 0.0, + "step": 13400 + }, + { + "epoch": 4.466954500166057, + "grad_norm": 0.0003116075531579554, + "learning_rate": 1.5169985387711957e-05, + "loss": 0.0, + "step": 13450 + }, + { + "epoch": 4.483560278977084, + "grad_norm": 0.0002987791958730668, + "learning_rate": 1.5070536158737838e-05, + "loss": 0.0007, + "step": 13500 + }, + { + "epoch": 4.50016605778811, + "grad_norm": 0.0006362134590744972, + "learning_rate": 1.497108692976372e-05, + "loss": 0.0115, + "step": 13550 + }, + { + "epoch": 4.516771836599137, + "grad_norm": 0.0006330082542262971, + "learning_rate": 1.4871637700789602e-05, + "loss": 0.0093, + "step": 13600 + }, + { + "epoch": 4.533377615410163, + "grad_norm": 0.0014981752028688788, + "learning_rate": 1.4772188471815484e-05, + "loss": 0.0076, + "step": 13650 + }, + { + "epoch": 4.549983394221189, + "grad_norm": 0.0010505125392228365, + "learning_rate": 1.4672739242841365e-05, + "loss": 0.0001, + "step": 13700 + }, + { + "epoch": 4.566589173032215, + "grad_norm": 0.0011327213142067194, + "learning_rate": 1.4573290013867248e-05, + "loss": 0.0, + "step": 13750 + }, + { + "epoch": 4.583194951843241, + "grad_norm": 0.0005059707909822464, + "learning_rate": 1.447384078489313e-05, + "loss": 0.0, + "step": 13800 + }, + { + "epoch": 4.599800730654268, + "grad_norm": 0.0011543643195182085, + "learning_rate": 1.437439155591901e-05, + "loss": 0.0, + "step": 13850 + }, + { + "epoch": 4.616406509465294, + "grad_norm": 0.00020546128507703543, + "learning_rate": 1.4274942326944895e-05, + "loss": 0.0, + "step": 13900 + }, + { + "epoch": 4.63301228827632, + "grad_norm": 0.0001845878578023985, + "learning_rate": 1.4175493097970778e-05, + "loss": 0.0, + "step": 13950 + }, + { + "epoch": 4.649618067087347, + "grad_norm": 0.00035237689735367894, + "learning_rate": 1.4076043868996659e-05, + "loss": 0.0, + "step": 14000 + }, + { + "epoch": 4.666223845898372, + "grad_norm": 0.00028683870914392173, + "learning_rate": 1.3976594640022541e-05, + "loss": 0.0, + "step": 14050 + }, + { + "epoch": 4.682829624709399, + "grad_norm": 0.0003122540074400604, + "learning_rate": 1.3877145411048423e-05, + "loss": 0.0, + "step": 14100 + }, + { + "epoch": 4.699435403520425, + "grad_norm": 0.00045101705472916365, + "learning_rate": 1.3777696182074305e-05, + "loss": 0.0002, + "step": 14150 + }, + { + "epoch": 4.716041182331451, + "grad_norm": 0.0011838871287181973, + "learning_rate": 1.3678246953100186e-05, + "loss": 0.0, + "step": 14200 + }, + { + "epoch": 4.7326469611424775, + "grad_norm": 0.00016272020002361387, + "learning_rate": 1.3578797724126069e-05, + "loss": 0.003, + "step": 14250 + }, + { + "epoch": 4.749252739953504, + "grad_norm": 0.00011718720634235069, + "learning_rate": 1.347934849515195e-05, + "loss": 0.0, + "step": 14300 + }, + { + "epoch": 4.76585851876453, + "grad_norm": 8.695230644661933e-05, + "learning_rate": 1.3379899266177833e-05, + "loss": 0.0, + "step": 14350 + }, + { + "epoch": 4.782464297575556, + "grad_norm": 6.504805787699297e-05, + "learning_rate": 
1.3280450037203714e-05, + "loss": 0.0, + "step": 14400 + }, + { + "epoch": 4.799070076386583, + "grad_norm": 0.0013819790910929441, + "learning_rate": 1.3181000808229597e-05, + "loss": 0.0, + "step": 14450 + }, + { + "epoch": 4.815675855197608, + "grad_norm": 8.097992395050824e-05, + "learning_rate": 1.3081551579255478e-05, + "loss": 0.0, + "step": 14500 + }, + { + "epoch": 4.832281634008635, + "grad_norm": 6.052408934920095e-05, + "learning_rate": 1.2982102350281359e-05, + "loss": 0.0, + "step": 14550 + }, + { + "epoch": 4.8488874128196615, + "grad_norm": 6.93105612299405e-05, + "learning_rate": 1.2882653121307242e-05, + "loss": 0.0, + "step": 14600 + }, + { + "epoch": 4.865493191630687, + "grad_norm": 6.091671093599871e-05, + "learning_rate": 1.2783203892333123e-05, + "loss": 0.0, + "step": 14650 + }, + { + "epoch": 4.882098970441714, + "grad_norm": 0.00023341408814303577, + "learning_rate": 1.2683754663359007e-05, + "loss": 0.0, + "step": 14700 + }, + { + "epoch": 4.89870474925274, + "grad_norm": 4.871335477218963e-05, + "learning_rate": 1.258430543438489e-05, + "loss": 0.0, + "step": 14750 + }, + { + "epoch": 4.915310528063766, + "grad_norm": 0.0001195693839690648, + "learning_rate": 1.2484856205410771e-05, + "loss": 0.0007, + "step": 14800 + }, + { + "epoch": 4.931916306874792, + "grad_norm": 0.0006585444789379835, + "learning_rate": 1.2385406976436654e-05, + "loss": 0.0001, + "step": 14850 + }, + { + "epoch": 4.948522085685819, + "grad_norm": 0.0009381878189742565, + "learning_rate": 1.2285957747462535e-05, + "loss": 0.0136, + "step": 14900 + }, + { + "epoch": 4.965127864496845, + "grad_norm": 0.0007571703754365444, + "learning_rate": 1.2186508518488418e-05, + "loss": 0.0001, + "step": 14950 + }, + { + "epoch": 4.981733643307871, + "grad_norm": 0.0002012668555835262, + "learning_rate": 1.2087059289514299e-05, + "loss": 0.0, + "step": 15000 + }, + { + "epoch": 4.998339422118898, + "grad_norm": 0.0004453076981008053, + "learning_rate": 1.1987610060540182e-05, + "loss": 0.0011, + "step": 15050 + }, + { + "epoch": 5.0, + "eval_accuracy": 0.997256679389313, + "eval_f1": 0.9972255532951301, + "eval_loss": 0.026163995265960693, + "eval_precision": 0.997237804162915, + "eval_recall": 0.997256679389313, + "eval_runtime": 67.7679, + "eval_samples_per_second": 123.716, + "eval_steps_per_second": 7.732, + "step": 15055 + } + ], + "logging_steps": 50, + "max_steps": 21077, + "num_input_tokens_seen": 0, + "num_train_epochs": 7, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 2.547123652512768e+17, + "train_batch_size": 16, + "trial_name": null, + "trial_params": null +} diff --git a/trial-1/checkpoint-15055/training_args.bin b/trial-1/checkpoint-15055/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..4f2c1bfa6275d988daf06d913f47017474987a9d --- /dev/null +++ b/trial-1/checkpoint-15055/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:535f8963c9933a98cbda6a7a60fb42b0daa9affc10889961579027cb42dfd7b9 +size 5368 diff --git a/trial-2/checkpoint-9033/config.json b/trial-2/checkpoint-9033/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ b/trial-2/checkpoint-9033/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": 
"answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + "intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "vocab_size": 50368 +} diff --git a/trial-2/checkpoint-9033/model.safetensors b/trial-2/checkpoint-9033/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..372134e50d8e085c56957723c36f5d2e6158a76d --- /dev/null +++ b/trial-2/checkpoint-9033/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ceb5aea83d31becf51f33bad0101af19aa72e30825571029e2058839c2598942 +size 1583351632 diff --git a/trial-2/checkpoint-9033/optimizer.pt b/trial-2/checkpoint-9033/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..6bdae946c4d67825c56dc163682560dd38bebbd8 --- /dev/null +++ b/trial-2/checkpoint-9033/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d58ab59719b06a08e5012236adc8ead9857b2a6b939a9df2e7ad1fd2ea7f3856 +size 3166813178 diff --git a/trial-2/checkpoint-9033/rng_state.pth b/trial-2/checkpoint-9033/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..8fa2bb3a5065461ce1de67b81de6021f34363480 --- /dev/null +++ b/trial-2/checkpoint-9033/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:000f96e69b5b01f915aefc67b0e71455a9632e90db9f36791eb61370a67ffd58 +size 14244 diff --git a/trial-2/checkpoint-9033/scheduler.pt b/trial-2/checkpoint-9033/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..40c34b796ff4d0b4171aa5c3e98be6680b5a94d7 --- /dev/null +++ b/trial-2/checkpoint-9033/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef3fd896aed775d6359ff0434e2911a0c84bd73dffb2a8c635b32b0f9e87d938 +size 1064 diff --git a/trial-2/checkpoint-9033/trainer_state.json b/trial-2/checkpoint-9033/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..c47490f0a01f8eb1f1c3a25f673e32d251974e74 --- /dev/null +++ b/trial-2/checkpoint-9033/trainer_state.json @@ -0,0 +1,1329 @@ +{ + "best_metric": 0.017316868528723717, + "best_model_checkpoint": "./results/answerdotai/ModernBERT-large/trial-2/checkpoint-9033", + "epoch": 3.0, + "eval_steps": 500, + "global_step": 9033, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + 
"log_history": [ + { + "epoch": 0.016605778811026237, + "grad_norm": 21.98246192932129, + "learning_rate": 4.176603346793102e-05, + "loss": 0.3038, + "step": 50 + }, + { + "epoch": 0.033211557622052475, + "grad_norm": 0.019962424412369728, + "learning_rate": 4.162685974761136e-05, + "loss": 0.1339, + "step": 100 + }, + { + "epoch": 0.04981733643307871, + "grad_norm": 4.160129070281982, + "learning_rate": 4.1487686027291694e-05, + "loss": 0.1214, + "step": 150 + }, + { + "epoch": 0.06642311524410495, + "grad_norm": 0.058199040591716766, + "learning_rate": 4.1348512306972037e-05, + "loss": 0.0991, + "step": 200 + }, + { + "epoch": 0.08302889405513118, + "grad_norm": 4.1636199951171875, + "learning_rate": 4.120933858665237e-05, + "loss": 0.0483, + "step": 250 + }, + { + "epoch": 0.09963467286615742, + "grad_norm": 0.009575674310326576, + "learning_rate": 4.107016486633271e-05, + "loss": 0.0655, + "step": 300 + }, + { + "epoch": 0.11624045167718366, + "grad_norm": 0.005853955168277025, + "learning_rate": 4.0930991146013044e-05, + "loss": 0.0417, + "step": 350 + }, + { + "epoch": 0.1328462304882099, + "grad_norm": 0.22517672181129456, + "learning_rate": 4.079181742569338e-05, + "loss": 0.0559, + "step": 400 + }, + { + "epoch": 0.14945200929923613, + "grad_norm": 2.013570785522461, + "learning_rate": 4.0652643705373716e-05, + "loss": 0.0295, + "step": 450 + }, + { + "epoch": 0.16605778811026237, + "grad_norm": 0.06311897933483124, + "learning_rate": 4.051346998505405e-05, + "loss": 0.0683, + "step": 500 + }, + { + "epoch": 0.1826635669212886, + "grad_norm": 0.5382233262062073, + "learning_rate": 4.037429626473439e-05, + "loss": 0.03, + "step": 550 + }, + { + "epoch": 0.19926934573231483, + "grad_norm": 1.0302859544754028, + "learning_rate": 4.0235122544414723e-05, + "loss": 0.0502, + "step": 600 + }, + { + "epoch": 0.2158751245433411, + "grad_norm": 5.7028913497924805, + "learning_rate": 4.009594882409506e-05, + "loss": 0.0429, + "step": 650 + }, + { + "epoch": 0.23248090335436733, + "grad_norm": 0.05518786981701851, + "learning_rate": 3.9956775103775395e-05, + "loss": 0.0384, + "step": 700 + }, + { + "epoch": 0.24908668216539356, + "grad_norm": 0.0032245127949863672, + "learning_rate": 3.981760138345573e-05, + "loss": 0.048, + "step": 750 + }, + { + "epoch": 0.2656924609764198, + "grad_norm": 0.12157031893730164, + "learning_rate": 3.967842766313607e-05, + "loss": 0.0739, + "step": 800 + }, + { + "epoch": 0.282298239787446, + "grad_norm": 1.955265998840332, + "learning_rate": 3.95392539428164e-05, + "loss": 0.0607, + "step": 850 + }, + { + "epoch": 0.29890401859847227, + "grad_norm": 0.03681463748216629, + "learning_rate": 3.9400080222496745e-05, + "loss": 0.0469, + "step": 900 + }, + { + "epoch": 0.3155097974094985, + "grad_norm": 0.00883490964770317, + "learning_rate": 3.926090650217708e-05, + "loss": 0.0251, + "step": 950 + }, + { + "epoch": 0.33211557622052473, + "grad_norm": 0.017260171473026276, + "learning_rate": 3.912173278185742e-05, + "loss": 0.0274, + "step": 1000 + }, + { + "epoch": 0.348721355031551, + "grad_norm": 0.0017717696027830243, + "learning_rate": 3.898255906153775e-05, + "loss": 0.0603, + "step": 1050 + }, + { + "epoch": 0.3653271338425772, + "grad_norm": 0.03903853893280029, + "learning_rate": 3.884338534121809e-05, + "loss": 0.0378, + "step": 1100 + }, + { + "epoch": 0.38193291265360346, + "grad_norm": 0.002946289489045739, + "learning_rate": 3.8704211620898425e-05, + "loss": 0.0362, + "step": 1150 + }, + { + "epoch": 0.39853869146462967, + "grad_norm": 
0.013737122528254986, + "learning_rate": 3.856503790057876e-05, + "loss": 0.048, + "step": 1200 + }, + { + "epoch": 0.41514447027565593, + "grad_norm": 0.22858883440494537, + "learning_rate": 3.84258641802591e-05, + "loss": 0.0195, + "step": 1250 + }, + { + "epoch": 0.4317502490866822, + "grad_norm": 0.605839729309082, + "learning_rate": 3.828669045993944e-05, + "loss": 0.0422, + "step": 1300 + }, + { + "epoch": 0.4483560278977084, + "grad_norm": 0.045106545090675354, + "learning_rate": 3.8147516739619775e-05, + "loss": 0.0303, + "step": 1350 + }, + { + "epoch": 0.46496180670873466, + "grad_norm": 0.0033422100823372602, + "learning_rate": 3.800834301930011e-05, + "loss": 0.0109, + "step": 1400 + }, + { + "epoch": 0.48156758551976087, + "grad_norm": 0.04552963748574257, + "learning_rate": 3.786916929898045e-05, + "loss": 0.02, + "step": 1450 + }, + { + "epoch": 0.4981733643307871, + "grad_norm": 0.0007607729057781398, + "learning_rate": 3.7729995578660776e-05, + "loss": 0.0283, + "step": 1500 + }, + { + "epoch": 0.5147791431418134, + "grad_norm": 0.011046077124774456, + "learning_rate": 3.759082185834111e-05, + "loss": 0.0249, + "step": 1550 + }, + { + "epoch": 0.5313849219528396, + "grad_norm": 0.1515175700187683, + "learning_rate": 3.7451648138021454e-05, + "loss": 0.0128, + "step": 1600 + }, + { + "epoch": 0.5479907007638658, + "grad_norm": 0.0051905689761042595, + "learning_rate": 3.731247441770179e-05, + "loss": 0.0235, + "step": 1650 + }, + { + "epoch": 0.564596479574892, + "grad_norm": 0.03131992742419243, + "learning_rate": 3.7173300697382126e-05, + "loss": 0.0017, + "step": 1700 + }, + { + "epoch": 0.5812022583859183, + "grad_norm": 50.02948760986328, + "learning_rate": 3.703412697706246e-05, + "loss": 0.0119, + "step": 1750 + }, + { + "epoch": 0.5978080371969445, + "grad_norm": 0.017883650958538055, + "learning_rate": 3.68949532567428e-05, + "loss": 0.0374, + "step": 1800 + }, + { + "epoch": 0.6144138160079707, + "grad_norm": 0.002184309996664524, + "learning_rate": 3.6755779536423134e-05, + "loss": 0.0258, + "step": 1850 + }, + { + "epoch": 0.631019594818997, + "grad_norm": 0.009681240655481815, + "learning_rate": 3.661660581610347e-05, + "loss": 0.0246, + "step": 1900 + }, + { + "epoch": 0.6476253736300233, + "grad_norm": 0.0064963954500854015, + "learning_rate": 3.647743209578381e-05, + "loss": 0.0076, + "step": 1950 + }, + { + "epoch": 0.6642311524410495, + "grad_norm": 0.0018849828047677875, + "learning_rate": 3.633825837546415e-05, + "loss": 0.0146, + "step": 2000 + }, + { + "epoch": 0.6808369312520757, + "grad_norm": 0.0013440287439152598, + "learning_rate": 3.6199084655144484e-05, + "loss": 0.0042, + "step": 2050 + }, + { + "epoch": 0.697442710063102, + "grad_norm": 0.0016228831373155117, + "learning_rate": 3.605991093482482e-05, + "loss": 0.0136, + "step": 2100 + }, + { + "epoch": 0.7140484888741282, + "grad_norm": 0.0009898327989503741, + "learning_rate": 3.5920737214505156e-05, + "loss": 0.0187, + "step": 2150 + }, + { + "epoch": 0.7306542676851544, + "grad_norm": 0.001361815258860588, + "learning_rate": 3.578156349418549e-05, + "loss": 0.0167, + "step": 2200 + }, + { + "epoch": 0.7472600464961807, + "grad_norm": 0.004233693704009056, + "learning_rate": 3.564238977386583e-05, + "loss": 0.0142, + "step": 2250 + }, + { + "epoch": 0.7638658253072069, + "grad_norm": 0.002790838712826371, + "learning_rate": 3.550321605354616e-05, + "loss": 0.0235, + "step": 2300 + }, + { + "epoch": 0.7804716041182331, + "grad_norm": 0.005341578274965286, + "learning_rate": 
3.53640423332265e-05, + "loss": 0.002, + "step": 2350 + }, + { + "epoch": 0.7970773829292593, + "grad_norm": 0.011184672825038433, + "learning_rate": 3.5224868612906835e-05, + "loss": 0.0325, + "step": 2400 + }, + { + "epoch": 0.8136831617402857, + "grad_norm": 0.001378358923830092, + "learning_rate": 3.508569489258717e-05, + "loss": 0.0012, + "step": 2450 + }, + { + "epoch": 0.8302889405513119, + "grad_norm": 0.0006328076124191284, + "learning_rate": 3.4946521172267507e-05, + "loss": 0.0028, + "step": 2500 + }, + { + "epoch": 0.8468947193623381, + "grad_norm": 0.003556261071935296, + "learning_rate": 3.480734745194784e-05, + "loss": 0.0103, + "step": 2550 + }, + { + "epoch": 0.8635004981733644, + "grad_norm": 0.09675312787294388, + "learning_rate": 3.466817373162818e-05, + "loss": 0.0154, + "step": 2600 + }, + { + "epoch": 0.8801062769843906, + "grad_norm": 0.0008714852156117558, + "learning_rate": 3.452900001130852e-05, + "loss": 0.0098, + "step": 2650 + }, + { + "epoch": 0.8967120557954168, + "grad_norm": 0.00011318879842292517, + "learning_rate": 3.438982629098886e-05, + "loss": 0.0007, + "step": 2700 + }, + { + "epoch": 0.913317834606443, + "grad_norm": 0.0007046264945529401, + "learning_rate": 3.425065257066919e-05, + "loss": 0.0163, + "step": 2750 + }, + { + "epoch": 0.9299236134174693, + "grad_norm": 0.0003467966744210571, + "learning_rate": 3.411147885034953e-05, + "loss": 0.0087, + "step": 2800 + }, + { + "epoch": 0.9465293922284955, + "grad_norm": 0.002381992759183049, + "learning_rate": 3.3972305130029864e-05, + "loss": 0.025, + "step": 2850 + }, + { + "epoch": 0.9631351710395217, + "grad_norm": 3.5343263149261475, + "learning_rate": 3.38331314097102e-05, + "loss": 0.0066, + "step": 2900 + }, + { + "epoch": 0.9797409498505479, + "grad_norm": 1.6730190515518188, + "learning_rate": 3.369395768939054e-05, + "loss": 0.028, + "step": 2950 + }, + { + "epoch": 0.9963467286615743, + "grad_norm": 0.006649247836321592, + "learning_rate": 3.355478396907088e-05, + "loss": 0.001, + "step": 3000 + }, + { + "epoch": 1.0, + "eval_accuracy": 0.9877146946564885, + "eval_f1": 0.9883925037796503, + "eval_loss": 0.056414589285850525, + "eval_precision": 0.9900276560214059, + "eval_recall": 0.9877146946564885, + "eval_runtime": 66.3519, + "eval_samples_per_second": 126.357, + "eval_steps_per_second": 7.897, + "step": 3011 + }, + { + "epoch": 1.0129525074726005, + "grad_norm": 0.03574318438768387, + "learning_rate": 3.3415610248751215e-05, + "loss": 0.0166, + "step": 3050 + }, + { + "epoch": 1.0295582862836268, + "grad_norm": 0.0022263473365455866, + "learning_rate": 3.327643652843155e-05, + "loss": 0.004, + "step": 3100 + }, + { + "epoch": 1.0461640650946529, + "grad_norm": 0.0038769582752138376, + "learning_rate": 3.3137262808111886e-05, + "loss": 0.0003, + "step": 3150 + }, + { + "epoch": 1.0627698439056792, + "grad_norm": 0.005572175141423941, + "learning_rate": 3.299808908779222e-05, + "loss": 0.001, + "step": 3200 + }, + { + "epoch": 1.0793756227167055, + "grad_norm": 0.1954014003276825, + "learning_rate": 3.285891536747256e-05, + "loss": 0.0323, + "step": 3250 + }, + { + "epoch": 1.0959814015277316, + "grad_norm": 0.0821860134601593, + "learning_rate": 3.2719741647152894e-05, + "loss": 0.0035, + "step": 3300 + }, + { + "epoch": 1.112587180338758, + "grad_norm": 0.04094453528523445, + "learning_rate": 3.258056792683323e-05, + "loss": 0.0025, + "step": 3350 + }, + { + "epoch": 1.1291929591497842, + "grad_norm": 0.0028018904849886894, + "learning_rate": 3.2441394206513566e-05, + "loss": 0.0092, + 
"step": 3400 + }, + { + "epoch": 1.1457987379608103, + "grad_norm": 0.002664918312802911, + "learning_rate": 3.23022204861939e-05, + "loss": 0.0194, + "step": 3450 + }, + { + "epoch": 1.1624045167718366, + "grad_norm": 6.756401538848877, + "learning_rate": 3.216304676587424e-05, + "loss": 0.0135, + "step": 3500 + }, + { + "epoch": 1.1790102955828627, + "grad_norm": 0.0011647256324067712, + "learning_rate": 3.202387304555457e-05, + "loss": 0.0009, + "step": 3550 + }, + { + "epoch": 1.195616074393889, + "grad_norm": 0.0007124203257262707, + "learning_rate": 3.188469932523491e-05, + "loss": 0.0004, + "step": 3600 + }, + { + "epoch": 1.2122218532049154, + "grad_norm": 0.00033606469514779747, + "learning_rate": 3.174552560491525e-05, + "loss": 0.0, + "step": 3650 + }, + { + "epoch": 1.2288276320159415, + "grad_norm": 0.0005367195117287338, + "learning_rate": 3.160635188459559e-05, + "loss": 0.0001, + "step": 3700 + }, + { + "epoch": 1.2454334108269678, + "grad_norm": 0.012845808640122414, + "learning_rate": 3.1467178164275924e-05, + "loss": 0.0132, + "step": 3750 + }, + { + "epoch": 1.2620391896379939, + "grad_norm": 0.0020410455763339996, + "learning_rate": 3.132800444395626e-05, + "loss": 0.0006, + "step": 3800 + }, + { + "epoch": 1.2786449684490202, + "grad_norm": 0.0007005564984865487, + "learning_rate": 3.1188830723636595e-05, + "loss": 0.0074, + "step": 3850 + }, + { + "epoch": 1.2952507472600465, + "grad_norm": 0.00040667568100616336, + "learning_rate": 3.104965700331693e-05, + "loss": 0.004, + "step": 3900 + }, + { + "epoch": 1.3118565260710726, + "grad_norm": 0.0005291103734634817, + "learning_rate": 3.091048328299727e-05, + "loss": 0.0035, + "step": 3950 + }, + { + "epoch": 1.328462304882099, + "grad_norm": 0.01364582683891058, + "learning_rate": 3.077130956267761e-05, + "loss": 0.0157, + "step": 4000 + }, + { + "epoch": 1.3450680836931252, + "grad_norm": 4.155655860900879, + "learning_rate": 3.0632135842357946e-05, + "loss": 0.015, + "step": 4050 + }, + { + "epoch": 1.3616738625041513, + "grad_norm": 0.22104530036449432, + "learning_rate": 3.0492962122038278e-05, + "loss": 0.0118, + "step": 4100 + }, + { + "epoch": 1.3782796413151777, + "grad_norm": 0.027948148548603058, + "learning_rate": 3.0353788401718614e-05, + "loss": 0.0238, + "step": 4150 + }, + { + "epoch": 1.394885420126204, + "grad_norm": 0.0006893076351843774, + "learning_rate": 3.021461468139895e-05, + "loss": 0.0001, + "step": 4200 + }, + { + "epoch": 1.41149119893723, + "grad_norm": 0.00040428817737847567, + "learning_rate": 3.0075440961079286e-05, + "loss": 0.0003, + "step": 4250 + }, + { + "epoch": 1.4280969777482564, + "grad_norm": 0.13213320076465607, + "learning_rate": 2.993626724075962e-05, + "loss": 0.0192, + "step": 4300 + }, + { + "epoch": 1.4447027565592827, + "grad_norm": 0.010131658986210823, + "learning_rate": 2.979709352043996e-05, + "loss": 0.0114, + "step": 4350 + }, + { + "epoch": 1.4613085353703088, + "grad_norm": 0.0037651287857443094, + "learning_rate": 2.9657919800120297e-05, + "loss": 0.0311, + "step": 4400 + }, + { + "epoch": 1.4779143141813351, + "grad_norm": 0.002042593201622367, + "learning_rate": 2.9518746079800632e-05, + "loss": 0.0004, + "step": 4450 + }, + { + "epoch": 1.4945200929923614, + "grad_norm": 0.0017547437455505133, + "learning_rate": 2.9379572359480968e-05, + "loss": 0.0003, + "step": 4500 + }, + { + "epoch": 1.5111258718033875, + "grad_norm": 0.0004074271419085562, + "learning_rate": 2.9240398639161304e-05, + "loss": 0.0, + "step": 4550 + }, + { + "epoch": 1.5277316506144138, + 
"grad_norm": 0.0007494412711821496, + "learning_rate": 2.910122491884164e-05, + "loss": 0.0, + "step": 4600 + }, + { + "epoch": 1.5443374294254402, + "grad_norm": 0.0027854584623128176, + "learning_rate": 2.8962051198521976e-05, + "loss": 0.0101, + "step": 4650 + }, + { + "epoch": 1.5609432082364663, + "grad_norm": 0.0018231570720672607, + "learning_rate": 2.882287747820232e-05, + "loss": 0.0038, + "step": 4700 + }, + { + "epoch": 1.5775489870474926, + "grad_norm": 0.0018910124199464917, + "learning_rate": 2.8683703757882654e-05, + "loss": 0.0101, + "step": 4750 + }, + { + "epoch": 1.594154765858519, + "grad_norm": 0.02916385605931282, + "learning_rate": 2.854453003756299e-05, + "loss": 0.0116, + "step": 4800 + }, + { + "epoch": 1.610760544669545, + "grad_norm": 0.000494068895932287, + "learning_rate": 2.8405356317243326e-05, + "loss": 0.0003, + "step": 4850 + }, + { + "epoch": 1.627366323480571, + "grad_norm": 0.003024607663974166, + "learning_rate": 2.8266182596923662e-05, + "loss": 0.0017, + "step": 4900 + }, + { + "epoch": 1.6439721022915976, + "grad_norm": 0.0006424608873203397, + "learning_rate": 2.8127008876603998e-05, + "loss": 0.0004, + "step": 4950 + }, + { + "epoch": 1.6605778811026237, + "grad_norm": 0.0013206731528043747, + "learning_rate": 2.7987835156284334e-05, + "loss": 0.0421, + "step": 5000 + }, + { + "epoch": 1.6771836599136498, + "grad_norm": 0.009919991716742516, + "learning_rate": 2.7848661435964673e-05, + "loss": 0.014, + "step": 5050 + }, + { + "epoch": 1.6937894387246761, + "grad_norm": 0.0010778785217553377, + "learning_rate": 2.770948771564501e-05, + "loss": 0.0111, + "step": 5100 + }, + { + "epoch": 1.7103952175357025, + "grad_norm": 0.048366744071245193, + "learning_rate": 2.7570313995325345e-05, + "loss": 0.0173, + "step": 5150 + }, + { + "epoch": 1.7270009963467285, + "grad_norm": 0.0018048906931653619, + "learning_rate": 2.743114027500568e-05, + "loss": 0.0004, + "step": 5200 + }, + { + "epoch": 1.7436067751577549, + "grad_norm": 0.0049881902523338795, + "learning_rate": 2.7291966554686016e-05, + "loss": 0.0055, + "step": 5250 + }, + { + "epoch": 1.7602125539687812, + "grad_norm": 0.035248152911663055, + "learning_rate": 2.7152792834366352e-05, + "loss": 0.0139, + "step": 5300 + }, + { + "epoch": 1.7768183327798073, + "grad_norm": 0.0055509209632873535, + "learning_rate": 2.7013619114046688e-05, + "loss": 0.0119, + "step": 5350 + }, + { + "epoch": 1.7934241115908336, + "grad_norm": 0.0015154307475313544, + "learning_rate": 2.6874445393727027e-05, + "loss": 0.0059, + "step": 5400 + }, + { + "epoch": 1.81002989040186, + "grad_norm": 0.002082501072436571, + "learning_rate": 2.6735271673407363e-05, + "loss": 0.0001, + "step": 5450 + }, + { + "epoch": 1.826635669212886, + "grad_norm": 0.0010320444125682116, + "learning_rate": 2.65960979530877e-05, + "loss": 0.0026, + "step": 5500 + }, + { + "epoch": 1.8432414480239123, + "grad_norm": 0.0015966288046911359, + "learning_rate": 2.6456924232768035e-05, + "loss": 0.0001, + "step": 5550 + }, + { + "epoch": 1.8598472268349386, + "grad_norm": 0.0002549632336013019, + "learning_rate": 2.631775051244837e-05, + "loss": 0.0, + "step": 5600 + }, + { + "epoch": 1.8764530056459647, + "grad_norm": 0.003573563415557146, + "learning_rate": 2.6178576792128707e-05, + "loss": 0.0119, + "step": 5650 + }, + { + "epoch": 1.893058784456991, + "grad_norm": 0.0006668689311482012, + "learning_rate": 2.6039403071809043e-05, + "loss": 0.0002, + "step": 5700 + }, + { + "epoch": 1.9096645632680174, + "grad_norm": 0.0003313050256110728, + 
"learning_rate": 2.5900229351489382e-05, + "loss": 0.0005, + "step": 5750 + }, + { + "epoch": 1.9262703420790435, + "grad_norm": 0.000735230278223753, + "learning_rate": 2.5761055631169718e-05, + "loss": 0.0033, + "step": 5800 + }, + { + "epoch": 1.9428761208900698, + "grad_norm": 0.0006316429935395718, + "learning_rate": 2.5621881910850054e-05, + "loss": 0.0, + "step": 5850 + }, + { + "epoch": 1.959481899701096, + "grad_norm": 0.07467895746231079, + "learning_rate": 2.548270819053039e-05, + "loss": 0.0239, + "step": 5900 + }, + { + "epoch": 1.9760876785121222, + "grad_norm": 0.0030303276143968105, + "learning_rate": 2.5343534470210725e-05, + "loss": 0.0051, + "step": 5950 + }, + { + "epoch": 1.9926934573231485, + "grad_norm": 0.028144309297204018, + "learning_rate": 2.520436074989106e-05, + "loss": 0.0411, + "step": 6000 + }, + { + "epoch": 2.0, + "eval_accuracy": 0.9971374045801527, + "eval_f1": 0.9970892145409969, + "eval_loss": 0.022991055622696877, + "eval_precision": 0.9971362128759713, + "eval_recall": 0.9971374045801527, + "eval_runtime": 67.1475, + "eval_samples_per_second": 124.859, + "eval_steps_per_second": 7.804, + "step": 6022 + }, + { + "epoch": 2.009299236134175, + "grad_norm": 0.01069187093526125, + "learning_rate": 2.5065187029571397e-05, + "loss": 0.0176, + "step": 6050 + }, + { + "epoch": 2.025905014945201, + "grad_norm": 0.002449960447847843, + "learning_rate": 2.492601330925174e-05, + "loss": 0.0034, + "step": 6100 + }, + { + "epoch": 2.042510793756227, + "grad_norm": 0.006188757251948118, + "learning_rate": 2.4786839588932076e-05, + "loss": 0.0001, + "step": 6150 + }, + { + "epoch": 2.0591165725672536, + "grad_norm": 0.0007646733429282904, + "learning_rate": 2.464766586861241e-05, + "loss": 0.0049, + "step": 6200 + }, + { + "epoch": 2.0757223513782797, + "grad_norm": 0.008807332254946232, + "learning_rate": 2.4508492148292747e-05, + "loss": 0.0108, + "step": 6250 + }, + { + "epoch": 2.0923281301893057, + "grad_norm": 0.0009894605027511716, + "learning_rate": 2.4369318427973083e-05, + "loss": 0.0002, + "step": 6300 + }, + { + "epoch": 2.1089339090003323, + "grad_norm": 0.0016475095180794597, + "learning_rate": 2.423014470765342e-05, + "loss": 0.0009, + "step": 6350 + }, + { + "epoch": 2.1255396878113584, + "grad_norm": 0.000374704715795815, + "learning_rate": 2.4090970987333755e-05, + "loss": 0.002, + "step": 6400 + }, + { + "epoch": 2.1421454666223845, + "grad_norm": 0.0004738509014714509, + "learning_rate": 2.3951797267014094e-05, + "loss": 0.0, + "step": 6450 + }, + { + "epoch": 2.158751245433411, + "grad_norm": 0.004183988086879253, + "learning_rate": 2.381262354669443e-05, + "loss": 0.0059, + "step": 6500 + }, + { + "epoch": 2.175357024244437, + "grad_norm": 0.0020687321666628122, + "learning_rate": 2.3673449826374766e-05, + "loss": 0.0079, + "step": 6550 + }, + { + "epoch": 2.191962803055463, + "grad_norm": 0.00045966755715198815, + "learning_rate": 2.35342761060551e-05, + "loss": 0.0, + "step": 6600 + }, + { + "epoch": 2.2085685818664897, + "grad_norm": 0.001768000889569521, + "learning_rate": 2.3395102385735438e-05, + "loss": 0.0, + "step": 6650 + }, + { + "epoch": 2.225174360677516, + "grad_norm": 0.00014257608563639224, + "learning_rate": 2.3255928665415773e-05, + "loss": 0.0, + "step": 6700 + }, + { + "epoch": 2.241780139488542, + "grad_norm": 0.0002034334756899625, + "learning_rate": 2.311675494509611e-05, + "loss": 0.0121, + "step": 6750 + }, + { + "epoch": 2.2583859182995685, + "grad_norm": 0.7104772329330444, + "learning_rate": 2.297758122477645e-05, + 
"loss": 0.0, + "step": 6800 + }, + { + "epoch": 2.2749916971105946, + "grad_norm": 0.0013439609901979566, + "learning_rate": 2.2838407504456784e-05, + "loss": 0.0116, + "step": 6850 + }, + { + "epoch": 2.2915974759216207, + "grad_norm": 0.003318699076771736, + "learning_rate": 2.269923378413712e-05, + "loss": 0.0001, + "step": 6900 + }, + { + "epoch": 2.308203254732647, + "grad_norm": 0.00031852992833592, + "learning_rate": 2.2560060063817456e-05, + "loss": 0.0065, + "step": 6950 + }, + { + "epoch": 2.3248090335436733, + "grad_norm": 0.00043174950405955315, + "learning_rate": 2.2420886343497792e-05, + "loss": 0.0, + "step": 7000 + }, + { + "epoch": 2.3414148123546994, + "grad_norm": 0.008213113993406296, + "learning_rate": 2.2281712623178128e-05, + "loss": 0.0164, + "step": 7050 + }, + { + "epoch": 2.3580205911657255, + "grad_norm": 0.0064167445525527, + "learning_rate": 2.2142538902858464e-05, + "loss": 0.0035, + "step": 7100 + }, + { + "epoch": 2.374626369976752, + "grad_norm": 0.00048106853500939906, + "learning_rate": 2.2003365182538803e-05, + "loss": 0.0002, + "step": 7150 + }, + { + "epoch": 2.391232148787778, + "grad_norm": 0.00031906799995340407, + "learning_rate": 2.186419146221914e-05, + "loss": 0.0032, + "step": 7200 + }, + { + "epoch": 2.407837927598804, + "grad_norm": 0.0002367593697272241, + "learning_rate": 2.1725017741899475e-05, + "loss": 0.0001, + "step": 7250 + }, + { + "epoch": 2.4244437064098308, + "grad_norm": 0.0002154409303329885, + "learning_rate": 2.158584402157981e-05, + "loss": 0.0, + "step": 7300 + }, + { + "epoch": 2.441049485220857, + "grad_norm": 0.004206398501992226, + "learning_rate": 2.1446670301260146e-05, + "loss": 0.0, + "step": 7350 + }, + { + "epoch": 2.457655264031883, + "grad_norm": 0.00027124237385578454, + "learning_rate": 2.1307496580940482e-05, + "loss": 0.0, + "step": 7400 + }, + { + "epoch": 2.4742610428429095, + "grad_norm": 0.00019591822638176382, + "learning_rate": 2.1168322860620818e-05, + "loss": 0.0, + "step": 7450 + }, + { + "epoch": 2.4908668216539356, + "grad_norm": 0.004273345228284597, + "learning_rate": 2.102914914030116e-05, + "loss": 0.0069, + "step": 7500 + }, + { + "epoch": 2.5074726004649617, + "grad_norm": 0.0011552508221939206, + "learning_rate": 2.0889975419981497e-05, + "loss": 0.0108, + "step": 7550 + }, + { + "epoch": 2.5240783792759878, + "grad_norm": 0.0009868694469332695, + "learning_rate": 2.0750801699661833e-05, + "loss": 0.0023, + "step": 7600 + }, + { + "epoch": 2.5406841580870143, + "grad_norm": 0.00020178337581455708, + "learning_rate": 2.061162797934217e-05, + "loss": 0.0131, + "step": 7650 + }, + { + "epoch": 2.5572899368980404, + "grad_norm": 0.005387285258620977, + "learning_rate": 2.0472454259022504e-05, + "loss": 0.0, + "step": 7700 + }, + { + "epoch": 2.5738957157090665, + "grad_norm": 5.118713670526631e-05, + "learning_rate": 2.033328053870284e-05, + "loss": 0.0001, + "step": 7750 + }, + { + "epoch": 2.590501494520093, + "grad_norm": 0.00040173486922867596, + "learning_rate": 2.0194106818383176e-05, + "loss": 0.0001, + "step": 7800 + }, + { + "epoch": 2.607107273331119, + "grad_norm": 6.548867531819269e-05, + "learning_rate": 2.0054933098063512e-05, + "loss": 0.0, + "step": 7850 + }, + { + "epoch": 2.6237130521421452, + "grad_norm": 0.00011618030839599669, + "learning_rate": 1.991575937774385e-05, + "loss": 0.0, + "step": 7900 + }, + { + "epoch": 2.6403188309531718, + "grad_norm": 3.7753208744106814e-05, + "learning_rate": 1.9776585657424187e-05, + "loss": 0.0, + "step": 7950 + }, + { + "epoch": 
2.656924609764198, + "grad_norm": 0.00011960588017245755, + "learning_rate": 1.9637411937104523e-05, + "loss": 0.0, + "step": 8000 + }, + { + "epoch": 2.673530388575224, + "grad_norm": 0.0005026832805015147, + "learning_rate": 1.949823821678486e-05, + "loss": 0.0, + "step": 8050 + }, + { + "epoch": 2.6901361673862505, + "grad_norm": 5.5350832553813234e-05, + "learning_rate": 1.9359064496465198e-05, + "loss": 0.0, + "step": 8100 + }, + { + "epoch": 2.7067419461972766, + "grad_norm": 0.00011889787128893659, + "learning_rate": 1.9219890776145534e-05, + "loss": 0.006, + "step": 8150 + }, + { + "epoch": 2.7233477250083027, + "grad_norm": 0.001216597855091095, + "learning_rate": 1.908071705582587e-05, + "loss": 0.0, + "step": 8200 + }, + { + "epoch": 2.7399535038193292, + "grad_norm": 0.000991505105048418, + "learning_rate": 1.8941543335506206e-05, + "loss": 0.028, + "step": 8250 + }, + { + "epoch": 2.7565592826303553, + "grad_norm": 0.0008777762413956225, + "learning_rate": 1.880236961518654e-05, + "loss": 0.0017, + "step": 8300 + }, + { + "epoch": 2.7731650614413814, + "grad_norm": 0.0007498575723730028, + "learning_rate": 1.8663195894866877e-05, + "loss": 0.0001, + "step": 8350 + }, + { + "epoch": 2.789770840252408, + "grad_norm": 0.0012167900567874312, + "learning_rate": 1.8524022174547213e-05, + "loss": 0.0, + "step": 8400 + }, + { + "epoch": 2.806376619063434, + "grad_norm": 0.007960589602589607, + "learning_rate": 1.8384848454227552e-05, + "loss": 0.0, + "step": 8450 + }, + { + "epoch": 2.82298239787446, + "grad_norm": 0.00019711998174898326, + "learning_rate": 1.8245674733907888e-05, + "loss": 0.0, + "step": 8500 + }, + { + "epoch": 2.8395881766854867, + "grad_norm": 0.009689416736364365, + "learning_rate": 1.8106501013588224e-05, + "loss": 0.012, + "step": 8550 + }, + { + "epoch": 2.856193955496513, + "grad_norm": 0.0010081271175295115, + "learning_rate": 1.796732729326856e-05, + "loss": 0.0001, + "step": 8600 + }, + { + "epoch": 2.872799734307539, + "grad_norm": 0.0037073129788041115, + "learning_rate": 1.7828153572948896e-05, + "loss": 0.0037, + "step": 8650 + }, + { + "epoch": 2.8894055131185654, + "grad_norm": 0.000657514261547476, + "learning_rate": 1.7688979852629232e-05, + "loss": 0.0005, + "step": 8700 + }, + { + "epoch": 2.9060112919295915, + "grad_norm": 0.0041756597347557545, + "learning_rate": 1.7549806132309568e-05, + "loss": 0.0001, + "step": 8750 + }, + { + "epoch": 2.9226170707406176, + "grad_norm": 0.0014196323463693261, + "learning_rate": 1.7410632411989907e-05, + "loss": 0.0051, + "step": 8800 + }, + { + "epoch": 2.939222849551644, + "grad_norm": 0.001023727236315608, + "learning_rate": 1.7271458691670243e-05, + "loss": 0.0112, + "step": 8850 + }, + { + "epoch": 2.9558286283626702, + "grad_norm": 0.0005508707836270332, + "learning_rate": 1.713228497135058e-05, + "loss": 0.0001, + "step": 8900 + }, + { + "epoch": 2.9724344071736963, + "grad_norm": 0.0005536659737117589, + "learning_rate": 1.6993111251030918e-05, + "loss": 0.0, + "step": 8950 + }, + { + "epoch": 2.989040185984723, + "grad_norm": 0.002692180685698986, + "learning_rate": 1.6853937530711254e-05, + "loss": 0.0018, + "step": 9000 + }, + { + "epoch": 3.0, + "eval_accuracy": 0.9977337786259542, + "eval_f1": 0.997711567093862, + "eval_loss": 0.017316868528723717, + "eval_precision": 0.9977219427584941, + "eval_recall": 0.9977337786259542, + "eval_runtime": 67.8318, + "eval_samples_per_second": 123.6, + "eval_steps_per_second": 7.725, + "step": 9033 + } + ], + "logging_steps": 50, + "max_steps": 15055, + 
"num_input_tokens_seen": 0, + "num_train_epochs": 5, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 1.5282741915076608e+17, + "train_batch_size": 16, + "trial_name": null, + "trial_params": null +} diff --git a/trial-2/checkpoint-9033/training_args.bin b/trial-2/checkpoint-9033/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..7d5a973dc4304c5441b9f9ca14c6b5ad185baf40 --- /dev/null +++ b/trial-2/checkpoint-9033/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31b16aa5b08a9764ff6ccb4b3d56d2656dee1972624cbbcf01b67cb965ad3e06 +size 5368 diff --git a/trial-3/checkpoint-1506/config.json b/trial-3/checkpoint-1506/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ b/trial-3/checkpoint-1506/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": "answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + "intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "vocab_size": 50368 +} diff --git a/trial-3/checkpoint-1506/model.safetensors b/trial-3/checkpoint-1506/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6481e8bc250293fe160923401a8205a766e50666 --- /dev/null +++ b/trial-3/checkpoint-1506/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2633ff2f035355d1f491e943541f26f2b553f8bb1f21a963431b246a1dac02a3 +size 1583351632 diff --git a/trial-3/checkpoint-1506/optimizer.pt b/trial-3/checkpoint-1506/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..b0375d3f66f7f5c65db1d721f8199d6ee0492b65 --- /dev/null +++ b/trial-3/checkpoint-1506/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dbd147ad035350b88e1d53f3cf901e1b9d28549304b83bb06a4cc02825d41cf +size 3166813178 diff --git a/trial-3/checkpoint-1506/rng_state.pth b/trial-3/checkpoint-1506/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..cf3d91c5392ca6b7d7e0880933b7830a896d7c9e --- /dev/null +++ b/trial-3/checkpoint-1506/rng_state.pth @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:568428d80a25211a390c359ca51b0b20b38ca0607fbc196f106c9841c02d3e59 +size 14244 diff --git a/trial-3/checkpoint-1506/scheduler.pt b/trial-3/checkpoint-1506/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..931d23fe49277908402bf430d4ec99e12126031a --- /dev/null +++ b/trial-3/checkpoint-1506/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baefd0e97bcced8a792772527e556de3be3ee540a7e8c67d6914b57db907793c +size 1064 diff --git a/trial-3/checkpoint-1506/trainer_state.json b/trial-3/checkpoint-1506/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..bb7f590b711e8c193446182fbd94fae6a2dedb55 --- /dev/null +++ b/trial-3/checkpoint-1506/trainer_state.json @@ -0,0 +1,255 @@ +{ + "best_metric": 0.029849544167518616, + "best_model_checkpoint": "./results/answerdotai/ModernBERT-large/trial-3/checkpoint-1506", + "epoch": 1.0, + "eval_steps": 500, + "global_step": 1506, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.033200531208499334, + "grad_norm": 23.8559627532959, + "learning_rate": 4.534517164997864e-06, + "loss": 0.4406, + "step": 50 + }, + { + "epoch": 0.06640106241699867, + "grad_norm": 6.17399787902832, + "learning_rate": 4.51562019398187e-06, + "loss": 0.1819, + "step": 100 + }, + { + "epoch": 0.099601593625498, + "grad_norm": 0.49522921442985535, + "learning_rate": 4.496723222965876e-06, + "loss": 0.107, + "step": 150 + }, + { + "epoch": 0.13280212483399734, + "grad_norm": 7.754148960113525, + "learning_rate": 4.477826251949882e-06, + "loss": 0.0688, + "step": 200 + }, + { + "epoch": 0.16600265604249667, + "grad_norm": 0.5113905668258667, + "learning_rate": 4.458929280933889e-06, + "loss": 0.0495, + "step": 250 + }, + { + "epoch": 0.199203187250996, + "grad_norm": 4.4989800453186035, + "learning_rate": 4.440032309917895e-06, + "loss": 0.0415, + "step": 300 + }, + { + "epoch": 0.23240371845949534, + "grad_norm": 9.69454288482666, + "learning_rate": 4.421135338901901e-06, + "loss": 0.0663, + "step": 350 + }, + { + "epoch": 0.2656042496679947, + "grad_norm": 0.023875955492258072, + "learning_rate": 4.4022383678859074e-06, + "loss": 0.0373, + "step": 400 + }, + { + "epoch": 0.29880478087649404, + "grad_norm": 0.09503920376300812, + "learning_rate": 4.383341396869914e-06, + "loss": 0.0444, + "step": 450 + }, + { + "epoch": 0.33200531208499334, + "grad_norm": 0.008267635479569435, + "learning_rate": 4.36444442585392e-06, + "loss": 0.0286, + "step": 500 + }, + { + "epoch": 0.3652058432934927, + "grad_norm": 0.12851744890213013, + "learning_rate": 4.345547454837926e-06, + "loss": 0.0204, + "step": 550 + }, + { + "epoch": 0.398406374501992, + "grad_norm": 2.524458646774292, + "learning_rate": 4.3266504838219325e-06, + "loss": 0.0095, + "step": 600 + }, + { + "epoch": 0.4316069057104914, + "grad_norm": 1.337737798690796, + "learning_rate": 4.307753512805939e-06, + "loss": 0.0249, + "step": 650 + }, + { + "epoch": 0.4648074369189907, + "grad_norm": 0.08153943717479706, + "learning_rate": 4.288856541789945e-06, + "loss": 0.0092, + "step": 700 + }, + { + "epoch": 0.49800796812749004, + "grad_norm": 0.018035605549812317, + "learning_rate": 4.269959570773951e-06, + "loss": 0.0132, + "step": 750 + }, + { + "epoch": 0.5312084993359893, + "grad_norm": 0.22391293942928314, + "learning_rate": 4.251062599757957e-06, + "loss": 0.0103, + "step": 800 + }, + { + "epoch": 
0.5644090305444888, + "grad_norm": 7.64361047744751, + "learning_rate": 4.232165628741963e-06, + "loss": 0.0099, + "step": 850 + }, + { + "epoch": 0.5976095617529881, + "grad_norm": 0.0029439961072057486, + "learning_rate": 4.213268657725969e-06, + "loss": 0.0099, + "step": 900 + }, + { + "epoch": 0.6308100929614874, + "grad_norm": 0.02843591570854187, + "learning_rate": 4.194371686709975e-06, + "loss": 0.0051, + "step": 950 + }, + { + "epoch": 0.6640106241699867, + "grad_norm": 0.21529018878936768, + "learning_rate": 4.175474715693982e-06, + "loss": 0.0165, + "step": 1000 + }, + { + "epoch": 0.6972111553784861, + "grad_norm": 0.0392189547419548, + "learning_rate": 4.156577744677988e-06, + "loss": 0.011, + "step": 1050 + }, + { + "epoch": 0.7304116865869854, + "grad_norm": 0.006516862660646439, + "learning_rate": 4.137680773661994e-06, + "loss": 0.002, + "step": 1100 + }, + { + "epoch": 0.7636122177954847, + "grad_norm": 0.0022248616442084312, + "learning_rate": 4.1187838026460004e-06, + "loss": 0.0117, + "step": 1150 + }, + { + "epoch": 0.796812749003984, + "grad_norm": 0.014311583712697029, + "learning_rate": 4.099886831630007e-06, + "loss": 0.0115, + "step": 1200 + }, + { + "epoch": 0.8300132802124834, + "grad_norm": 0.006024663802236319, + "learning_rate": 4.080989860614013e-06, + "loss": 0.0004, + "step": 1250 + }, + { + "epoch": 0.8632138114209827, + "grad_norm": 0.005676358472555876, + "learning_rate": 4.062092889598019e-06, + "loss": 0.0067, + "step": 1300 + }, + { + "epoch": 0.896414342629482, + "grad_norm": 0.0005013855989091098, + "learning_rate": 4.0431959185820255e-06, + "loss": 0.0008, + "step": 1350 + }, + { + "epoch": 0.9296148738379814, + "grad_norm": 0.0016384737100452185, + "learning_rate": 4.024298947566032e-06, + "loss": 0.001, + "step": 1400 + }, + { + "epoch": 0.9628154050464808, + "grad_norm": 3.2831873893737793, + "learning_rate": 4.005401976550038e-06, + "loss": 0.0015, + "step": 1450 + }, + { + "epoch": 0.9960159362549801, + "grad_norm": 0.036648038774728775, + "learning_rate": 3.986505005534044e-06, + "loss": 0.0061, + "step": 1500 + }, + { + "epoch": 1.0, + "eval_accuracy": 0.9970181297709924, + "eval_f1": 0.9969934810410721, + "eval_loss": 0.029849544167518616, + "eval_precision": 0.9969918072979542, + "eval_recall": 0.9970181297709924, + "eval_runtime": 59.3241, + "eval_samples_per_second": 141.325, + "eval_steps_per_second": 4.416, + "step": 1506 + } + ], + "logging_steps": 50, + "max_steps": 12048, + "num_input_tokens_seen": 0, + "num_train_epochs": 8, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 5.094247305025536e+16, + "train_batch_size": 32, + "trial_name": null, + "trial_params": null +} diff --git a/trial-3/checkpoint-1506/training_args.bin b/trial-3/checkpoint-1506/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..da2bfe9f66f55cdb26a198ded1b2321a463a8372 --- /dev/null +++ b/trial-3/checkpoint-1506/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:862f96821c71b38bd72c3c10e43adbfe554490b2332bd838409265267f16997e +size 5368 diff --git a/trial-4/checkpoint-3012/config.json b/trial-4/checkpoint-3012/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ 
b/trial-4/checkpoint-3012/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": "answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + "intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "vocab_size": 50368 +} diff --git a/trial-4/checkpoint-3012/model.safetensors b/trial-4/checkpoint-3012/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..4aef95b83bc8d1ddd2f29e40740ad8f3f39d5562 --- /dev/null +++ b/trial-4/checkpoint-3012/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84a78c605436363f9db346ac678e170cff4e009ca2331def7f65ff704e3349e4 +size 1583351632 diff --git a/trial-4/checkpoint-3012/optimizer.pt b/trial-4/checkpoint-3012/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..0c50a08bf3468bf5da4a4ae11efc1183ec45362c --- /dev/null +++ b/trial-4/checkpoint-3012/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4385bc937b3ea0adfa5c713df04eb100e89eb731c8f09a620e7ed3c4d32d4df3 +size 3166813178 diff --git a/trial-4/checkpoint-3012/rng_state.pth b/trial-4/checkpoint-3012/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..b387de0c48181ec5812538ddf1fc60cfda1a89c1 --- /dev/null +++ b/trial-4/checkpoint-3012/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:914f37830aa379563c31bd15a8b8f53b8ccc8e2de0f0aa6da9695369e4ad84ef +size 14244 diff --git a/trial-4/checkpoint-3012/scheduler.pt b/trial-4/checkpoint-3012/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..1d9bd2473242ce3fbe0329a2f36e48a19da05a94 --- /dev/null +++ b/trial-4/checkpoint-3012/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:159228974380d6d8a61d5fb2da4e5cc76ffc86287d9ca0582ad4c46cb816cc56 +size 1064 diff --git a/trial-4/checkpoint-3012/trainer_state.json b/trial-4/checkpoint-3012/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..997ee714afd97e4a10c30ef416ac22e5d54b0cef --- /dev/null +++ b/trial-4/checkpoint-3012/trainer_state.json @@ -0,0 +1,477 @@ +{ + "best_metric": 0.019555753096938133, + "best_model_checkpoint": "./results/answerdotai/ModernBERT-large/trial-4/checkpoint-3012", + "epoch": 2.0, + "eval_steps": 500, + "global_step": 3012, + "is_hyper_param_search": 
false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.033200531208499334, + "grad_norm": 111.04224395751953, + "learning_rate": 1.1277048866432555e-05, + "loss": 0.3116, + "step": 50 + }, + { + "epoch": 0.06640106241699867, + "grad_norm": 0.14363102614879608, + "learning_rate": 1.123948374695743e-05, + "loss": 0.098, + "step": 100 + }, + { + "epoch": 0.099601593625498, + "grad_norm": 0.10250398516654968, + "learning_rate": 1.1201918627482305e-05, + "loss": 0.0729, + "step": 150 + }, + { + "epoch": 0.13280212483399734, + "grad_norm": 1.9557462930679321, + "learning_rate": 1.116435350800718e-05, + "loss": 0.0537, + "step": 200 + }, + { + "epoch": 0.16600265604249667, + "grad_norm": 0.5240038633346558, + "learning_rate": 1.1126788388532055e-05, + "loss": 0.0442, + "step": 250 + }, + { + "epoch": 0.199203187250996, + "grad_norm": 2.139970541000366, + "learning_rate": 1.1089223269056931e-05, + "loss": 0.036, + "step": 300 + }, + { + "epoch": 0.23240371845949534, + "grad_norm": 0.04191768541932106, + "learning_rate": 1.1051658149581805e-05, + "loss": 0.0355, + "step": 350 + }, + { + "epoch": 0.2656042496679947, + "grad_norm": 0.0023582959547638893, + "learning_rate": 1.1014093030106681e-05, + "loss": 0.0368, + "step": 400 + }, + { + "epoch": 0.29880478087649404, + "grad_norm": 12.193012237548828, + "learning_rate": 1.0976527910631555e-05, + "loss": 0.0574, + "step": 450 + }, + { + "epoch": 0.33200531208499334, + "grad_norm": 0.00729788513854146, + "learning_rate": 1.0938962791156431e-05, + "loss": 0.0238, + "step": 500 + }, + { + "epoch": 0.3652058432934927, + "grad_norm": 1.7422609329223633, + "learning_rate": 1.0901397671681305e-05, + "loss": 0.0213, + "step": 550 + }, + { + "epoch": 0.398406374501992, + "grad_norm": 0.06725198775529861, + "learning_rate": 1.086383255220618e-05, + "loss": 0.013, + "step": 600 + }, + { + "epoch": 0.4316069057104914, + "grad_norm": 0.21940571069717407, + "learning_rate": 1.0826267432731055e-05, + "loss": 0.025, + "step": 650 + }, + { + "epoch": 0.4648074369189907, + "grad_norm": 0.032884348183870316, + "learning_rate": 1.078870231325593e-05, + "loss": 0.0138, + "step": 700 + }, + { + "epoch": 0.49800796812749004, + "grad_norm": 0.0014803586527705193, + "learning_rate": 1.0751137193780805e-05, + "loss": 0.0058, + "step": 750 + }, + { + "epoch": 0.5312084993359893, + "grad_norm": 10.339282989501953, + "learning_rate": 1.071357207430568e-05, + "loss": 0.009, + "step": 800 + }, + { + "epoch": 0.5644090305444888, + "grad_norm": 1.531823754310608, + "learning_rate": 1.0676006954830555e-05, + "loss": 0.0115, + "step": 850 + }, + { + "epoch": 0.5976095617529881, + "grad_norm": 0.00043045339407399297, + "learning_rate": 1.063844183535543e-05, + "loss": 0.0068, + "step": 900 + }, + { + "epoch": 0.6308100929614874, + "grad_norm": 0.025898275896906853, + "learning_rate": 1.0600876715880303e-05, + "loss": 0.0098, + "step": 950 + }, + { + "epoch": 0.6640106241699867, + "grad_norm": 0.0007346518104895949, + "learning_rate": 1.0563311596405178e-05, + "loss": 0.0094, + "step": 1000 + }, + { + "epoch": 0.6972111553784861, + "grad_norm": 0.005919306073337793, + "learning_rate": 1.0525746476930054e-05, + "loss": 0.0115, + "step": 1050 + }, + { + "epoch": 0.7304116865869854, + "grad_norm": 0.04206903651356697, + "learning_rate": 1.0488181357454928e-05, + "loss": 0.0061, + "step": 1100 + }, + { + "epoch": 0.7636122177954847, + "grad_norm": 0.000557853898499161, + "learning_rate": 1.0450616237979804e-05, + "loss": 0.0024, + 
"step": 1150 + }, + { + "epoch": 0.796812749003984, + "grad_norm": 0.0012018937850371003, + "learning_rate": 1.0413051118504678e-05, + "loss": 0.0058, + "step": 1200 + }, + { + "epoch": 0.8300132802124834, + "grad_norm": 0.0013845885405316949, + "learning_rate": 1.0375485999029554e-05, + "loss": 0.0022, + "step": 1250 + }, + { + "epoch": 0.8632138114209827, + "grad_norm": 0.07051751017570496, + "learning_rate": 1.0337920879554428e-05, + "loss": 0.0049, + "step": 1300 + }, + { + "epoch": 0.896414342629482, + "grad_norm": 0.00019932868599426, + "learning_rate": 1.0300355760079302e-05, + "loss": 0.0026, + "step": 1350 + }, + { + "epoch": 0.9296148738379814, + "grad_norm": 2.3489619707106613e-05, + "learning_rate": 1.0262790640604178e-05, + "loss": 0.0027, + "step": 1400 + }, + { + "epoch": 0.9628154050464808, + "grad_norm": 0.0037654500920325518, + "learning_rate": 1.0225225521129052e-05, + "loss": 0.0079, + "step": 1450 + }, + { + "epoch": 0.9960159362549801, + "grad_norm": 0.05777144059538841, + "learning_rate": 1.0187660401653928e-05, + "loss": 0.0154, + "step": 1500 + }, + { + "epoch": 1.0, + "eval_accuracy": 0.9967795801526718, + "eval_f1": 0.9967913721697382, + "eval_loss": 0.022535286843776703, + "eval_precision": 0.9968076807415951, + "eval_recall": 0.9967795801526718, + "eval_runtime": 59.7088, + "eval_samples_per_second": 140.415, + "eval_steps_per_second": 4.388, + "step": 1506 + }, + { + "epoch": 1.0292164674634794, + "grad_norm": 0.020927241072058678, + "learning_rate": 1.0150095282178802e-05, + "loss": 0.0068, + "step": 1550 + }, + { + "epoch": 1.0624169986719787, + "grad_norm": 0.00013940146891400218, + "learning_rate": 1.0112530162703678e-05, + "loss": 0.0023, + "step": 1600 + }, + { + "epoch": 1.095617529880478, + "grad_norm": 0.01088524330407381, + "learning_rate": 1.0074965043228552e-05, + "loss": 0.0062, + "step": 1650 + }, + { + "epoch": 1.1288180610889773, + "grad_norm": 0.058450598269701004, + "learning_rate": 1.0037399923753428e-05, + "loss": 0.0153, + "step": 1700 + }, + { + "epoch": 1.1620185922974768, + "grad_norm": 0.011701447889208794, + "learning_rate": 9.999834804278302e-06, + "loss": 0.0105, + "step": 1750 + }, + { + "epoch": 1.1952191235059761, + "grad_norm": 3.078742742538452, + "learning_rate": 9.962269684803178e-06, + "loss": 0.0076, + "step": 1800 + }, + { + "epoch": 1.2284196547144755, + "grad_norm": 0.02360646426677704, + "learning_rate": 9.924704565328052e-06, + "loss": 0.0025, + "step": 1850 + }, + { + "epoch": 1.2616201859229748, + "grad_norm": 0.0027641034685075283, + "learning_rate": 9.887139445852926e-06, + "loss": 0.0114, + "step": 1900 + }, + { + "epoch": 1.294820717131474, + "grad_norm": 0.00172056641895324, + "learning_rate": 9.849574326377802e-06, + "loss": 0.0003, + "step": 1950 + }, + { + "epoch": 1.3280212483399734, + "grad_norm": 0.07806120812892914, + "learning_rate": 9.812009206902676e-06, + "loss": 0.0008, + "step": 2000 + }, + { + "epoch": 1.361221779548473, + "grad_norm": 0.000884020933881402, + "learning_rate": 9.774444087427552e-06, + "loss": 0.0038, + "step": 2050 + }, + { + "epoch": 1.3944223107569722, + "grad_norm": 0.1488543003797531, + "learning_rate": 9.736878967952426e-06, + "loss": 0.0106, + "step": 2100 + }, + { + "epoch": 1.4276228419654715, + "grad_norm": 0.037523552775382996, + "learning_rate": 9.699313848477302e-06, + "loss": 0.0007, + "step": 2150 + }, + { + "epoch": 1.4608233731739708, + "grad_norm": 0.00033480292768217623, + "learning_rate": 9.661748729002176e-06, + "loss": 0.0116, + "step": 2200 + }, + { + "epoch": 
1.4940239043824701, + "grad_norm": 0.007270739413797855, + "learning_rate": 9.624183609527052e-06, + "loss": 0.0079, + "step": 2250 + }, + { + "epoch": 1.5272244355909694, + "grad_norm": 0.00751983979716897, + "learning_rate": 9.586618490051926e-06, + "loss": 0.0044, + "step": 2300 + }, + { + "epoch": 1.5604249667994687, + "grad_norm": 4.298997402191162, + "learning_rate": 9.549053370576802e-06, + "loss": 0.0014, + "step": 2350 + }, + { + "epoch": 1.593625498007968, + "grad_norm": 0.0014925749273970723, + "learning_rate": 9.511488251101676e-06, + "loss": 0.0001, + "step": 2400 + }, + { + "epoch": 1.6268260292164674, + "grad_norm": 0.003561707679182291, + "learning_rate": 9.47392313162655e-06, + "loss": 0.0077, + "step": 2450 + }, + { + "epoch": 1.6600265604249667, + "grad_norm": 0.0006838434492237866, + "learning_rate": 9.436358012151426e-06, + "loss": 0.0001, + "step": 2500 + }, + { + "epoch": 1.6932270916334662, + "grad_norm": 0.0002309294941369444, + "learning_rate": 9.3987928926763e-06, + "loss": 0.0029, + "step": 2550 + }, + { + "epoch": 1.7264276228419655, + "grad_norm": 0.0011594091774895787, + "learning_rate": 9.361227773201176e-06, + "loss": 0.0107, + "step": 2600 + }, + { + "epoch": 1.7596281540504648, + "grad_norm": 0.00012066392082488164, + "learning_rate": 9.32366265372605e-06, + "loss": 0.0006, + "step": 2650 + }, + { + "epoch": 1.792828685258964, + "grad_norm": 0.0021935878321528435, + "learning_rate": 9.286097534250926e-06, + "loss": 0.0065, + "step": 2700 + }, + { + "epoch": 1.8260292164674636, + "grad_norm": 0.0002105861931340769, + "learning_rate": 9.2485324147758e-06, + "loss": 0.0032, + "step": 2750 + }, + { + "epoch": 1.859229747675963, + "grad_norm": 0.0009871097281575203, + "learning_rate": 9.210967295300676e-06, + "loss": 0.0, + "step": 2800 + }, + { + "epoch": 1.8924302788844622, + "grad_norm": 5.9001271438319236e-05, + "learning_rate": 9.173402175825549e-06, + "loss": 0.0077, + "step": 2850 + }, + { + "epoch": 1.9256308100929616, + "grad_norm": 0.001614038716070354, + "learning_rate": 9.135837056350425e-06, + "loss": 0.0059, + "step": 2900 + }, + { + "epoch": 1.9588313413014609, + "grad_norm": 0.001071999897249043, + "learning_rate": 9.098271936875299e-06, + "loss": 0.008, + "step": 2950 + }, + { + "epoch": 1.9920318725099602, + "grad_norm": 0.018942702561616898, + "learning_rate": 9.060706817400175e-06, + "loss": 0.0107, + "step": 3000 + }, + { + "epoch": 2.0, + "eval_accuracy": 0.997256679389313, + "eval_f1": 0.997238185991172, + "eval_loss": 0.019555753096938133, + "eval_precision": 0.997235028769195, + "eval_recall": 0.997256679389313, + "eval_runtime": 59.293, + "eval_samples_per_second": 141.4, + "eval_steps_per_second": 4.419, + "step": 3012 + } + ], + "logging_steps": 50, + "max_steps": 15060, + "num_input_tokens_seen": 0, + "num_train_epochs": 10, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 1.0188494610051072e+17, + "train_batch_size": 32, + "trial_name": null, + "trial_params": null +} diff --git a/trial-4/checkpoint-3012/training_args.bin b/trial-4/checkpoint-3012/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..6248646a71589b894679be2023592a9168c667cf --- /dev/null +++ b/trial-4/checkpoint-3012/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e6980b2d26960e0944dcf6a709651feb944395421a5df274ddff277d8343607e +size 5368 diff --git a/trial-5/checkpoint-1506/config.json b/trial-5/checkpoint-1506/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ b/trial-5/checkpoint-1506/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": "answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + "intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "vocab_size": 50368 +} diff --git a/trial-5/checkpoint-1506/model.safetensors b/trial-5/checkpoint-1506/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5abdd896ebdc3897cb769570d0ed07a3faa42b39 --- /dev/null +++ b/trial-5/checkpoint-1506/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f5ecd51b91e86db092fb98e5e0ed6bb61c710dc95a00fe365f829c3218537a1 +size 1583351632 diff --git a/trial-5/checkpoint-1506/optimizer.pt b/trial-5/checkpoint-1506/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..de3968d967846bf38f1db5069c146ceca0279b28 --- /dev/null +++ b/trial-5/checkpoint-1506/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0309590d2cd916de4a2f04ddce5837c3c90bd59129970b6cb4f3ed8ac4e03b7 +size 3166813178 diff --git a/trial-5/checkpoint-1506/rng_state.pth b/trial-5/checkpoint-1506/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..cf3d91c5392ca6b7d7e0880933b7830a896d7c9e --- /dev/null +++ b/trial-5/checkpoint-1506/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:568428d80a25211a390c359ca51b0b20b38ca0607fbc196f106c9841c02d3e59 +size 14244 diff --git a/trial-5/checkpoint-1506/scheduler.pt b/trial-5/checkpoint-1506/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..ec19ed6fd9feac9bb72fa293583e28715b752bdd --- /dev/null +++ b/trial-5/checkpoint-1506/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c12ccce76e7b5dcd2d6e5c79dcffd36e8cc7c4912a60cfbc89196c37d5a03edc +size 1064 diff --git a/trial-5/checkpoint-1506/trainer_state.json b/trial-5/checkpoint-1506/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..96f5f8054dc213ab071377d711dcfd292fcd8f3a --- 
/dev/null +++ b/trial-5/checkpoint-1506/trainer_state.json @@ -0,0 +1,255 @@ +{ + "best_metric": 0.022090721875429153, + "best_model_checkpoint": "./results/answerdotai/ModernBERT-large/trial-5/checkpoint-1506", + "epoch": 1.0, + "eval_steps": 500, + "global_step": 1506, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.033200531208499334, + "grad_norm": 21.02681541442871, + "learning_rate": 1.1549761256591334e-05, + "loss": 0.3094, + "step": 50 + }, + { + "epoch": 0.06640106241699867, + "grad_norm": 14.655999183654785, + "learning_rate": 1.1494720457617871e-05, + "loss": 0.0991, + "step": 100 + }, + { + "epoch": 0.099601593625498, + "grad_norm": 0.3093714416027069, + "learning_rate": 1.143967965864441e-05, + "loss": 0.045, + "step": 150 + }, + { + "epoch": 0.13280212483399734, + "grad_norm": 0.19615088403224945, + "learning_rate": 1.1384638859670947e-05, + "loss": 0.0505, + "step": 200 + }, + { + "epoch": 0.16600265604249667, + "grad_norm": 1.4286335706710815, + "learning_rate": 1.1329598060697483e-05, + "loss": 0.0281, + "step": 250 + }, + { + "epoch": 0.199203187250996, + "grad_norm": 2.1918282508850098, + "learning_rate": 1.1274557261724022e-05, + "loss": 0.0536, + "step": 300 + }, + { + "epoch": 0.23240371845949534, + "grad_norm": 2.13693904876709, + "learning_rate": 1.1219516462750559e-05, + "loss": 0.0385, + "step": 350 + }, + { + "epoch": 0.2656042496679947, + "grad_norm": 0.029252415522933006, + "learning_rate": 1.1164475663777096e-05, + "loss": 0.0326, + "step": 400 + }, + { + "epoch": 0.29880478087649404, + "grad_norm": 0.5969660878181458, + "learning_rate": 1.1109434864803635e-05, + "loss": 0.0547, + "step": 450 + }, + { + "epoch": 0.33200531208499334, + "grad_norm": 0.00738520547747612, + "learning_rate": 1.1054394065830171e-05, + "loss": 0.0115, + "step": 500 + }, + { + "epoch": 0.3652058432934927, + "grad_norm": 1.9720779657363892, + "learning_rate": 1.099935326685671e-05, + "loss": 0.0239, + "step": 550 + }, + { + "epoch": 0.398406374501992, + "grad_norm": 0.6309007406234741, + "learning_rate": 1.0944312467883247e-05, + "loss": 0.0043, + "step": 600 + }, + { + "epoch": 0.4316069057104914, + "grad_norm": 0.011916632764041424, + "learning_rate": 1.0889271668909786e-05, + "loss": 0.0164, + "step": 650 + }, + { + "epoch": 0.4648074369189907, + "grad_norm": 0.009058245457708836, + "learning_rate": 1.0834230869936323e-05, + "loss": 0.0103, + "step": 700 + }, + { + "epoch": 0.49800796812749004, + "grad_norm": 0.0032912548631429672, + "learning_rate": 1.0779190070962861e-05, + "loss": 0.028, + "step": 750 + }, + { + "epoch": 0.5312084993359893, + "grad_norm": 0.011074424721300602, + "learning_rate": 1.0724149271989398e-05, + "loss": 0.0104, + "step": 800 + }, + { + "epoch": 0.5644090305444888, + "grad_norm": 0.951624870300293, + "learning_rate": 1.0669108473015937e-05, + "loss": 0.0114, + "step": 850 + }, + { + "epoch": 0.5976095617529881, + "grad_norm": 0.0027369000017642975, + "learning_rate": 1.0614067674042474e-05, + "loss": 0.016, + "step": 900 + }, + { + "epoch": 0.6308100929614874, + "grad_norm": 0.012001908384263515, + "learning_rate": 1.055902687506901e-05, + "loss": 0.0094, + "step": 950 + }, + { + "epoch": 0.6640106241699867, + "grad_norm": 0.69849693775177, + "learning_rate": 1.050398607609555e-05, + "loss": 0.0199, + "step": 1000 + }, + { + "epoch": 0.6972111553784861, + "grad_norm": 0.036301348358392715, + "learning_rate": 1.0448945277122086e-05, + "loss": 0.0077, + "step": 1050 + }, 
+ { + "epoch": 0.7304116865869854, + "grad_norm": 0.008320258930325508, + "learning_rate": 1.0393904478148625e-05, + "loss": 0.0043, + "step": 1100 + }, + { + "epoch": 0.7636122177954847, + "grad_norm": 0.0027414376381784678, + "learning_rate": 1.0338863679175162e-05, + "loss": 0.004, + "step": 1150 + }, + { + "epoch": 0.796812749003984, + "grad_norm": 0.0007768127834424376, + "learning_rate": 1.02838228802017e-05, + "loss": 0.0072, + "step": 1200 + }, + { + "epoch": 0.8300132802124834, + "grad_norm": 0.0004548605065792799, + "learning_rate": 1.0228782081228237e-05, + "loss": 0.0051, + "step": 1250 + }, + { + "epoch": 0.8632138114209827, + "grad_norm": 0.007403654046356678, + "learning_rate": 1.0173741282254776e-05, + "loss": 0.0112, + "step": 1300 + }, + { + "epoch": 0.896414342629482, + "grad_norm": 0.0017905730055645108, + "learning_rate": 1.0118700483281313e-05, + "loss": 0.0069, + "step": 1350 + }, + { + "epoch": 0.9296148738379814, + "grad_norm": 0.0004039919876959175, + "learning_rate": 1.0063659684307851e-05, + "loss": 0.0008, + "step": 1400 + }, + { + "epoch": 0.9628154050464808, + "grad_norm": 0.000610634742770344, + "learning_rate": 1.0008618885334388e-05, + "loss": 0.0011, + "step": 1450 + }, + { + "epoch": 0.9960159362549801, + "grad_norm": 1.0261385440826416, + "learning_rate": 9.953578086360925e-06, + "loss": 0.0207, + "step": 1500 + }, + { + "epoch": 1.0, + "eval_accuracy": 0.9971374045801527, + "eval_f1": 0.9971246021172431, + "eval_loss": 0.022090721875429153, + "eval_precision": 0.9971185448871154, + "eval_recall": 0.9971374045801527, + "eval_runtime": 59.3469, + "eval_samples_per_second": 141.271, + "eval_steps_per_second": 4.415, + "step": 1506 + } + ], + "logging_steps": 50, + "max_steps": 10542, + "num_input_tokens_seen": 0, + "num_train_epochs": 7, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 5.094247305025536e+16, + "train_batch_size": 32, + "trial_name": null, + "trial_params": null +} diff --git a/trial-5/checkpoint-1506/training_args.bin b/trial-5/checkpoint-1506/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..7a2ae06b3650e503e2d0bf4d0512255e9e3a0c82 --- /dev/null +++ b/trial-5/checkpoint-1506/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57df86703a932a26c855abebf91502e9da3c5daba164538c1fa2ecde95e9c014 +size 5368 diff --git a/trial-6/checkpoint-1506/config.json b/trial-6/checkpoint-1506/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ b/trial-6/checkpoint-1506/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": "answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + 
"intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "vocab_size": 50368 +} diff --git a/trial-6/checkpoint-1506/model.safetensors b/trial-6/checkpoint-1506/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..46d99f94fa9c46ad2df97420ebcd2ffc72c3a813 --- /dev/null +++ b/trial-6/checkpoint-1506/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d8dd9c50dd83347fd2f3d076bda888d52bc1f28720f6537878e55817f57b843 +size 1583351632 diff --git a/trial-6/checkpoint-1506/optimizer.pt b/trial-6/checkpoint-1506/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..5aac90610cdc9a6fbeea36f93dbdbe5f9b68f027 --- /dev/null +++ b/trial-6/checkpoint-1506/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba4b588a87754fc94b6d70748348851ee39e0cd46fcba5bc4098f18c984a4eee +size 3166813178 diff --git a/trial-6/checkpoint-1506/rng_state.pth b/trial-6/checkpoint-1506/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..cf3d91c5392ca6b7d7e0880933b7830a896d7c9e --- /dev/null +++ b/trial-6/checkpoint-1506/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:568428d80a25211a390c359ca51b0b20b38ca0607fbc196f106c9841c02d3e59 +size 14244 diff --git a/trial-6/checkpoint-1506/scheduler.pt b/trial-6/checkpoint-1506/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..fdb384a31a704a1eda6fcc9a5f2d006cea007db4 --- /dev/null +++ b/trial-6/checkpoint-1506/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70df650daa8258dd28f35a8ea58ad591c5359c1ae315bc78aa95d246c03b30ae +size 1064 diff --git a/trial-6/checkpoint-1506/trainer_state.json b/trial-6/checkpoint-1506/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..92698fc798b2e21cb700a6388e2ba6c89f3ce12a --- /dev/null +++ b/trial-6/checkpoint-1506/trainer_state.json @@ -0,0 +1,255 @@ +{ + "best_metric": 0.02227787859737873, + "best_model_checkpoint": "./results/answerdotai/ModernBERT-large/trial-6/checkpoint-1506", + "epoch": 1.0, + "eval_steps": 500, + "global_step": 1506, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.033200531208499334, + "grad_norm": 23.0550537109375, + "learning_rate": 9.024759672779674e-06, + "loss": 0.3174, + "step": 50 + }, + { + "epoch": 0.06640106241699867, + "grad_norm": 0.09903453290462494, + "learning_rate": 8.923766390855103e-06, + "loss": 0.1041, + "step": 100 + }, + { + "epoch": 0.099601593625498, + "grad_norm": 0.009657522663474083, + "learning_rate": 8.822773108930532e-06, + "loss": 0.0577, + "step": 150 + }, + { + "epoch": 0.13280212483399734, + "grad_norm": 2.3746416568756104, + "learning_rate": 8.72177982700596e-06, + "loss": 0.0498, + "step": 200 + }, + { + "epoch": 0.16600265604249667, + "grad_norm": 
0.7699489593505859, + "learning_rate": 8.62078654508139e-06, + "loss": 0.05, + "step": 250 + }, + { + "epoch": 0.199203187250996, + "grad_norm": 4.535607814788818, + "learning_rate": 8.519793263156818e-06, + "loss": 0.0324, + "step": 300 + }, + { + "epoch": 0.23240371845949534, + "grad_norm": 0.05702704191207886, + "learning_rate": 8.418799981232248e-06, + "loss": 0.0445, + "step": 350 + }, + { + "epoch": 0.2656042496679947, + "grad_norm": 0.004001598339527845, + "learning_rate": 8.317806699307677e-06, + "loss": 0.0301, + "step": 400 + }, + { + "epoch": 0.29880478087649404, + "grad_norm": 0.0037657192442566156, + "learning_rate": 8.216813417383106e-06, + "loss": 0.0319, + "step": 450 + }, + { + "epoch": 0.33200531208499334, + "grad_norm": 0.001200846047140658, + "learning_rate": 8.115820135458535e-06, + "loss": 0.0256, + "step": 500 + }, + { + "epoch": 0.3652058432934927, + "grad_norm": 11.810125350952148, + "learning_rate": 8.014826853533963e-06, + "loss": 0.0266, + "step": 550 + }, + { + "epoch": 0.398406374501992, + "grad_norm": 4.256345748901367, + "learning_rate": 7.913833571609392e-06, + "loss": 0.0091, + "step": 600 + }, + { + "epoch": 0.4316069057104914, + "grad_norm": 0.0014609894715249538, + "learning_rate": 7.812840289684821e-06, + "loss": 0.0238, + "step": 650 + }, + { + "epoch": 0.4648074369189907, + "grad_norm": 0.0008413286413997412, + "learning_rate": 7.71184700776025e-06, + "loss": 0.0099, + "step": 700 + }, + { + "epoch": 0.49800796812749004, + "grad_norm": 0.004391686990857124, + "learning_rate": 7.610853725835679e-06, + "loss": 0.0157, + "step": 750 + }, + { + "epoch": 0.5312084993359893, + "grad_norm": 0.12218283116817474, + "learning_rate": 7.509860443911107e-06, + "loss": 0.0133, + "step": 800 + }, + { + "epoch": 0.5644090305444888, + "grad_norm": 0.7999175190925598, + "learning_rate": 7.408867161986537e-06, + "loss": 0.0113, + "step": 850 + }, + { + "epoch": 0.5976095617529881, + "grad_norm": 0.0003924691991414875, + "learning_rate": 7.307873880061965e-06, + "loss": 0.008, + "step": 900 + }, + { + "epoch": 0.6308100929614874, + "grad_norm": 0.01373638678342104, + "learning_rate": 7.206880598137394e-06, + "loss": 0.0106, + "step": 950 + }, + { + "epoch": 0.6640106241699867, + "grad_norm": 0.01725645922124386, + "learning_rate": 7.105887316212824e-06, + "loss": 0.0086, + "step": 1000 + }, + { + "epoch": 0.6972111553784861, + "grad_norm": 0.006537695415318012, + "learning_rate": 7.004894034288252e-06, + "loss": 0.003, + "step": 1050 + }, + { + "epoch": 0.7304116865869854, + "grad_norm": 0.003164461348205805, + "learning_rate": 6.9039007523636815e-06, + "loss": 0.0018, + "step": 1100 + }, + { + "epoch": 0.7636122177954847, + "grad_norm": 0.0006072334945201874, + "learning_rate": 6.802907470439109e-06, + "loss": 0.0006, + "step": 1150 + }, + { + "epoch": 0.796812749003984, + "grad_norm": 0.005962160881608725, + "learning_rate": 6.701914188514539e-06, + "loss": 0.0074, + "step": 1200 + }, + { + "epoch": 0.8300132802124834, + "grad_norm": 0.00025012181140482426, + "learning_rate": 6.600920906589967e-06, + "loss": 0.0022, + "step": 1250 + }, + { + "epoch": 0.8632138114209827, + "grad_norm": 0.274515300989151, + "learning_rate": 6.4999276246653965e-06, + "loss": 0.0044, + "step": 1300 + }, + { + "epoch": 0.896414342629482, + "grad_norm": 0.00027220029733143747, + "learning_rate": 6.398934342740826e-06, + "loss": 0.0014, + "step": 1350 + }, + { + "epoch": 0.9296148738379814, + "grad_norm": 0.00016820215387269855, + "learning_rate": 6.297941060816254e-06, + "loss": 0.0001, + 
"step": 1400 + }, + { + "epoch": 0.9628154050464808, + "grad_norm": 0.10992325842380524, + "learning_rate": 6.196947778891684e-06, + "loss": 0.0003, + "step": 1450 + }, + { + "epoch": 0.9960159362549801, + "grad_norm": 0.006835253443568945, + "learning_rate": 6.0959544969671116e-06, + "loss": 0.018, + "step": 1500 + }, + { + "epoch": 1.0, + "eval_accuracy": 0.9976145038167938, + "eval_f1": 0.9975966066790106, + "eval_loss": 0.02227787859737873, + "eval_precision": 0.9975977885860469, + "eval_recall": 0.9976145038167938, + "eval_runtime": 59.8104, + "eval_samples_per_second": 140.176, + "eval_steps_per_second": 4.381, + "step": 1506 + } + ], + "logging_steps": 50, + "max_steps": 4518, + "num_input_tokens_seen": 0, + "num_train_epochs": 3, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 5.094247305025536e+16, + "train_batch_size": 32, + "trial_name": null, + "trial_params": null +} diff --git a/trial-6/checkpoint-1506/training_args.bin b/trial-6/checkpoint-1506/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..e393ffae98b160825d6038bbbecca49d22a2bc31 --- /dev/null +++ b/trial-6/checkpoint-1506/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b58ae1bab3cc88e29382874ef388d5cd6272fd0008643c368666f7b2392869e +size 5368 diff --git a/trial-7/checkpoint-3011/config.json b/trial-7/checkpoint-3011/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ b/trial-7/checkpoint-3011/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": "answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + "intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "vocab_size": 50368 +} diff --git a/trial-7/checkpoint-3011/model.safetensors b/trial-7/checkpoint-3011/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6955bfcf1ec6b6c7f83239d3b84983a5d7bad521 --- /dev/null +++ b/trial-7/checkpoint-3011/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0859c4b1892485b74b8ecaf3fab59380ff1d3f0c2341658af48b7de4dc7d8b51 +size 
1583351632 diff --git a/trial-7/checkpoint-3011/optimizer.pt b/trial-7/checkpoint-3011/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..446ab80386f09dae23e22023faeee702ee7720e7 --- /dev/null +++ b/trial-7/checkpoint-3011/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71c4f9b3a248cfe25e91ce24cc7fb2f42df3c6a2ac9a3c3b6d9a30fa9d58b990 +size 3166813178 diff --git a/trial-7/checkpoint-3011/rng_state.pth b/trial-7/checkpoint-3011/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..cf3d91c5392ca6b7d7e0880933b7830a896d7c9e --- /dev/null +++ b/trial-7/checkpoint-3011/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:568428d80a25211a390c359ca51b0b20b38ca0607fbc196f106c9841c02d3e59 +size 14244 diff --git a/trial-7/checkpoint-3011/scheduler.pt b/trial-7/checkpoint-3011/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..b7f76547d1d7217e1b5c31cc76dc2c2b9a49f692 --- /dev/null +++ b/trial-7/checkpoint-3011/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fef8d62468fcaf63e4e91c49f799e866ab78b86b95db2e7694d7dcb323641b3 +size 1064 diff --git a/trial-7/checkpoint-3011/trainer_state.json b/trial-7/checkpoint-3011/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..59d27a70878a3d9cdba1fe22dd5b531214c9748b --- /dev/null +++ b/trial-7/checkpoint-3011/trainer_state.json @@ -0,0 +1,465 @@ +{ + "best_metric": 0.02605549804866314, + "best_model_checkpoint": "./results/answerdotai/ModernBERT-large/trial-7/checkpoint-3011", + "epoch": 1.0, + "eval_steps": 500, + "global_step": 3011, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.016605778811026237, + "grad_norm": 12.109246253967285, + "learning_rate": 4.671994715185228e-06, + "loss": 0.3942, + "step": 50 + }, + { + "epoch": 0.033211557622052475, + "grad_norm": 18.157163619995117, + "learning_rate": 4.660885201904244e-06, + "loss": 0.1954, + "step": 100 + }, + { + "epoch": 0.04981733643307871, + "grad_norm": 22.050230026245117, + "learning_rate": 4.649775688623259e-06, + "loss": 0.1289, + "step": 150 + }, + { + "epoch": 0.06642311524410495, + "grad_norm": 0.06943712383508682, + "learning_rate": 4.638666175342274e-06, + "loss": 0.0886, + "step": 200 + }, + { + "epoch": 0.08302889405513118, + "grad_norm": 0.16679124534130096, + "learning_rate": 4.6275566620612905e-06, + "loss": 0.024, + "step": 250 + }, + { + "epoch": 0.09963467286615742, + "grad_norm": 0.02219686098396778, + "learning_rate": 4.616447148780305e-06, + "loss": 0.0835, + "step": 300 + }, + { + "epoch": 0.11624045167718366, + "grad_norm": 0.2904365062713623, + "learning_rate": 4.605337635499321e-06, + "loss": 0.0432, + "step": 350 + }, + { + "epoch": 0.1328462304882099, + "grad_norm": 17.55352020263672, + "learning_rate": 4.5942281222183364e-06, + "loss": 0.0565, + "step": 400 + }, + { + "epoch": 0.14945200929923613, + "grad_norm": 0.029744828119874, + "learning_rate": 4.583118608937352e-06, + "loss": 0.0537, + "step": 450 + }, + { + "epoch": 0.16605778811026237, + "grad_norm": 0.015743639320135117, + "learning_rate": 4.572009095656368e-06, + "loss": 0.068, + "step": 500 + }, + { + "epoch": 0.1826635669212886, + "grad_norm": 0.021552711725234985, + "learning_rate": 4.560899582375383e-06, + "loss": 0.0289, + "step": 550 + }, + { + "epoch": 0.19926934573231483, + "grad_norm": 
0.007452903315424919, + "learning_rate": 4.549790069094398e-06, + "loss": 0.0581, + "step": 600 + }, + { + "epoch": 0.2158751245433411, + "grad_norm": 0.016020536422729492, + "learning_rate": 4.538680555813414e-06, + "loss": 0.0471, + "step": 650 + }, + { + "epoch": 0.23248090335436733, + "grad_norm": 0.10039343684911728, + "learning_rate": 4.527571042532429e-06, + "loss": 0.0568, + "step": 700 + }, + { + "epoch": 0.24908668216539356, + "grad_norm": 0.0008059324463829398, + "learning_rate": 4.516461529251445e-06, + "loss": 0.0194, + "step": 750 + }, + { + "epoch": 0.2656924609764198, + "grad_norm": 0.00016456829325761646, + "learning_rate": 4.50535201597046e-06, + "loss": 0.0488, + "step": 800 + }, + { + "epoch": 0.282298239787446, + "grad_norm": 13.807262420654297, + "learning_rate": 4.494242502689476e-06, + "loss": 0.0613, + "step": 850 + }, + { + "epoch": 0.29890401859847227, + "grad_norm": 0.0318632535636425, + "learning_rate": 4.483132989408491e-06, + "loss": 0.0237, + "step": 900 + }, + { + "epoch": 0.3155097974094985, + "grad_norm": 0.005790573079138994, + "learning_rate": 4.472023476127507e-06, + "loss": 0.0289, + "step": 950 + }, + { + "epoch": 0.33211557622052473, + "grad_norm": 0.004899414721876383, + "learning_rate": 4.4609139628465226e-06, + "loss": 0.0288, + "step": 1000 + }, + { + "epoch": 0.348721355031551, + "grad_norm": 0.0006392050418071449, + "learning_rate": 4.449804449565537e-06, + "loss": 0.0435, + "step": 1050 + }, + { + "epoch": 0.3653271338425772, + "grad_norm": 0.00029063530382700264, + "learning_rate": 4.438694936284554e-06, + "loss": 0.0194, + "step": 1100 + }, + { + "epoch": 0.38193291265360346, + "grad_norm": 0.2680830657482147, + "learning_rate": 4.4275854230035685e-06, + "loss": 0.0164, + "step": 1150 + }, + { + "epoch": 0.39853869146462967, + "grad_norm": 0.0035903984680771828, + "learning_rate": 4.416475909722584e-06, + "loss": 0.0167, + "step": 1200 + }, + { + "epoch": 0.41514447027565593, + "grad_norm": 0.3019547164440155, + "learning_rate": 4.4053663964416e-06, + "loss": 0.0183, + "step": 1250 + }, + { + "epoch": 0.4317502490866822, + "grad_norm": 0.310628205537796, + "learning_rate": 4.394256883160615e-06, + "loss": 0.0492, + "step": 1300 + }, + { + "epoch": 0.4483560278977084, + "grad_norm": 3.361008882522583, + "learning_rate": 4.383147369879631e-06, + "loss": 0.0207, + "step": 1350 + }, + { + "epoch": 0.46496180670873466, + "grad_norm": 0.00011381436343071982, + "learning_rate": 4.372037856598646e-06, + "loss": 0.0003, + "step": 1400 + }, + { + "epoch": 0.48156758551976087, + "grad_norm": 0.020212925970554352, + "learning_rate": 4.360928343317661e-06, + "loss": 0.0242, + "step": 1450 + }, + { + "epoch": 0.4981733643307871, + "grad_norm": 0.0014305580407381058, + "learning_rate": 4.3498188300366775e-06, + "loss": 0.0071, + "step": 1500 + }, + { + "epoch": 0.5147791431418134, + "grad_norm": 0.001460731728002429, + "learning_rate": 4.338709316755692e-06, + "loss": 0.0002, + "step": 1550 + }, + { + "epoch": 0.5313849219528396, + "grad_norm": 7.029194355010986, + "learning_rate": 4.327599803474708e-06, + "loss": 0.0236, + "step": 1600 + }, + { + "epoch": 0.5479907007638658, + "grad_norm": 0.0009986236691474915, + "learning_rate": 4.3164902901937234e-06, + "loss": 0.036, + "step": 1650 + }, + { + "epoch": 0.564596479574892, + "grad_norm": 0.001286104554310441, + "learning_rate": 4.305380776912739e-06, + "loss": 0.0009, + "step": 1700 + }, + { + "epoch": 0.5812022583859183, + "grad_norm": 0.0004588727024383843, + "learning_rate": 4.294271263631755e-06, + 
"loss": 0.0025, + "step": 1750 + }, + { + "epoch": 0.5978080371969445, + "grad_norm": 0.00801041629165411, + "learning_rate": 4.28316175035077e-06, + "loss": 0.0143, + "step": 1800 + }, + { + "epoch": 0.6144138160079707, + "grad_norm": 0.005204482469707727, + "learning_rate": 4.272052237069786e-06, + "loss": 0.021, + "step": 1850 + }, + { + "epoch": 0.631019594818997, + "grad_norm": 0.004308747593313456, + "learning_rate": 4.260942723788801e-06, + "loss": 0.0014, + "step": 1900 + }, + { + "epoch": 0.6476253736300233, + "grad_norm": 0.0009348586900159717, + "learning_rate": 4.249833210507817e-06, + "loss": 0.0078, + "step": 1950 + }, + { + "epoch": 0.6642311524410495, + "grad_norm": 0.0001416715094819665, + "learning_rate": 4.238723697226832e-06, + "loss": 0.0, + "step": 2000 + }, + { + "epoch": 0.6808369312520757, + "grad_norm": 0.0011341192293912172, + "learning_rate": 4.227614183945847e-06, + "loss": 0.0267, + "step": 2050 + }, + { + "epoch": 0.697442710063102, + "grad_norm": 0.00011227472714381292, + "learning_rate": 4.216504670664863e-06, + "loss": 0.0022, + "step": 2100 + }, + { + "epoch": 0.7140484888741282, + "grad_norm": 6.859990389784798e-05, + "learning_rate": 4.205395157383878e-06, + "loss": 0.0211, + "step": 2150 + }, + { + "epoch": 0.7306542676851544, + "grad_norm": 0.0015743171097710729, + "learning_rate": 4.194285644102894e-06, + "loss": 0.0002, + "step": 2200 + }, + { + "epoch": 0.7472600464961807, + "grad_norm": 2.1428555555758066e-05, + "learning_rate": 4.18317613082191e-06, + "loss": 0.0192, + "step": 2250 + }, + { + "epoch": 0.7638658253072069, + "grad_norm": 0.002894976641982794, + "learning_rate": 4.172066617540924e-06, + "loss": 0.0188, + "step": 2300 + }, + { + "epoch": 0.7804716041182331, + "grad_norm": 0.05406679958105087, + "learning_rate": 4.160957104259941e-06, + "loss": 0.0056, + "step": 2350 + }, + { + "epoch": 0.7970773829292593, + "grad_norm": 4.45985933765769e-05, + "learning_rate": 4.149847590978956e-06, + "loss": 0.0007, + "step": 2400 + }, + { + "epoch": 0.8136831617402857, + "grad_norm": 0.0006716567440889776, + "learning_rate": 4.138738077697971e-06, + "loss": 0.0, + "step": 2450 + }, + { + "epoch": 0.8302889405513119, + "grad_norm": 3.945700154872611e-05, + "learning_rate": 4.1276285644169875e-06, + "loss": 0.0, + "step": 2500 + }, + { + "epoch": 0.8468947193623381, + "grad_norm": 0.016212645918130875, + "learning_rate": 4.116519051136002e-06, + "loss": 0.0153, + "step": 2550 + }, + { + "epoch": 0.8635004981733644, + "grad_norm": 0.00015812188212294132, + "learning_rate": 4.105409537855018e-06, + "loss": 0.0116, + "step": 2600 + }, + { + "epoch": 0.8801062769843906, + "grad_norm": 0.0008818822097964585, + "learning_rate": 4.094300024574033e-06, + "loss": 0.0486, + "step": 2650 + }, + { + "epoch": 0.8967120557954168, + "grad_norm": 0.0006027265917509794, + "learning_rate": 4.083190511293049e-06, + "loss": 0.0002, + "step": 2700 + }, + { + "epoch": 0.913317834606443, + "grad_norm": 8.562572475057095e-05, + "learning_rate": 4.0720809980120645e-06, + "loss": 0.0027, + "step": 2750 + }, + { + "epoch": 0.9299236134174693, + "grad_norm": 0.000113841233542189, + "learning_rate": 4.06097148473108e-06, + "loss": 0.0008, + "step": 2800 + }, + { + "epoch": 0.9465293922284955, + "grad_norm": 0.024305831640958786, + "learning_rate": 4.049861971450095e-06, + "loss": 0.0, + "step": 2850 + }, + { + "epoch": 0.9631351710395217, + "grad_norm": 9.622455596923828, + "learning_rate": 4.0387524581691104e-06, + "loss": 0.0007, + "step": 2900 + }, + { + "epoch": 
0.9797409498505479, + "grad_norm": 13.132140159606934, + "learning_rate": 4.027642944888126e-06, + "loss": 0.0345, + "step": 2950 + }, + { + "epoch": 0.9963467286615743, + "grad_norm": 0.00010690866474760696, + "learning_rate": 4.016533431607142e-06, + "loss": 0.0006, + "step": 3000 + }, + { + "epoch": 1.0, + "eval_accuracy": 0.9968988549618321, + "eval_f1": 0.9968942616229051, + "eval_loss": 0.02605549804866314, + "eval_precision": 0.9968904067400857, + "eval_recall": 0.9968988549618321, + "eval_runtime": 67.8691, + "eval_samples_per_second": 123.532, + "eval_steps_per_second": 7.721, + "step": 3011 + } + ], + "logging_steps": 50, + "max_steps": 21077, + "num_input_tokens_seen": 0, + "num_train_epochs": 7, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 5.094247305025536e+16, + "train_batch_size": 16, + "trial_name": null, + "trial_params": null +} diff --git a/trial-7/checkpoint-3011/training_args.bin b/trial-7/checkpoint-3011/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..255ef37a262794ff856a1cc284f522f7fe7fa04a --- /dev/null +++ b/trial-7/checkpoint-3011/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21ed8149f0719675be0bf107366eed49ad0dafe3f8d5e9711b2ccadaed21578f +size 5368 diff --git a/trial-8/checkpoint-12044/config.json b/trial-8/checkpoint-12044/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ b/trial-8/checkpoint-12044/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": "answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + "intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "vocab_size": 50368 +} diff --git a/trial-8/checkpoint-12044/model.safetensors b/trial-8/checkpoint-12044/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ff8ba1f6ce7974a1ee799da536d1b2a4576b9e82 --- /dev/null +++ b/trial-8/checkpoint-12044/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6bbdf735675bb2bcf42b0650292480cc6fb694cb27cb5ad045c7352732d89fb +size 1583351632 diff --git 
a/trial-8/checkpoint-12044/optimizer.pt b/trial-8/checkpoint-12044/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..dc1083b3efc655f2c43a1e0befb3deba782ba0f4 --- /dev/null +++ b/trial-8/checkpoint-12044/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22baae1c9922eb26ceae1f9297798c2d237b16a29a1ce8bff535bb2f66b41805 +size 3166813178 diff --git a/trial-8/checkpoint-12044/rng_state.pth b/trial-8/checkpoint-12044/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..a66ad1c552617ec104e4fc9635ea86bdfde3fd1b --- /dev/null +++ b/trial-8/checkpoint-12044/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b85e468a0db3c543df3103ed7dfb7428f0c8472d244e4b040e18fdfbd2543cd +size 14244 diff --git a/trial-8/checkpoint-12044/scheduler.pt b/trial-8/checkpoint-12044/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..c1a5afda0ae682a82fee35e0ad7dea039f6d33da --- /dev/null +++ b/trial-8/checkpoint-12044/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b9fb276c931af3e2c8d905a326b32f7601bcd62de03c83c5f9d5fa65acb04d3 +size 1064 diff --git a/trial-8/checkpoint-12044/trainer_state.json b/trial-8/checkpoint-12044/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..a2178e34f54202e4f0a9d6962ad09c9365c32639 --- /dev/null +++ b/trial-8/checkpoint-12044/trainer_state.json @@ -0,0 +1,1761 @@ +{ + "best_metric": 0.0274968221783638, + "best_model_checkpoint": "./results/answerdotai/ModernBERT-large/trial-8/checkpoint-12044", + "epoch": 4.0, + "eval_steps": 500, + "global_step": 12044, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.016605778811026237, + "grad_norm": 48.80170440673828, + "learning_rate": 5.442069277611984e-06, + "loss": 0.3999, + "step": 50 + }, + { + "epoch": 0.033211557622052475, + "grad_norm": 8.915654182434082, + "learning_rate": 5.426965843782023e-06, + "loss": 0.2085, + "step": 100 + }, + { + "epoch": 0.04981733643307871, + "grad_norm": 17.907716751098633, + "learning_rate": 5.41186240995206e-06, + "loss": 0.152, + "step": 150 + }, + { + "epoch": 0.06642311524410495, + "grad_norm": 0.6026399731636047, + "learning_rate": 5.396758976122098e-06, + "loss": 0.0975, + "step": 200 + }, + { + "epoch": 0.08302889405513118, + "grad_norm": 0.017961109057068825, + "learning_rate": 5.381655542292135e-06, + "loss": 0.0348, + "step": 250 + }, + { + "epoch": 0.09963467286615742, + "grad_norm": 0.004240815062075853, + "learning_rate": 5.366552108462174e-06, + "loss": 0.0876, + "step": 300 + }, + { + "epoch": 0.11624045167718366, + "grad_norm": 0.09322185814380646, + "learning_rate": 5.351448674632211e-06, + "loss": 0.0434, + "step": 350 + }, + { + "epoch": 0.1328462304882099, + "grad_norm": 0.0315406508743763, + "learning_rate": 5.336345240802249e-06, + "loss": 0.0447, + "step": 400 + }, + { + "epoch": 0.14945200929923613, + "grad_norm": 0.0029612442012876272, + "learning_rate": 5.321241806972287e-06, + "loss": 0.0518, + "step": 450 + }, + { + "epoch": 0.16605778811026237, + "grad_norm": 42.15907669067383, + "learning_rate": 5.306138373142325e-06, + "loss": 0.0455, + "step": 500 + }, + { + "epoch": 0.1826635669212886, + "grad_norm": 0.0030956254340708256, + "learning_rate": 5.2910349393123625e-06, + "loss": 0.0319, + "step": 550 + }, + { + "epoch": 0.19926934573231483, + "grad_norm": 
1.1757360696792603, + "learning_rate": 5.2759315054824e-06, + "loss": 0.0419, + "step": 600 + }, + { + "epoch": 0.2158751245433411, + "grad_norm": 0.03680999204516411, + "learning_rate": 5.2608280716524385e-06, + "loss": 0.0421, + "step": 650 + }, + { + "epoch": 0.23248090335436733, + "grad_norm": 0.0038643060252070427, + "learning_rate": 5.245724637822476e-06, + "loss": 0.0536, + "step": 700 + }, + { + "epoch": 0.24908668216539356, + "grad_norm": 0.20435115694999695, + "learning_rate": 5.230621203992514e-06, + "loss": 0.03, + "step": 750 + }, + { + "epoch": 0.2656924609764198, + "grad_norm": 0.0012974872952327132, + "learning_rate": 5.215517770162552e-06, + "loss": 0.0507, + "step": 800 + }, + { + "epoch": 0.282298239787446, + "grad_norm": 58.780330657958984, + "learning_rate": 5.20041433633259e-06, + "loss": 0.0405, + "step": 850 + }, + { + "epoch": 0.29890401859847227, + "grad_norm": 0.04035910218954086, + "learning_rate": 5.185310902502627e-06, + "loss": 0.0445, + "step": 900 + }, + { + "epoch": 0.3155097974094985, + "grad_norm": 0.16344955563545227, + "learning_rate": 5.170207468672665e-06, + "loss": 0.0198, + "step": 950 + }, + { + "epoch": 0.33211557622052473, + "grad_norm": 0.0037070370744913816, + "learning_rate": 5.155104034842703e-06, + "loss": 0.0346, + "step": 1000 + }, + { + "epoch": 0.348721355031551, + "grad_norm": 0.0007356529822573066, + "learning_rate": 5.140000601012741e-06, + "loss": 0.0323, + "step": 1050 + }, + { + "epoch": 0.3653271338425772, + "grad_norm": 9.303622209699824e-05, + "learning_rate": 5.124897167182778e-06, + "loss": 0.008, + "step": 1100 + }, + { + "epoch": 0.38193291265360346, + "grad_norm": 0.007506866008043289, + "learning_rate": 5.109793733352816e-06, + "loss": 0.0495, + "step": 1150 + }, + { + "epoch": 0.39853869146462967, + "grad_norm": 0.0005906258593313396, + "learning_rate": 5.094690299522854e-06, + "loss": 0.0102, + "step": 1200 + }, + { + "epoch": 0.41514447027565593, + "grad_norm": 0.19435827434062958, + "learning_rate": 5.079586865692891e-06, + "loss": 0.0103, + "step": 1250 + }, + { + "epoch": 0.4317502490866822, + "grad_norm": 0.29838407039642334, + "learning_rate": 5.064483431862929e-06, + "loss": 0.0495, + "step": 1300 + }, + { + "epoch": 0.4483560278977084, + "grad_norm": 4.454320907592773, + "learning_rate": 5.049379998032967e-06, + "loss": 0.0201, + "step": 1350 + }, + { + "epoch": 0.46496180670873466, + "grad_norm": 0.00048437301302328706, + "learning_rate": 5.034276564203005e-06, + "loss": 0.0017, + "step": 1400 + }, + { + "epoch": 0.48156758551976087, + "grad_norm": 0.003737648716196418, + "learning_rate": 5.0191731303730425e-06, + "loss": 0.0276, + "step": 1450 + }, + { + "epoch": 0.4981733643307871, + "grad_norm": 0.00028460309840738773, + "learning_rate": 5.0040696965430805e-06, + "loss": 0.0032, + "step": 1500 + }, + { + "epoch": 0.5147791431418134, + "grad_norm": 0.018163174390792847, + "learning_rate": 4.9889662627131185e-06, + "loss": 0.0032, + "step": 1550 + }, + { + "epoch": 0.5313849219528396, + "grad_norm": 6.737831115722656, + "learning_rate": 4.9738628288831566e-06, + "loss": 0.0212, + "step": 1600 + }, + { + "epoch": 0.5479907007638658, + "grad_norm": 0.00024021146236918867, + "learning_rate": 4.958759395053194e-06, + "loss": 0.0335, + "step": 1650 + }, + { + "epoch": 0.564596479574892, + "grad_norm": 0.0005020114476792514, + "learning_rate": 4.943655961223232e-06, + "loss": 0.0004, + "step": 1700 + }, + { + "epoch": 0.5812022583859183, + "grad_norm": 0.00025337134138680995, + "learning_rate": 4.92855252739327e-06, 
+ "loss": 0.0034, + "step": 1750 + }, + { + "epoch": 0.5978080371969445, + "grad_norm": 0.00022687511227559298, + "learning_rate": 4.913449093563307e-06, + "loss": 0.0103, + "step": 1800 + }, + { + "epoch": 0.6144138160079707, + "grad_norm": 0.006733611226081848, + "learning_rate": 4.898345659733345e-06, + "loss": 0.0197, + "step": 1850 + }, + { + "epoch": 0.631019594818997, + "grad_norm": 0.004075032193213701, + "learning_rate": 4.883242225903383e-06, + "loss": 0.008, + "step": 1900 + }, + { + "epoch": 0.6476253736300233, + "grad_norm": 0.0022155444603413343, + "learning_rate": 4.868138792073421e-06, + "loss": 0.004, + "step": 1950 + }, + { + "epoch": 0.6642311524410495, + "grad_norm": 0.00036354802432470024, + "learning_rate": 4.853035358243458e-06, + "loss": 0.0012, + "step": 2000 + }, + { + "epoch": 0.6808369312520757, + "grad_norm": 0.000720661657396704, + "learning_rate": 4.837931924413496e-06, + "loss": 0.0156, + "step": 2050 + }, + { + "epoch": 0.697442710063102, + "grad_norm": 0.00012774542847182602, + "learning_rate": 4.822828490583534e-06, + "loss": 0.0038, + "step": 2100 + }, + { + "epoch": 0.7140484888741282, + "grad_norm": 0.000228753182454966, + "learning_rate": 4.807725056753571e-06, + "loss": 0.0109, + "step": 2150 + }, + { + "epoch": 0.7306542676851544, + "grad_norm": 2.7858286557602696e-05, + "learning_rate": 4.792621622923609e-06, + "loss": 0.0036, + "step": 2200 + }, + { + "epoch": 0.7472600464961807, + "grad_norm": 4.053724478580989e-05, + "learning_rate": 4.777518189093647e-06, + "loss": 0.0087, + "step": 2250 + }, + { + "epoch": 0.7638658253072069, + "grad_norm": 0.010803287848830223, + "learning_rate": 4.762414755263685e-06, + "loss": 0.015, + "step": 2300 + }, + { + "epoch": 0.7804716041182331, + "grad_norm": 0.005179752130061388, + "learning_rate": 4.7473113214337225e-06, + "loss": 0.0044, + "step": 2350 + }, + { + "epoch": 0.7970773829292593, + "grad_norm": 0.0006959940074011683, + "learning_rate": 4.7322078876037605e-06, + "loss": 0.0172, + "step": 2400 + }, + { + "epoch": 0.8136831617402857, + "grad_norm": 3.6501307477010414e-05, + "learning_rate": 4.7171044537737986e-06, + "loss": 0.0, + "step": 2450 + }, + { + "epoch": 0.8302889405513119, + "grad_norm": 5.4570980864809826e-05, + "learning_rate": 4.7020010199438366e-06, + "loss": 0.0006, + "step": 2500 + }, + { + "epoch": 0.8468947193623381, + "grad_norm": 0.009553221985697746, + "learning_rate": 4.686897586113874e-06, + "loss": 0.0419, + "step": 2550 + }, + { + "epoch": 0.8635004981733644, + "grad_norm": 0.00020860738004557788, + "learning_rate": 4.671794152283912e-06, + "loss": 0.0104, + "step": 2600 + }, + { + "epoch": 0.8801062769843906, + "grad_norm": 0.0002856598002836108, + "learning_rate": 4.65669071845395e-06, + "loss": 0.0249, + "step": 2650 + }, + { + "epoch": 0.8967120557954168, + "grad_norm": 0.0002604172914288938, + "learning_rate": 4.641587284623987e-06, + "loss": 0.001, + "step": 2700 + }, + { + "epoch": 0.913317834606443, + "grad_norm": 5.387173587223515e-05, + "learning_rate": 4.626483850794025e-06, + "loss": 0.0001, + "step": 2750 + }, + { + "epoch": 0.9299236134174693, + "grad_norm": 4.0213195461547e-05, + "learning_rate": 4.611380416964063e-06, + "loss": 0.006, + "step": 2800 + }, + { + "epoch": 0.9465293922284955, + "grad_norm": 0.003638714551925659, + "learning_rate": 4.596276983134101e-06, + "loss": 0.0001, + "step": 2850 + }, + { + "epoch": 0.9631351710395217, + "grad_norm": 0.29953381419181824, + "learning_rate": 4.581173549304138e-06, + "loss": 0.0001, + "step": 2900 + }, + { + 
"epoch": 0.9797409498505479, + "grad_norm": 17.772207260131836, + "learning_rate": 4.566070115474176e-06, + "loss": 0.0233, + "step": 2950 + }, + { + "epoch": 0.9963467286615743, + "grad_norm": 0.0010157329961657524, + "learning_rate": 4.550966681644214e-06, + "loss": 0.0055, + "step": 3000 + }, + { + "epoch": 1.0, + "eval_accuracy": 0.9961832061068703, + "eval_f1": 0.9961944045354951, + "eval_loss": 0.031067248433828354, + "eval_precision": 0.9962084795779106, + "eval_recall": 0.9961832061068703, + "eval_runtime": 70.2789, + "eval_samples_per_second": 119.296, + "eval_steps_per_second": 7.456, + "step": 3011 + }, + { + "epoch": 1.0129525074726005, + "grad_norm": 0.0008370025898329914, + "learning_rate": 4.535863247814252e-06, + "loss": 0.0003, + "step": 3050 + }, + { + "epoch": 1.0295582862836268, + "grad_norm": 0.0001854402362369001, + "learning_rate": 4.520759813984289e-06, + "loss": 0.0, + "step": 3100 + }, + { + "epoch": 1.0461640650946529, + "grad_norm": 0.007859999313950539, + "learning_rate": 4.505656380154327e-06, + "loss": 0.0037, + "step": 3150 + }, + { + "epoch": 1.0627698439056792, + "grad_norm": 0.0009850228670984507, + "learning_rate": 4.490552946324365e-06, + "loss": 0.0047, + "step": 3200 + }, + { + "epoch": 1.0793756227167055, + "grad_norm": 0.019813211634755135, + "learning_rate": 4.4754495124944025e-06, + "loss": 0.0109, + "step": 3250 + }, + { + "epoch": 1.0959814015277316, + "grad_norm": 0.0002668738889042288, + "learning_rate": 4.4603460786644406e-06, + "loss": 0.0027, + "step": 3300 + }, + { + "epoch": 1.112587180338758, + "grad_norm": 2.7341079658071976e-06, + "learning_rate": 4.4452426448344786e-06, + "loss": 0.0006, + "step": 3350 + }, + { + "epoch": 1.1291929591497842, + "grad_norm": 1.898204027384054e-05, + "learning_rate": 4.430139211004517e-06, + "loss": 0.0, + "step": 3400 + }, + { + "epoch": 1.1457987379608103, + "grad_norm": 1.0880743502639234e-05, + "learning_rate": 4.415035777174554e-06, + "loss": 0.0076, + "step": 3450 + }, + { + "epoch": 1.1624045167718366, + "grad_norm": 0.0007442326168529689, + "learning_rate": 4.399932343344592e-06, + "loss": 0.0114, + "step": 3500 + }, + { + "epoch": 1.1790102955828627, + "grad_norm": 0.00044301379239186645, + "learning_rate": 4.38482890951463e-06, + "loss": 0.0, + "step": 3550 + }, + { + "epoch": 1.195616074393889, + "grad_norm": 1.4020783964951988e-05, + "learning_rate": 4.369725475684668e-06, + "loss": 0.0101, + "step": 3600 + }, + { + "epoch": 1.2122218532049154, + "grad_norm": 13.00955867767334, + "learning_rate": 4.354622041854705e-06, + "loss": 0.0065, + "step": 3650 + }, + { + "epoch": 1.2288276320159415, + "grad_norm": 1.9742499716812745e-05, + "learning_rate": 4.339518608024743e-06, + "loss": 0.0095, + "step": 3700 + }, + { + "epoch": 1.2454334108269678, + "grad_norm": 5.559636701946147e-05, + "learning_rate": 4.324415174194781e-06, + "loss": 0.0237, + "step": 3750 + }, + { + "epoch": 1.2620391896379939, + "grad_norm": 5.4768162954133004e-05, + "learning_rate": 4.309311740364818e-06, + "loss": 0.0, + "step": 3800 + }, + { + "epoch": 1.2786449684490202, + "grad_norm": 0.0005510418559424579, + "learning_rate": 4.294208306534856e-06, + "loss": 0.0, + "step": 3850 + }, + { + "epoch": 1.2952507472600465, + "grad_norm": 0.0012520917225629091, + "learning_rate": 4.279104872704894e-06, + "loss": 0.0, + "step": 3900 + }, + { + "epoch": 1.3118565260710726, + "grad_norm": 0.004351610783487558, + "learning_rate": 4.264001438874932e-06, + "loss": 0.0001, + "step": 3950 + }, + { + "epoch": 1.328462304882099, + 
"grad_norm": 7.022717909421772e-05, + "learning_rate": 4.248898005044969e-06, + "loss": 0.01, + "step": 4000 + }, + { + "epoch": 1.3450680836931252, + "grad_norm": 0.0009612261201255023, + "learning_rate": 4.233794571215007e-06, + "loss": 0.0019, + "step": 4050 + }, + { + "epoch": 1.3616738625041513, + "grad_norm": 4.9249112635152414e-05, + "learning_rate": 4.218691137385045e-06, + "loss": 0.0045, + "step": 4100 + }, + { + "epoch": 1.3782796413151777, + "grad_norm": 3.0692669952259166e-06, + "learning_rate": 4.203587703555083e-06, + "loss": 0.001, + "step": 4150 + }, + { + "epoch": 1.394885420126204, + "grad_norm": 7.633322638866957e-06, + "learning_rate": 4.1884842697251206e-06, + "loss": 0.0001, + "step": 4200 + }, + { + "epoch": 1.41149119893723, + "grad_norm": 1.2652909390453715e-05, + "learning_rate": 4.1733808358951594e-06, + "loss": 0.0007, + "step": 4250 + }, + { + "epoch": 1.4280969777482564, + "grad_norm": 5.906139631406404e-05, + "learning_rate": 4.158277402065197e-06, + "loss": 0.0304, + "step": 4300 + }, + { + "epoch": 1.4447027565592827, + "grad_norm": 0.00537989754229784, + "learning_rate": 4.143173968235234e-06, + "loss": 0.0378, + "step": 4350 + }, + { + "epoch": 1.4613085353703088, + "grad_norm": 0.0006592119461856782, + "learning_rate": 4.128070534405272e-06, + "loss": 0.0051, + "step": 4400 + }, + { + "epoch": 1.4779143141813351, + "grad_norm": 0.00047999247908592224, + "learning_rate": 4.11296710057531e-06, + "loss": 0.0001, + "step": 4450 + }, + { + "epoch": 1.4945200929923614, + "grad_norm": 0.0008564750314690173, + "learning_rate": 4.097863666745348e-06, + "loss": 0.0, + "step": 4500 + }, + { + "epoch": 1.5111258718033875, + "grad_norm": 0.00031443778425455093, + "learning_rate": 4.082760232915385e-06, + "loss": 0.0014, + "step": 4550 + }, + { + "epoch": 1.5277316506144138, + "grad_norm": 0.00020036890055052936, + "learning_rate": 4.067656799085424e-06, + "loss": 0.0027, + "step": 4600 + }, + { + "epoch": 1.5443374294254402, + "grad_norm": 5.1619044825201854e-05, + "learning_rate": 4.052553365255461e-06, + "loss": 0.0001, + "step": 4650 + }, + { + "epoch": 1.5609432082364663, + "grad_norm": 1.1955181435041595e-05, + "learning_rate": 4.037449931425499e-06, + "loss": 0.0, + "step": 4700 + }, + { + "epoch": 1.5775489870474926, + "grad_norm": 0.00013191364996600896, + "learning_rate": 4.022346497595536e-06, + "loss": 0.0, + "step": 4750 + }, + { + "epoch": 1.594154765858519, + "grad_norm": 4.815014563064324e-06, + "learning_rate": 4.007243063765575e-06, + "loss": 0.0, + "step": 4800 + }, + { + "epoch": 1.610760544669545, + "grad_norm": 0.0005532304057851434, + "learning_rate": 3.992139629935612e-06, + "loss": 0.0001, + "step": 4850 + }, + { + "epoch": 1.627366323480571, + "grad_norm": 0.001612741849385202, + "learning_rate": 3.977036196105649e-06, + "loss": 0.0027, + "step": 4900 + }, + { + "epoch": 1.6439721022915976, + "grad_norm": 1.2691608390014153e-05, + "learning_rate": 3.961932762275688e-06, + "loss": 0.0001, + "step": 4950 + }, + { + "epoch": 1.6605778811026237, + "grad_norm": 1.8307471691514365e-05, + "learning_rate": 3.946829328445725e-06, + "loss": 0.0, + "step": 5000 + }, + { + "epoch": 1.6771836599136498, + "grad_norm": 0.00032624322921037674, + "learning_rate": 3.931725894615763e-06, + "loss": 0.0, + "step": 5050 + }, + { + "epoch": 1.6937894387246761, + "grad_norm": 0.0010627523297443986, + "learning_rate": 3.9166224607858006e-06, + "loss": 0.0033, + "step": 5100 + }, + { + "epoch": 1.7103952175357025, + "grad_norm": 0.013439467176795006, + 
"learning_rate": 3.9015190269558394e-06, + "loss": 0.0001, + "step": 5150 + }, + { + "epoch": 1.7270009963467285, + "grad_norm": 2.326617504877504e-05, + "learning_rate": 3.886415593125877e-06, + "loss": 0.012, + "step": 5200 + }, + { + "epoch": 1.7436067751577549, + "grad_norm": 0.00021617028687614948, + "learning_rate": 3.871312159295915e-06, + "loss": 0.0039, + "step": 5250 + }, + { + "epoch": 1.7602125539687812, + "grad_norm": 0.00042225251672789454, + "learning_rate": 3.856208725465953e-06, + "loss": 0.0024, + "step": 5300 + }, + { + "epoch": 1.7768183327798073, + "grad_norm": 2.684223545657005e-05, + "learning_rate": 3.841105291635991e-06, + "loss": 0.0107, + "step": 5350 + }, + { + "epoch": 1.7934241115908336, + "grad_norm": 2.1881678549107164e-06, + "learning_rate": 3.826001857806028e-06, + "loss": 0.0001, + "step": 5400 + }, + { + "epoch": 1.81002989040186, + "grad_norm": 0.0001719454739941284, + "learning_rate": 3.8108984239760654e-06, + "loss": 0.0002, + "step": 5450 + }, + { + "epoch": 1.826635669212886, + "grad_norm": 5.180573134566657e-05, + "learning_rate": 3.7957949901461034e-06, + "loss": 0.0169, + "step": 5500 + }, + { + "epoch": 1.8432414480239123, + "grad_norm": 0.009630163200199604, + "learning_rate": 3.780691556316141e-06, + "loss": 0.0174, + "step": 5550 + }, + { + "epoch": 1.8598472268349386, + "grad_norm": 0.000130218337289989, + "learning_rate": 3.765588122486179e-06, + "loss": 0.0, + "step": 5600 + }, + { + "epoch": 1.8764530056459647, + "grad_norm": 0.0013764349278062582, + "learning_rate": 3.7504846886562166e-06, + "loss": 0.0387, + "step": 5650 + }, + { + "epoch": 1.893058784456991, + "grad_norm": 0.00011296494631096721, + "learning_rate": 3.7353812548262546e-06, + "loss": 0.0076, + "step": 5700 + }, + { + "epoch": 1.9096645632680174, + "grad_norm": 0.00044875897583551705, + "learning_rate": 3.7202778209962922e-06, + "loss": 0.0, + "step": 5750 + }, + { + "epoch": 1.9262703420790435, + "grad_norm": 0.0002820456284098327, + "learning_rate": 3.7051743871663302e-06, + "loss": 0.0098, + "step": 5800 + }, + { + "epoch": 1.9428761208900698, + "grad_norm": 0.0017261169850826263, + "learning_rate": 3.690070953336368e-06, + "loss": 0.0048, + "step": 5850 + }, + { + "epoch": 1.959481899701096, + "grad_norm": 0.0021717797499150038, + "learning_rate": 3.674967519506406e-06, + "loss": 0.0001, + "step": 5900 + }, + { + "epoch": 1.9760876785121222, + "grad_norm": 0.0007332819513976574, + "learning_rate": 3.6598640856764434e-06, + "loss": 0.0019, + "step": 5950 + }, + { + "epoch": 1.9926934573231485, + "grad_norm": 0.00039128505159169436, + "learning_rate": 3.644760651846481e-06, + "loss": 0.0003, + "step": 6000 + }, + { + "epoch": 2.0, + "eval_accuracy": 0.9976145038167938, + "eval_f1": 0.9976038350977028, + "eval_loss": 0.028184829279780388, + "eval_precision": 0.9975999534801656, + "eval_recall": 0.9976145038167938, + "eval_runtime": 67.8568, + "eval_samples_per_second": 123.554, + "eval_steps_per_second": 7.722, + "step": 6022 + }, + { + "epoch": 2.009299236134175, + "grad_norm": 0.0002182232856284827, + "learning_rate": 3.629657218016519e-06, + "loss": 0.0, + "step": 6050 + }, + { + "epoch": 2.025905014945201, + "grad_norm": 1.5974823327269405e-05, + "learning_rate": 3.6145537841865566e-06, + "loss": 0.0, + "step": 6100 + }, + { + "epoch": 2.042510793756227, + "grad_norm": 8.559755951864645e-05, + "learning_rate": 3.5994503503565946e-06, + "loss": 0.0, + "step": 6150 + }, + { + "epoch": 2.0591165725672536, + "grad_norm": 2.975755160150584e-05, + "learning_rate": 
3.5843469165266322e-06, + "loss": 0.0, + "step": 6200 + }, + { + "epoch": 2.0757223513782797, + "grad_norm": 0.0016021888004615903, + "learning_rate": 3.5692434826966702e-06, + "loss": 0.0001, + "step": 6250 + }, + { + "epoch": 2.0923281301893057, + "grad_norm": 1.926331788126845e-05, + "learning_rate": 3.554140048866708e-06, + "loss": 0.02, + "step": 6300 + }, + { + "epoch": 2.1089339090003323, + "grad_norm": 5.0432750867912546e-05, + "learning_rate": 3.5390366150367463e-06, + "loss": 0.0014, + "step": 6350 + }, + { + "epoch": 2.1255396878113584, + "grad_norm": 0.00012250966392457485, + "learning_rate": 3.5239331812067834e-06, + "loss": 0.0001, + "step": 6400 + }, + { + "epoch": 2.1421454666223845, + "grad_norm": 0.0031545101664960384, + "learning_rate": 3.508829747376821e-06, + "loss": 0.0014, + "step": 6450 + }, + { + "epoch": 2.158751245433411, + "grad_norm": 8.892184268916026e-06, + "learning_rate": 3.493726313546859e-06, + "loss": 0.0122, + "step": 6500 + }, + { + "epoch": 2.175357024244437, + "grad_norm": 4.931144212605432e-05, + "learning_rate": 3.4786228797168966e-06, + "loss": 0.0, + "step": 6550 + }, + { + "epoch": 2.191962803055463, + "grad_norm": 0.0001632977946428582, + "learning_rate": 3.4635194458869346e-06, + "loss": 0.0088, + "step": 6600 + }, + { + "epoch": 2.2085685818664897, + "grad_norm": 5.291106936056167e-05, + "learning_rate": 3.4484160120569722e-06, + "loss": 0.0134, + "step": 6650 + }, + { + "epoch": 2.225174360677516, + "grad_norm": 0.0012922051828354597, + "learning_rate": 3.4333125782270107e-06, + "loss": 0.0021, + "step": 6700 + }, + { + "epoch": 2.241780139488542, + "grad_norm": 0.00011858004290843382, + "learning_rate": 3.418209144397048e-06, + "loss": 0.0, + "step": 6750 + }, + { + "epoch": 2.2583859182995685, + "grad_norm": 55.799949645996094, + "learning_rate": 3.4031057105670863e-06, + "loss": 0.003, + "step": 6800 + }, + { + "epoch": 2.2749916971105946, + "grad_norm": 1.2432922630978283e-05, + "learning_rate": 3.3880022767371234e-06, + "loss": 0.0002, + "step": 6850 + }, + { + "epoch": 2.2915974759216207, + "grad_norm": 1.9737499314942397e-05, + "learning_rate": 3.372898842907162e-06, + "loss": 0.0011, + "step": 6900 + }, + { + "epoch": 2.308203254732647, + "grad_norm": 6.67273998260498e-05, + "learning_rate": 3.357795409077199e-06, + "loss": 0.0, + "step": 6950 + }, + { + "epoch": 2.3248090335436733, + "grad_norm": 3.6073315641260706e-06, + "learning_rate": 3.3426919752472366e-06, + "loss": 0.0, + "step": 7000 + }, + { + "epoch": 2.3414148123546994, + "grad_norm": 5.72378994547762e-05, + "learning_rate": 3.327588541417275e-06, + "loss": 0.0216, + "step": 7050 + }, + { + "epoch": 2.3580205911657255, + "grad_norm": 5.8640736824600026e-05, + "learning_rate": 3.3124851075873122e-06, + "loss": 0.0, + "step": 7100 + }, + { + "epoch": 2.374626369976752, + "grad_norm": 5.57128332729917e-05, + "learning_rate": 3.2973816737573507e-06, + "loss": 0.0, + "step": 7150 + }, + { + "epoch": 2.391232148787778, + "grad_norm": 2.3135487936087884e-05, + "learning_rate": 3.282278239927388e-06, + "loss": 0.0, + "step": 7200 + }, + { + "epoch": 2.407837927598804, + "grad_norm": 5.598703137366101e-05, + "learning_rate": 3.2671748060974263e-06, + "loss": 0.0, + "step": 7250 + }, + { + "epoch": 2.4244437064098308, + "grad_norm": 2.7807505830423906e-05, + "learning_rate": 3.2520713722674634e-06, + "loss": 0.0, + "step": 7300 + }, + { + "epoch": 2.441049485220857, + "grad_norm": 5.711228004656732e-05, + "learning_rate": 3.236967938437502e-06, + "loss": 0.0, + "step": 7350 + }, + 
{ + "epoch": 2.457655264031883, + "grad_norm": 4.2451065382920206e-05, + "learning_rate": 3.2218645046075395e-06, + "loss": 0.0, + "step": 7400 + }, + { + "epoch": 2.4742610428429095, + "grad_norm": 4.7442899813177064e-05, + "learning_rate": 3.2067610707775775e-06, + "loss": 0.0, + "step": 7450 + }, + { + "epoch": 2.4908668216539356, + "grad_norm": 0.0008124230080284178, + "learning_rate": 3.191657636947615e-06, + "loss": 0.0111, + "step": 7500 + }, + { + "epoch": 2.5074726004649617, + "grad_norm": 0.00031824593315832317, + "learning_rate": 3.1765542031176522e-06, + "loss": 0.0141, + "step": 7550 + }, + { + "epoch": 2.5240783792759878, + "grad_norm": 2.0334004148025997e-05, + "learning_rate": 3.1614507692876907e-06, + "loss": 0.0001, + "step": 7600 + }, + { + "epoch": 2.5406841580870143, + "grad_norm": 0.0011262138141319156, + "learning_rate": 3.146347335457728e-06, + "loss": 0.0, + "step": 7650 + }, + { + "epoch": 2.5572899368980404, + "grad_norm": 7.019140412012348e-06, + "learning_rate": 3.1312439016277663e-06, + "loss": 0.0001, + "step": 7700 + }, + { + "epoch": 2.5738957157090665, + "grad_norm": 4.291220466257073e-05, + "learning_rate": 3.116140467797804e-06, + "loss": 0.0, + "step": 7750 + }, + { + "epoch": 2.590501494520093, + "grad_norm": 4.21523473050911e-05, + "learning_rate": 3.101037033967842e-06, + "loss": 0.0, + "step": 7800 + }, + { + "epoch": 2.607107273331119, + "grad_norm": 6.554043466167059e-06, + "learning_rate": 3.0859336001378795e-06, + "loss": 0.0, + "step": 7850 + }, + { + "epoch": 2.6237130521421452, + "grad_norm": 3.164474401273765e-05, + "learning_rate": 3.0708301663079175e-06, + "loss": 0.0, + "step": 7900 + }, + { + "epoch": 2.6403188309531718, + "grad_norm": 9.753752237884328e-06, + "learning_rate": 3.055726732477955e-06, + "loss": 0.0, + "step": 7950 + }, + { + "epoch": 2.656924609764198, + "grad_norm": 0.00010515918984310701, + "learning_rate": 3.040623298647993e-06, + "loss": 0.0, + "step": 8000 + }, + { + "epoch": 2.673530388575224, + "grad_norm": 1.947501368704252e-05, + "learning_rate": 3.0255198648180307e-06, + "loss": 0.0, + "step": 8050 + }, + { + "epoch": 2.6901361673862505, + "grad_norm": 0.003008282510563731, + "learning_rate": 3.0104164309880683e-06, + "loss": 0.0, + "step": 8100 + }, + { + "epoch": 2.7067419461972766, + "grad_norm": 0.006155023351311684, + "learning_rate": 2.9953129971581063e-06, + "loss": 0.0137, + "step": 8150 + }, + { + "epoch": 2.7233477250083027, + "grad_norm": 0.00037434257683344185, + "learning_rate": 2.980209563328144e-06, + "loss": 0.0002, + "step": 8200 + }, + { + "epoch": 2.7399535038193292, + "grad_norm": 8.50517099024728e-05, + "learning_rate": 2.965106129498182e-06, + "loss": 0.0001, + "step": 8250 + }, + { + "epoch": 2.7565592826303553, + "grad_norm": 0.00013099303760100156, + "learning_rate": 2.9500026956682195e-06, + "loss": 0.0, + "step": 8300 + }, + { + "epoch": 2.7731650614413814, + "grad_norm": 0.0001120947563322261, + "learning_rate": 2.9348992618382575e-06, + "loss": 0.0, + "step": 8350 + }, + { + "epoch": 2.789770840252408, + "grad_norm": 0.0002777460322249681, + "learning_rate": 2.919795828008295e-06, + "loss": 0.0, + "step": 8400 + }, + { + "epoch": 2.806376619063434, + "grad_norm": 0.00042251782724633813, + "learning_rate": 2.904692394178333e-06, + "loss": 0.0, + "step": 8450 + }, + { + "epoch": 2.82298239787446, + "grad_norm": 3.1358853448182344e-05, + "learning_rate": 2.8895889603483707e-06, + "loss": 0.0, + "step": 8500 + }, + { + "epoch": 2.8395881766854867, + "grad_norm": 0.0007451030542142689, + 
"learning_rate": 2.8744855265184087e-06, + "loss": 0.0124, + "step": 8550 + }, + { + "epoch": 2.856193955496513, + "grad_norm": 0.00039325086981989443, + "learning_rate": 2.8593820926884463e-06, + "loss": 0.0, + "step": 8600 + }, + { + "epoch": 2.872799734307539, + "grad_norm": 0.0003086184442508966, + "learning_rate": 2.844278658858484e-06, + "loss": 0.0, + "step": 8650 + }, + { + "epoch": 2.8894055131185654, + "grad_norm": 0.00011556244862731546, + "learning_rate": 2.829175225028522e-06, + "loss": 0.0, + "step": 8700 + }, + { + "epoch": 2.9060112919295915, + "grad_norm": 0.0001225081505253911, + "learning_rate": 2.8140717911985595e-06, + "loss": 0.0, + "step": 8750 + }, + { + "epoch": 2.9226170707406176, + "grad_norm": 0.0001323948526987806, + "learning_rate": 2.7989683573685975e-06, + "loss": 0.0001, + "step": 8800 + }, + { + "epoch": 2.939222849551644, + "grad_norm": 0.00018296284542884678, + "learning_rate": 2.783864923538635e-06, + "loss": 0.0, + "step": 8850 + }, + { + "epoch": 2.9558286283626702, + "grad_norm": 5.0213868235005066e-05, + "learning_rate": 2.768761489708673e-06, + "loss": 0.0, + "step": 8900 + }, + { + "epoch": 2.9724344071736963, + "grad_norm": 7.030667620711029e-05, + "learning_rate": 2.7536580558787107e-06, + "loss": 0.0, + "step": 8950 + }, + { + "epoch": 2.989040185984723, + "grad_norm": 0.0009642278891988099, + "learning_rate": 2.7385546220487487e-06, + "loss": 0.0049, + "step": 9000 + }, + { + "epoch": 3.0, + "eval_accuracy": 0.9974952290076335, + "eval_f1": 0.9974706794195317, + "eval_loss": 0.03217455372214317, + "eval_precision": 0.9974786154491889, + "eval_recall": 0.9974952290076335, + "eval_runtime": 67.0641, + "eval_samples_per_second": 125.015, + "eval_steps_per_second": 7.813, + "step": 9033 + }, + { + "epoch": 3.005645964795749, + "grad_norm": 0.0006342845736071467, + "learning_rate": 2.7234511882187863e-06, + "loss": 0.0, + "step": 9050 + }, + { + "epoch": 3.022251743606775, + "grad_norm": 3.0738679015485104e-06, + "learning_rate": 2.708347754388824e-06, + "loss": 0.0002, + "step": 9100 + }, + { + "epoch": 3.0388575224178016, + "grad_norm": 0.00030696720932610333, + "learning_rate": 2.693244320558862e-06, + "loss": 0.0, + "step": 9150 + }, + { + "epoch": 3.0554633012288277, + "grad_norm": 9.7329871096008e-07, + "learning_rate": 2.6781408867288995e-06, + "loss": 0.0, + "step": 9200 + }, + { + "epoch": 3.072069080039854, + "grad_norm": 3.646068080342957e-06, + "learning_rate": 2.6630374528989375e-06, + "loss": 0.0, + "step": 9250 + }, + { + "epoch": 3.0886748588508803, + "grad_norm": 3.0643525406048866e-06, + "learning_rate": 2.6479340190689755e-06, + "loss": 0.0, + "step": 9300 + }, + { + "epoch": 3.1052806376619064, + "grad_norm": 7.582376838399796e-06, + "learning_rate": 2.632830585239013e-06, + "loss": 0.0, + "step": 9350 + }, + { + "epoch": 3.1218864164729325, + "grad_norm": 6.548184046550887e-06, + "learning_rate": 2.6177271514090507e-06, + "loss": 0.0, + "step": 9400 + }, + { + "epoch": 3.1384921952839586, + "grad_norm": 0.00010061210923595354, + "learning_rate": 2.6026237175790883e-06, + "loss": 0.0079, + "step": 9450 + }, + { + "epoch": 3.155097974094985, + "grad_norm": 7.847424967621919e-06, + "learning_rate": 2.5875202837491263e-06, + "loss": 0.0007, + "step": 9500 + }, + { + "epoch": 3.1717037529060113, + "grad_norm": 0.00012697964848484844, + "learning_rate": 2.572416849919164e-06, + "loss": 0.0, + "step": 9550 + }, + { + "epoch": 3.1883095317170373, + "grad_norm": 0.0023716590367257595, + "learning_rate": 2.557313416089202e-06, + "loss": 
0.0, + "step": 9600 + }, + { + "epoch": 3.204915310528064, + "grad_norm": 7.6247142715146765e-06, + "learning_rate": 2.54220998225924e-06, + "loss": 0.0, + "step": 9650 + }, + { + "epoch": 3.22152108933909, + "grad_norm": 1.218132547364803e-05, + "learning_rate": 2.5271065484292775e-06, + "loss": 0.0, + "step": 9700 + }, + { + "epoch": 3.238126868150116, + "grad_norm": 2.6325127691961825e-05, + "learning_rate": 2.5120031145993155e-06, + "loss": 0.0, + "step": 9750 + }, + { + "epoch": 3.2547326469611426, + "grad_norm": 8.65807305672206e-05, + "learning_rate": 2.496899680769353e-06, + "loss": 0.0, + "step": 9800 + }, + { + "epoch": 3.2713384257721687, + "grad_norm": 7.907479448476806e-05, + "learning_rate": 2.481796246939391e-06, + "loss": 0.0, + "step": 9850 + }, + { + "epoch": 3.287944204583195, + "grad_norm": 1.084964424080681e-05, + "learning_rate": 2.4666928131094287e-06, + "loss": 0.0, + "step": 9900 + }, + { + "epoch": 3.3045499833942213, + "grad_norm": 3.668981889859424e-06, + "learning_rate": 2.4515893792794663e-06, + "loss": 0.0, + "step": 9950 + }, + { + "epoch": 3.3211557622052474, + "grad_norm": 0.008243892341852188, + "learning_rate": 2.4364859454495043e-06, + "loss": 0.0002, + "step": 10000 + }, + { + "epoch": 3.3377615410162735, + "grad_norm": 2.178689101128839e-05, + "learning_rate": 2.421382511619542e-06, + "loss": 0.0, + "step": 10050 + }, + { + "epoch": 3.3543673198273, + "grad_norm": 0.0018933096434921026, + "learning_rate": 2.40627907778958e-06, + "loss": 0.0, + "step": 10100 + }, + { + "epoch": 3.370973098638326, + "grad_norm": 5.356103883968899e-06, + "learning_rate": 2.3911756439596175e-06, + "loss": 0.0, + "step": 10150 + }, + { + "epoch": 3.3875788774493523, + "grad_norm": 0.006842803675681353, + "learning_rate": 2.3760722101296555e-06, + "loss": 0.0, + "step": 10200 + }, + { + "epoch": 3.404184656260379, + "grad_norm": 1.741836422297638e-05, + "learning_rate": 2.360968776299693e-06, + "loss": 0.0206, + "step": 10250 + }, + { + "epoch": 3.420790435071405, + "grad_norm": 1.896383400890045e-05, + "learning_rate": 2.345865342469731e-06, + "loss": 0.0, + "step": 10300 + }, + { + "epoch": 3.437396213882431, + "grad_norm": 3.9336646295851097e-05, + "learning_rate": 2.3307619086397687e-06, + "loss": 0.0009, + "step": 10350 + }, + { + "epoch": 3.454001992693457, + "grad_norm": 2.4336228307220154e-05, + "learning_rate": 2.3156584748098067e-06, + "loss": 0.0, + "step": 10400 + }, + { + "epoch": 3.4706077715044836, + "grad_norm": 2.094502860927605e-06, + "learning_rate": 2.3005550409798443e-06, + "loss": 0.0001, + "step": 10450 + }, + { + "epoch": 3.4872135503155097, + "grad_norm": 1.520580735814292e-05, + "learning_rate": 2.285451607149882e-06, + "loss": 0.0, + "step": 10500 + }, + { + "epoch": 3.503819329126536, + "grad_norm": 0.11434303224086761, + "learning_rate": 2.27034817331992e-06, + "loss": 0.0002, + "step": 10550 + }, + { + "epoch": 3.5204251079375624, + "grad_norm": 3.6159185583528597e-06, + "learning_rate": 2.2552447394899575e-06, + "loss": 0.017, + "step": 10600 + }, + { + "epoch": 3.5370308867485885, + "grad_norm": 3.091506414421019e-06, + "learning_rate": 2.2401413056599955e-06, + "loss": 0.0001, + "step": 10650 + }, + { + "epoch": 3.5536366655596145, + "grad_norm": 2.3690083253313787e-05, + "learning_rate": 2.225037871830033e-06, + "loss": 0.0153, + "step": 10700 + }, + { + "epoch": 3.570242444370641, + "grad_norm": 7.99979898147285e-05, + "learning_rate": 2.209934438000071e-06, + "loss": 0.0, + "step": 10750 + }, + { + "epoch": 3.586848223181667, + 
"grad_norm": 3.25123392030946e-06, + "learning_rate": 2.1948310041701087e-06, + "loss": 0.0004, + "step": 10800 + }, + { + "epoch": 3.6034540019926933, + "grad_norm": 0.0005248406087048352, + "learning_rate": 2.1797275703401468e-06, + "loss": 0.0, + "step": 10850 + }, + { + "epoch": 3.62005978080372, + "grad_norm": 6.938075239304453e-05, + "learning_rate": 2.1646241365101843e-06, + "loss": 0.0, + "step": 10900 + }, + { + "epoch": 3.636665559614746, + "grad_norm": 0.0003245340776629746, + "learning_rate": 2.1495207026802224e-06, + "loss": 0.015, + "step": 10950 + }, + { + "epoch": 3.653271338425772, + "grad_norm": 0.00026093522319570184, + "learning_rate": 2.13441726885026e-06, + "loss": 0.0001, + "step": 11000 + }, + { + "epoch": 3.6698771172367985, + "grad_norm": 3.259763980167918e-05, + "learning_rate": 2.1193138350202975e-06, + "loss": 0.0, + "step": 11050 + }, + { + "epoch": 3.6864828960478246, + "grad_norm": 4.6551769628422335e-05, + "learning_rate": 2.1042104011903355e-06, + "loss": 0.0, + "step": 11100 + }, + { + "epoch": 3.7030886748588507, + "grad_norm": 1.7444475588490604e-06, + "learning_rate": 2.089106967360373e-06, + "loss": 0.0, + "step": 11150 + }, + { + "epoch": 3.7196944536698773, + "grad_norm": 1.3187262084102258e-05, + "learning_rate": 2.074003533530411e-06, + "loss": 0.0001, + "step": 11200 + }, + { + "epoch": 3.7363002324809034, + "grad_norm": 0.0007097522029653192, + "learning_rate": 2.0589000997004487e-06, + "loss": 0.0282, + "step": 11250 + }, + { + "epoch": 3.7529060112919295, + "grad_norm": 0.000811311649158597, + "learning_rate": 2.0437966658704868e-06, + "loss": 0.0001, + "step": 11300 + }, + { + "epoch": 3.769511790102956, + "grad_norm": 5.775378303951584e-05, + "learning_rate": 2.0286932320405243e-06, + "loss": 0.0, + "step": 11350 + }, + { + "epoch": 3.786117568913982, + "grad_norm": 6.828286859672517e-05, + "learning_rate": 2.0135897982105624e-06, + "loss": 0.0, + "step": 11400 + }, + { + "epoch": 3.802723347725008, + "grad_norm": 0.0004246097814757377, + "learning_rate": 1.9984863643806e-06, + "loss": 0.0, + "step": 11450 + }, + { + "epoch": 3.8193291265360347, + "grad_norm": 6.14697128185071e-05, + "learning_rate": 1.983382930550638e-06, + "loss": 0.0, + "step": 11500 + }, + { + "epoch": 3.835934905347061, + "grad_norm": 0.2492341250181198, + "learning_rate": 1.9682794967206756e-06, + "loss": 0.0, + "step": 11550 + }, + { + "epoch": 3.852540684158087, + "grad_norm": 1.8779872334562242e-05, + "learning_rate": 1.953176062890713e-06, + "loss": 0.0, + "step": 11600 + }, + { + "epoch": 3.8691464629691135, + "grad_norm": 1.4916713553247973e-05, + "learning_rate": 1.938072629060751e-06, + "loss": 0.0, + "step": 11650 + }, + { + "epoch": 3.8857522417801396, + "grad_norm": 3.2033625757321715e-05, + "learning_rate": 1.9229691952307887e-06, + "loss": 0.0, + "step": 11700 + }, + { + "epoch": 3.9023580205911657, + "grad_norm": 9.27414839679841e-06, + "learning_rate": 1.9078657614008268e-06, + "loss": 0.0, + "step": 11750 + }, + { + "epoch": 3.918963799402192, + "grad_norm": 6.454864433180774e-06, + "learning_rate": 1.8927623275708646e-06, + "loss": 0.0, + "step": 11800 + }, + { + "epoch": 3.9355695782132183, + "grad_norm": 7.898042895249091e-06, + "learning_rate": 1.8776588937409024e-06, + "loss": 0.0, + "step": 11850 + }, + { + "epoch": 3.9521753570242444, + "grad_norm": 4.904618253931403e-05, + "learning_rate": 1.8625554599109402e-06, + "loss": 0.0, + "step": 11900 + }, + { + "epoch": 3.968781135835271, + "grad_norm": 0.0036423425190150738, + "learning_rate": 
1.847452026080978e-06, + "loss": 0.0, + "step": 11950 + }, + { + "epoch": 3.985386914646297, + "grad_norm": 1.6448282622150145e-05, + "learning_rate": 1.8323485922510158e-06, + "loss": 0.0, + "step": 12000 + }, + { + "epoch": 4.0, + "eval_accuracy": 0.9973759541984732, + "eval_f1": 0.9973562673469119, + "eval_loss": 0.0274968221783638, + "eval_precision": 0.9973556002844243, + "eval_recall": 0.9973759541984732, + "eval_runtime": 68.5952, + "eval_samples_per_second": 122.224, + "eval_steps_per_second": 7.639, + "step": 12044 + } + ], + "logging_steps": 50, + "max_steps": 18066, + "num_input_tokens_seen": 0, + "num_train_epochs": 6, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 2.0376989220102144e+17, + "train_batch_size": 16, + "trial_name": null, + "trial_params": null +} diff --git a/trial-8/checkpoint-12044/training_args.bin b/trial-8/checkpoint-12044/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..34adec04cff99d2ceb60dc0f5c3b2216647051c8 --- /dev/null +++ b/trial-8/checkpoint-12044/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11ebe0c2534d4ae7752f2d96389341bfd408897355106562564bff189a14d890 +size 5368 diff --git a/trial-9/checkpoint-1506/config.json b/trial-9/checkpoint-1506/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c2f5f8523b11e91a7470747dc46f5e9681ebd1ba --- /dev/null +++ b/trial-9/checkpoint-1506/config.json @@ -0,0 +1,47 @@ +{ + "_name_or_path": "answerdotai/ModernBERT-large", + "architectures": [ + "ModernBertForSequenceClassification" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 50281, + "classifier_activation": "gelu", + "classifier_bias": false, + "classifier_dropout": 0.0, + "classifier_pooling": "mean", + "cls_token_id": 50281, + "decoder_bias": true, + "deterministic_flash_attn": false, + "embedding_dropout": 0.0, + "eos_token_id": 50282, + "global_attn_every_n_layers": 3, + "global_rope_theta": 160000.0, + "gradient_checkpointing": false, + "hidden_activation": "gelu", + "hidden_size": 1024, + "initializer_cutoff_factor": 2.0, + "initializer_range": 0.02, + "intermediate_size": 2624, + "layer_norm_eps": 1e-05, + "local_attention": 128, + "local_rope_theta": 10000.0, + "max_position_embeddings": 8192, + "mlp_bias": false, + "mlp_dropout": 0.0, + "model_type": "modernbert", + "norm_bias": false, + "norm_eps": 1e-05, + "num_attention_heads": 16, + "num_hidden_layers": 28, + "pad_token_id": 50283, + "position_embedding_type": "absolute", + "problem_type": "single_label_classification", + "reference_compile": true, + "sep_token_id": 50282, + "sparse_pred_ignore_index": -100, + "sparse_prediction": false, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "vocab_size": 50368 +} diff --git a/trial-9/checkpoint-1506/model.safetensors b/trial-9/checkpoint-1506/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..925627136ff1c5c69daad5af8f724f80a1267592 --- /dev/null +++ b/trial-9/checkpoint-1506/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2edb96c0ca8681b40ab169b6a9192e86810f84b31b1ce785f00b5e6187cbb538 +size 1583351632 diff --git a/trial-9/checkpoint-1506/optimizer.pt b/trial-9/checkpoint-1506/optimizer.pt new file 
mode 100644 index 0000000000000000000000000000000000000000..5d0dafd69287424957ebf4459b79cfb21bd47ec9 --- /dev/null +++ b/trial-9/checkpoint-1506/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5479a24c09dcc160dc37e09839854c182f88580d0d807c4bd6e2091fd2b5d332 +size 3166813178 diff --git a/trial-9/checkpoint-1506/rng_state.pth b/trial-9/checkpoint-1506/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..cf3d91c5392ca6b7d7e0880933b7830a896d7c9e --- /dev/null +++ b/trial-9/checkpoint-1506/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:568428d80a25211a390c359ca51b0b20b38ca0607fbc196f106c9841c02d3e59 +size 14244 diff --git a/trial-9/checkpoint-1506/scheduler.pt b/trial-9/checkpoint-1506/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..42bd8443a9299a5d94e7998213e8e97a0e40bca5 --- /dev/null +++ b/trial-9/checkpoint-1506/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dbc66e986ba41b82e868eb4bfbd153d30d0870e158b9a104f2dc65641280882 +size 1064 diff --git a/trial-9/checkpoint-1506/trainer_state.json b/trial-9/checkpoint-1506/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..269ac13bed4d45b2950c3d3035badbd0dcd4862b --- /dev/null +++ b/trial-9/checkpoint-1506/trainer_state.json @@ -0,0 +1,255 @@ +{ + "best_metric": 0.025783643126487732, + "best_model_checkpoint": "./results/answerdotai/ModernBERT-large/trial-9/checkpoint-1506", + "epoch": 1.0, + "eval_steps": 500, + "global_step": 1506, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.033200531208499334, + "grad_norm": 91.63007354736328, + "learning_rate": 5.34544840343677e-06, + "loss": 0.3686, + "step": 50 + }, + { + "epoch": 0.06640106241699867, + "grad_norm": 1.7436106204986572, + "learning_rate": 5.300709129889425e-06, + "loss": 0.1568, + "step": 100 + }, + { + "epoch": 0.099601593625498, + "grad_norm": 0.2795293927192688, + "learning_rate": 5.25596985634208e-06, + "loss": 0.0528, + "step": 150 + }, + { + "epoch": 0.13280212483399734, + "grad_norm": 2.035282850265503, + "learning_rate": 5.211230582794735e-06, + "loss": 0.0524, + "step": 200 + }, + { + "epoch": 0.16600265604249667, + "grad_norm": 14.907175064086914, + "learning_rate": 5.1664913092473905e-06, + "loss": 0.0518, + "step": 250 + }, + { + "epoch": 0.199203187250996, + "grad_norm": 6.169591903686523, + "learning_rate": 5.121752035700045e-06, + "loss": 0.0319, + "step": 300 + }, + { + "epoch": 0.23240371845949534, + "grad_norm": 13.695945739746094, + "learning_rate": 5.077012762152701e-06, + "loss": 0.0517, + "step": 350 + }, + { + "epoch": 0.2656042496679947, + "grad_norm": 2.6819403171539307, + "learning_rate": 5.032273488605355e-06, + "loss": 0.0285, + "step": 400 + }, + { + "epoch": 0.29880478087649404, + "grad_norm": 0.0010744177270680666, + "learning_rate": 4.98753421505801e-06, + "loss": 0.0376, + "step": 450 + }, + { + "epoch": 0.33200531208499334, + "grad_norm": 0.011883193626999855, + "learning_rate": 4.942794941510665e-06, + "loss": 0.0194, + "step": 500 + }, + { + "epoch": 0.3652058432934927, + "grad_norm": 0.5719029903411865, + "learning_rate": 4.89805566796332e-06, + "loss": 0.0129, + "step": 550 + }, + { + "epoch": 0.398406374501992, + "grad_norm": 3.079650640487671, + "learning_rate": 4.853316394415975e-06, + "loss": 0.0061, + "step": 600 + }, + { + "epoch": 0.4316069057104914, 
+ "grad_norm": 0.010011658072471619, + "learning_rate": 4.808577120868631e-06, + "loss": 0.0187, + "step": 650 + }, + { + "epoch": 0.4648074369189907, + "grad_norm": 0.06711253523826599, + "learning_rate": 4.763837847321286e-06, + "loss": 0.0082, + "step": 700 + }, + { + "epoch": 0.49800796812749004, + "grad_norm": 0.0012383051216602325, + "learning_rate": 4.719098573773941e-06, + "loss": 0.012, + "step": 750 + }, + { + "epoch": 0.5312084993359893, + "grad_norm": 15.470338821411133, + "learning_rate": 4.674359300226596e-06, + "loss": 0.0094, + "step": 800 + }, + { + "epoch": 0.5644090305444888, + "grad_norm": 5.324957370758057, + "learning_rate": 4.629620026679251e-06, + "loss": 0.0156, + "step": 850 + }, + { + "epoch": 0.5976095617529881, + "grad_norm": 0.02133342996239662, + "learning_rate": 4.584880753131907e-06, + "loss": 0.0071, + "step": 900 + }, + { + "epoch": 0.6308100929614874, + "grad_norm": 0.02741403691470623, + "learning_rate": 4.540141479584561e-06, + "loss": 0.0062, + "step": 950 + }, + { + "epoch": 0.6640106241699867, + "grad_norm": 0.014414147473871708, + "learning_rate": 4.495402206037217e-06, + "loss": 0.0052, + "step": 1000 + }, + { + "epoch": 0.6972111553784861, + "grad_norm": 0.004212552681565285, + "learning_rate": 4.4506629324898716e-06, + "loss": 0.0064, + "step": 1050 + }, + { + "epoch": 0.7304116865869854, + "grad_norm": 0.0006867638439871371, + "learning_rate": 4.405923658942527e-06, + "loss": 0.0029, + "step": 1100 + }, + { + "epoch": 0.7636122177954847, + "grad_norm": 0.00018333905609324574, + "learning_rate": 4.361184385395181e-06, + "loss": 0.0077, + "step": 1150 + }, + { + "epoch": 0.796812749003984, + "grad_norm": 4.749911749968305e-05, + "learning_rate": 4.3164451118478365e-06, + "loss": 0.0008, + "step": 1200 + }, + { + "epoch": 0.8300132802124834, + "grad_norm": 0.0021585654467344284, + "learning_rate": 4.271705838300491e-06, + "loss": 0.0028, + "step": 1250 + }, + { + "epoch": 0.8632138114209827, + "grad_norm": 0.1583136022090912, + "learning_rate": 4.226966564753147e-06, + "loss": 0.0121, + "step": 1300 + }, + { + "epoch": 0.896414342629482, + "grad_norm": 0.00355019373819232, + "learning_rate": 4.1822272912058014e-06, + "loss": 0.0059, + "step": 1350 + }, + { + "epoch": 0.9296148738379814, + "grad_norm": 0.005780957173556089, + "learning_rate": 4.137488017658457e-06, + "loss": 0.0001, + "step": 1400 + }, + { + "epoch": 0.9628154050464808, + "grad_norm": 10.613182067871094, + "learning_rate": 4.0927487441111125e-06, + "loss": 0.002, + "step": 1450 + }, + { + "epoch": 0.9960159362549801, + "grad_norm": 0.004242348484694958, + "learning_rate": 4.048009470563767e-06, + "loss": 0.0105, + "step": 1500 + }, + { + "epoch": 1.0, + "eval_accuracy": 0.997256679389313, + "eval_f1": 0.9972423423004315, + "eval_loss": 0.025783643126487732, + "eval_precision": 0.9972372276927278, + "eval_recall": 0.997256679389313, + "eval_runtime": 59.1909, + "eval_samples_per_second": 141.643, + "eval_steps_per_second": 4.426, + "step": 1506 + } + ], + "logging_steps": 50, + "max_steps": 6024, + "num_input_tokens_seen": 0, + "num_train_epochs": 4, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 5.094247305025536e+16, + "train_batch_size": 32, + "trial_name": null, + "trial_params": null +} diff --git a/trial-9/checkpoint-1506/training_args.bin 
b/trial-9/checkpoint-1506/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..a4073deb2ae64ba33babaa4ba1860cb9da43fbe9 --- /dev/null +++ b/trial-9/checkpoint-1506/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c35ad3522beb32a84479f9bfdbdfbd0c74a8e122a29c3ef7f1bf6f8cdd916a28 +size 5368
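For quick comparison across the hyperparameter-search trials checked in above, the `trainer_state.json` files can be read directly: each one stores the per-epoch evaluation metrics inside its `log_history` array. The sketch below is illustrative only; it assumes the checkpoint layout shown in this diff (e.g. `trial-9/checkpoint-1506/trainer_state.json` under `./results/answerdotai/ModernBERT-large`, the path recorded in `best_model_checkpoint`), and uses only the Python standard library.

```python
import json
from pathlib import Path

# Assumed root containing the trial-*/checkpoint-*/ directories from this repo dump.
results_dir = Path("./results/answerdotai/ModernBERT-large")

for state_file in sorted(results_dir.glob("trial-*/checkpoint-*/trainer_state.json")):
    state = json.loads(state_file.read_text())
    # Keep only the log entries that carry evaluation metrics (one per epoch).
    evals = [entry for entry in state["log_history"] if "eval_loss" in entry]
    if not evals:
        continue
    best = min(evals, key=lambda entry: entry["eval_loss"])
    checkpoint = state_file.parent.relative_to(results_dir)
    print(
        f"{checkpoint}  "
        f"best eval_loss={best['eval_loss']:.4f}  "
        f"eval_f1={best.get('eval_f1', float('nan')):.4f}  "
        f"epoch={best['epoch']}"
    )
```

Run against the checkpoints in this diff, such a script would surface, for example, trial-9/checkpoint-1506 with eval_loss 0.0258 and F1 0.9972 after one epoch, matching the `best_metric` recorded in its trainer state.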